Merge branch 'v0.11.0rc' into feature/opencode-docker-support

Web Dev Cody
2026-01-13 09:35:18 -05:00
committed by GitHub
42 changed files with 1464 additions and 1207 deletions

View File

@@ -59,6 +59,11 @@ FROM node:22-slim AS server
ARG GIT_COMMIT_SHA=unknown
LABEL automaker.git.commit.sha="${GIT_COMMIT_SHA}"
# Build arguments for user ID matching (allows matching host user for mounted volumes)
# Override at build time: docker build --build-arg UID=$(id -u) --build-arg GID=$(id -g) ...
ARG UID=1001
ARG GID=1001
# Install git, curl, bash (for terminal), gosu (for user switching), and GitHub CLI (pinned version, multi-arch)
RUN apt-get update && apt-get install -y --no-install-recommends \
git curl bash gosu ca-certificates openssh-client \
@@ -79,8 +84,10 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
RUN npm install -g @anthropic-ai/claude-code
# Create non-root user with home directory BEFORE installing Cursor CLI
RUN groupadd -g 1001 automaker && \
useradd -u 1001 -g automaker -m -d /home/automaker -s /bin/bash automaker && \
# Uses UID/GID build args to match host user for mounted volume permissions
# Use -o flag to allow non-unique IDs (GID 1000 may already exist as 'node' group)
RUN groupadd -o -g ${GID} automaker && \
useradd -o -u ${UID} -g automaker -m -d /home/automaker -s /bin/bash automaker && \
mkdir -p /home/automaker/.local/bin && \
mkdir -p /home/automaker/.cursor && \
chown -R automaker:automaker /home/automaker && \

View File

@@ -27,9 +27,15 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
# Install Claude CLI globally
RUN npm install -g @anthropic-ai/claude-code
# Create non-root user
RUN groupadd -g 1001 automaker && \
useradd -u 1001 -g automaker -m -d /home/automaker -s /bin/bash automaker && \
# Build arguments for user ID matching (allows matching host user for mounted volumes)
# Override at build time: docker-compose build --build-arg UID=$(id -u) --build-arg GID=$(id -g)
ARG UID=1001
ARG GID=1001
# Create non-root user with configurable UID/GID
# Use -o flag to allow non-unique IDs (GID 1000 may already exist as 'node' group)
RUN groupadd -o -g ${GID} automaker && \
useradd -o -u ${UID} -g automaker -m -d /home/automaker -s /bin/bash automaker && \
mkdir -p /home/automaker/.local/bin && \
mkdir -p /home/automaker/.cursor && \
chown -R automaker:automaker /home/automaker && \

View File

@@ -99,6 +99,8 @@ export class ClaudeProvider extends BaseProvider {
...(maxThinkingTokens && { maxThinkingTokens }),
// Subagents configuration for specialized task delegation
...(options.agents && { agents: options.agents }),
// Pass through outputFormat for structured JSON outputs
...(options.outputFormat && { outputFormat: options.outputFormat }),
};
// Build prompt payload
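For reference, a minimal sketch of the `outputFormat` value a caller can supply so the provider forwards it to the SDK; the schema contents below are illustrative and not taken from this diff.

```typescript
// Illustrative schema only; the provider simply forwards outputFormat when present.
const outputFormat = {
  type: 'json_schema' as const,
  schema: {
    type: 'object',
    properties: { title: { type: 'string' } },
    required: ['title'],
  },
};
```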

View File

@@ -30,3 +30,11 @@ export { OpencodeProvider } from './opencode-provider.js';
// Provider factory
export { ProviderFactory } from './provider-factory.js';
// Simple query service - unified interface for basic AI queries
export { simpleQuery, streamingQuery } from './simple-query-service.js';
export type {
SimpleQueryOptions,
SimpleQueryResult,
StreamingQueryOptions,
} from './simple-query-service.js';
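A minimal usage sketch of the new barrel export, assuming a route module imports through the providers index (the relative path is illustrative):

```typescript
import { simpleQuery } from '../providers/index.js';

// Mirrors the @example in simple-query-service.ts: a one-turn, tool-free query.
const { text } = await simpleQuery({
  prompt: 'Generate a title for: user authentication',
  cwd: process.cwd(),
  maxTurns: 1,
  allowedTools: [],
});
console.log(text);
```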

View File

@@ -0,0 +1,240 @@
/**
* Simple Query Service - Simplified interface for basic AI queries
*
* Use this for routes that need simple text responses without
* complex event handling. This service abstracts away the provider
* selection and streaming details, providing a clean interface
* for common query patterns.
*
* Benefits:
* - No direct SDK imports needed in route files
* - Consistent provider routing based on model
* - Automatic text extraction from streaming responses
* - Structured output support for JSON schema responses
* - Eliminates duplicate extractTextFromStream() functions
*/
import { ProviderFactory } from './provider-factory.js';
import type { ProviderMessage, ContentBlock, ThinkingLevel } from '@automaker/types';
/**
* Options for simple query execution
*/
export interface SimpleQueryOptions {
/** The prompt to send to the AI (can be text or multi-part content) */
prompt: string | Array<{ type: string; text?: string; source?: object }>;
/** Model to use (with or without provider prefix) */
model?: string;
/** Working directory for the query */
cwd: string;
/** System prompt (combined with user prompt for some providers) */
systemPrompt?: string;
/** Maximum turns for agentic operations (default: 1 for simpleQuery, 250 for streamingQuery) */
maxTurns?: number;
/** Tools to allow (default: [] for simpleQuery, ['Read', 'Glob', 'Grep'] for streamingQuery) */
allowedTools?: string[];
/** Abort controller for cancellation */
abortController?: AbortController;
/** Structured output format for JSON responses */
outputFormat?: {
type: 'json_schema';
schema: Record<string, unknown>;
};
/** Thinking level for Claude models */
thinkingLevel?: ThinkingLevel;
/** If true, runs in read-only mode (no file writes) */
readOnly?: boolean;
/** Setting sources for CLAUDE.md loading */
settingSources?: Array<'user' | 'project' | 'local'>;
}
/**
* Result from a simple query
*/
export interface SimpleQueryResult {
/** The accumulated text response */
text: string;
/** Structured output if outputFormat was specified and provider supports it */
structured_output?: Record<string, unknown>;
}
/**
* Options for streaming query execution
*/
export interface StreamingQueryOptions extends SimpleQueryOptions {
/** Callback for each text chunk received */
onText?: (text: string) => void;
/** Callback for tool use events */
onToolUse?: (tool: string, input: unknown) => void;
/** Callback for thinking blocks (if available) */
onThinking?: (thinking: string) => void;
}
/**
* Default model to use when none specified
*/
const DEFAULT_MODEL = 'claude-sonnet-4-20250514';
/**
* Execute a simple query and return the text result
*
* Use this for simple, non-streaming queries where you just need
* the final text response. For more complex use cases with progress
* callbacks, use streamingQuery() instead.
*
* @example
* ```typescript
* const result = await simpleQuery({
* prompt: 'Generate a title for: user authentication',
* cwd: process.cwd(),
* systemPrompt: 'You are a title generator...',
* maxTurns: 1,
* allowedTools: [],
* });
* console.log(result.text); // "Add user authentication"
* ```
*/
export async function simpleQuery(options: SimpleQueryOptions): Promise<SimpleQueryResult> {
const model = options.model || DEFAULT_MODEL;
const provider = ProviderFactory.getProviderForModel(model);
let responseText = '';
let structuredOutput: Record<string, unknown> | undefined;
// Build provider options
const providerOptions = {
prompt: options.prompt,
model: model,
cwd: options.cwd,
systemPrompt: options.systemPrompt,
maxTurns: options.maxTurns ?? 1,
allowedTools: options.allowedTools ?? [],
abortController: options.abortController,
outputFormat: options.outputFormat,
thinkingLevel: options.thinkingLevel,
readOnly: options.readOnly,
settingSources: options.settingSources,
};
for await (const msg of provider.executeQuery(providerOptions)) {
// Handle error messages
if (msg.type === 'error') {
const errorMessage = msg.error || 'Provider returned an error';
throw new Error(errorMessage);
}
// Extract text from assistant messages
if (msg.type === 'assistant' && msg.message?.content) {
for (const block of msg.message.content) {
if (block.type === 'text' && block.text) {
responseText += block.text;
}
}
}
// Handle result messages
if (msg.type === 'result') {
if (msg.subtype === 'success') {
// Use result text if longer than accumulated text
if (msg.result && msg.result.length > responseText.length) {
responseText = msg.result;
}
// Capture structured output if present
if (msg.structured_output) {
structuredOutput = msg.structured_output;
}
} else if (msg.subtype === 'error_max_turns') {
// Max turns reached - return what we have
break;
} else if (msg.subtype === 'error_max_structured_output_retries') {
throw new Error('Could not produce valid structured output after retries');
}
}
}
return { text: responseText, structured_output: structuredOutput };
}
/**
* Execute a streaming query with event callbacks
*
* Use this for queries where you need real-time progress updates,
* such as when displaying streaming output to a user.
*
* @example
* ```typescript
* const result = await streamingQuery({
* prompt: 'Analyze this project and suggest improvements',
* cwd: '/path/to/project',
* maxTurns: 250,
* allowedTools: ['Read', 'Glob', 'Grep'],
* onText: (text) => emitProgress(text),
* onToolUse: (tool, input) => emitToolUse(tool, input),
* });
* ```
*/
export async function streamingQuery(options: StreamingQueryOptions): Promise<SimpleQueryResult> {
const model = options.model || DEFAULT_MODEL;
const provider = ProviderFactory.getProviderForModel(model);
let responseText = '';
let structuredOutput: Record<string, unknown> | undefined;
// Build provider options
const providerOptions = {
prompt: options.prompt,
model: model,
cwd: options.cwd,
systemPrompt: options.systemPrompt,
maxTurns: options.maxTurns ?? 250,
allowedTools: options.allowedTools ?? ['Read', 'Glob', 'Grep'],
abortController: options.abortController,
outputFormat: options.outputFormat,
thinkingLevel: options.thinkingLevel,
readOnly: options.readOnly,
settingSources: options.settingSources,
};
for await (const msg of provider.executeQuery(providerOptions)) {
// Handle error messages
if (msg.type === 'error') {
const errorMessage = msg.error || 'Provider returned an error';
throw new Error(errorMessage);
}
// Extract content from assistant messages
if (msg.type === 'assistant' && msg.message?.content) {
for (const block of msg.message.content) {
if (block.type === 'text' && block.text) {
responseText += block.text;
options.onText?.(block.text);
} else if (block.type === 'tool_use' && block.name) {
options.onToolUse?.(block.name, block.input);
} else if (block.type === 'thinking' && block.thinking) {
options.onThinking?.(block.thinking);
}
}
}
// Handle result messages
if (msg.type === 'result') {
if (msg.subtype === 'success') {
// Use result text if longer than accumulated text
if (msg.result && msg.result.length > responseText.length) {
responseText = msg.result;
}
// Capture structured output if present
if (msg.structured_output) {
structuredOutput = msg.structured_output;
}
} else if (msg.subtype === 'error_max_turns') {
// Max turns reached - return what we have
break;
} else if (msg.subtype === 'error_max_structured_output_retries') {
throw new Error('Could not produce valid structured output after retries');
}
}
}
return { text: responseText, structured_output: structuredOutput };
}
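Taken together, a hedged sketch of how a caller might combine the streaming callbacks with structured output. The import path matches the file above; the schema, prompt, and callback bodies are illustrative only.

```typescript
import { streamingQuery } from './simple-query-service.js';

const result = await streamingQuery({
  prompt: 'Summarize this repository as JSON',
  cwd: '/path/to/project',
  maxTurns: 250,
  allowedTools: ['Read', 'Glob', 'Grep'],
  readOnly: true,
  outputFormat: {
    type: 'json_schema',
    schema: {
      type: 'object',
      properties: { summary: { type: 'string' } },
      required: ['summary'],
    },
  },
  onText: (text) => process.stdout.write(text),
});

// Providers that support structured output populate structured_output;
// otherwise fall back to the accumulated text.
const summary = (result.structured_output?.summary as string | undefined) ?? result.text;
```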

View File

@@ -5,15 +5,12 @@
* (defaults to Sonnet for balanced speed and quality).
*/
import { query } from '@anthropic-ai/claude-agent-sdk';
import * as secureFs from '../../lib/secure-fs.js';
import type { EventEmitter } from '../../lib/events.js';
import { createLogger } from '@automaker/utils';
import { DEFAULT_PHASE_MODELS, isCursorModel, stripProviderPrefix } from '@automaker/types';
import { DEFAULT_PHASE_MODELS } from '@automaker/types';
import { resolvePhaseModel } from '@automaker/model-resolver';
import { createFeatureGenerationOptions } from '../../lib/sdk-options.js';
import { ProviderFactory } from '../../providers/provider-factory.js';
import { logAuthStatus } from './common.js';
import { streamingQuery } from '../../providers/simple-query-service.js';
import { parseAndCreateFeatures } from './parse-and-create-features.js';
import { getAppSpecPath } from '@automaker/platform';
import type { SettingsService } from '../../services/settings-service.js';
@@ -115,121 +112,30 @@ IMPORTANT: Do not ask for clarification. The specification is provided above. Ge
logger.info('Using model:', model);
let responseText = '';
let messageCount = 0;
// Use streamingQuery with event callbacks
const result = await streamingQuery({
prompt,
model,
cwd: projectPath,
maxTurns: 250,
allowedTools: ['Read', 'Glob', 'Grep'],
abortController,
thinkingLevel,
readOnly: true, // Feature generation only reads code, doesn't write
settingSources: autoLoadClaudeMd ? ['user', 'project', 'local'] : undefined,
onText: (text) => {
logger.debug(`Feature text block received (${text.length} chars)`);
events.emit('spec-regeneration:event', {
type: 'spec_regeneration_progress',
content: text,
projectPath: projectPath,
});
},
});
// Route to appropriate provider based on model type
if (isCursorModel(model)) {
// Use Cursor provider for Cursor models
logger.info('[FeatureGeneration] Using Cursor provider');
const responseText = result.text;
const provider = ProviderFactory.getProviderForModel(model);
// Strip provider prefix - providers expect bare model IDs
const bareModel = stripProviderPrefix(model);
// Add explicit instructions for Cursor to return JSON in response
const cursorPrompt = `${prompt}
CRITICAL INSTRUCTIONS:
1. DO NOT write any files. Return the JSON in your response only.
2. Respond with ONLY a JSON object - no explanations, no markdown, just raw JSON.
3. Your entire response should be valid JSON starting with { and ending with }. No text before or after.`;
for await (const msg of provider.executeQuery({
prompt: cursorPrompt,
model: bareModel,
cwd: projectPath,
maxTurns: 250,
allowedTools: ['Read', 'Glob', 'Grep'],
abortController,
readOnly: true, // Feature generation only reads code, doesn't write
})) {
messageCount++;
if (msg.type === 'assistant' && msg.message?.content) {
for (const block of msg.message.content) {
if (block.type === 'text' && block.text) {
responseText += block.text;
logger.debug(`Feature text block received (${block.text.length} chars)`);
events.emit('spec-regeneration:event', {
type: 'spec_regeneration_progress',
content: block.text,
projectPath: projectPath,
});
}
}
} else if (msg.type === 'result' && msg.subtype === 'success' && msg.result) {
// Use result if it's a final accumulated message
if (msg.result.length > responseText.length) {
responseText = msg.result;
}
}
}
} else {
// Use Claude SDK for Claude models
logger.info('[FeatureGeneration] Using Claude SDK');
const options = createFeatureGenerationOptions({
cwd: projectPath,
abortController,
autoLoadClaudeMd,
model,
thinkingLevel, // Pass thinking level for extended thinking
});
logger.debug('SDK Options:', JSON.stringify(options, null, 2));
logger.info('Calling Claude Agent SDK query() for features...');
logAuthStatus('Right before SDK query() for features');
let stream;
try {
stream = query({ prompt, options });
logger.debug('query() returned stream successfully');
} catch (queryError) {
logger.error('❌ query() threw an exception:');
logger.error('Error:', queryError);
throw queryError;
}
logger.debug('Starting to iterate over feature stream...');
try {
for await (const msg of stream) {
messageCount++;
logger.debug(
`Feature stream message #${messageCount}:`,
JSON.stringify({ type: msg.type, subtype: (msg as any).subtype }, null, 2)
);
if (msg.type === 'assistant' && msg.message.content) {
for (const block of msg.message.content) {
if (block.type === 'text') {
responseText += block.text;
logger.debug(`Feature text block received (${block.text.length} chars)`);
events.emit('spec-regeneration:event', {
type: 'spec_regeneration_progress',
content: block.text,
projectPath: projectPath,
});
}
}
} else if (msg.type === 'result' && (msg as any).subtype === 'success') {
logger.debug('Received success result for features');
responseText = (msg as any).result || responseText;
} else if ((msg as { type: string }).type === 'error') {
logger.error('❌ Received error message from feature stream:');
logger.error('Error message:', JSON.stringify(msg, null, 2));
}
}
} catch (streamError) {
logger.error('❌ Error while iterating feature stream:');
logger.error('Stream error:', streamError);
throw streamError;
}
}
logger.info(`Feature stream complete. Total messages: ${messageCount}`);
logger.info(`Feature stream complete.`);
logger.info(`Feature response length: ${responseText.length} chars`);
logger.info('========== FULL RESPONSE TEXT ==========');
logger.info(responseText);

View File

@@ -5,8 +5,6 @@
* (defaults to Opus for high-quality specification generation).
*/
import { query } from '@anthropic-ai/claude-agent-sdk';
import path from 'path';
import * as secureFs from '../../lib/secure-fs.js';
import type { EventEmitter } from '../../lib/events.js';
import {
@@ -16,12 +14,10 @@ import {
type SpecOutput,
} from '../../lib/app-spec-format.js';
import { createLogger } from '@automaker/utils';
import { DEFAULT_PHASE_MODELS, isCursorModel, stripProviderPrefix } from '@automaker/types';
import { DEFAULT_PHASE_MODELS, isCursorModel } from '@automaker/types';
import { resolvePhaseModel } from '@automaker/model-resolver';
import { createSpecGenerationOptions } from '../../lib/sdk-options.js';
import { extractJson } from '../../lib/json-extractor.js';
import { ProviderFactory } from '../../providers/provider-factory.js';
import { logAuthStatus } from './common.js';
import { streamingQuery } from '../../providers/simple-query-service.js';
import { generateFeaturesFromSpec } from './generate-features-from-spec.js';
import { ensureAutomakerDir, getAppSpecPath } from '@automaker/platform';
import type { SettingsService } from '../../services/settings-service.js';
@@ -109,21 +105,15 @@ ${getStructuredSpecPromptInstruction()}`;
logger.info('Using model:', model);
let responseText = '';
let messageCount = 0;
let structuredOutput: SpecOutput | null = null;
// Route to appropriate provider based on model type
if (isCursorModel(model)) {
// Use Cursor provider for Cursor models
logger.info('[SpecGeneration] Using Cursor provider');
// Determine if we should use structured output (Claude supports it, Cursor doesn't)
const useStructuredOutput = !isCursorModel(model);
const provider = ProviderFactory.getProviderForModel(model);
// Strip provider prefix - providers expect bare model IDs
const bareModel = stripProviderPrefix(model);
// For Cursor, include the JSON schema in the prompt with clear instructions
// to return JSON in the response (not write to a file)
const cursorPrompt = `${prompt}
// Build the final prompt - for Cursor, include JSON schema instructions
let finalPrompt = prompt;
if (!useStructuredOutput) {
finalPrompt = `${prompt}
CRITICAL INSTRUCTIONS:
1. DO NOT write any files. DO NOT create any files like "project_specification.json".
@@ -133,153 +123,57 @@ CRITICAL INSTRUCTIONS:
${JSON.stringify(specOutputSchema, null, 2)}
Your entire response should be valid JSON starting with { and ending with }. No text before or after.`;
for await (const msg of provider.executeQuery({
prompt: cursorPrompt,
model: bareModel,
cwd: projectPath,
maxTurns: 250,
allowedTools: ['Read', 'Glob', 'Grep'],
abortController,
readOnly: true, // Spec generation only reads code, we write the spec ourselves
})) {
messageCount++;
if (msg.type === 'assistant' && msg.message?.content) {
for (const block of msg.message.content) {
if (block.type === 'text' && block.text) {
responseText += block.text;
logger.info(
`Text block received (${block.text.length} chars), total now: ${responseText.length} chars`
);
events.emit('spec-regeneration:event', {
type: 'spec_regeneration_progress',
content: block.text,
projectPath: projectPath,
});
} else if (block.type === 'tool_use') {
logger.info('Tool use:', block.name);
events.emit('spec-regeneration:event', {
type: 'spec_tool',
tool: block.name,
input: block.input,
});
}
}
} else if (msg.type === 'result' && msg.subtype === 'success' && msg.result) {
// Use result if it's a final accumulated message
if (msg.result.length > responseText.length) {
responseText = msg.result;
}
}
}
// Parse JSON from the response text using shared utility
if (responseText) {
structuredOutput = extractJson<SpecOutput>(responseText, { logger });
}
} else {
// Use Claude SDK for Claude models
logger.info('[SpecGeneration] Using Claude SDK');
const options = createSpecGenerationOptions({
cwd: projectPath,
abortController,
autoLoadClaudeMd,
model,
thinkingLevel, // Pass thinking level for extended thinking
outputFormat: {
type: 'json_schema',
schema: specOutputSchema,
},
});
logger.debug('SDK Options:', JSON.stringify(options, null, 2));
logger.info('Calling Claude Agent SDK query()...');
// Log auth status right before the SDK call
logAuthStatus('Right before SDK query()');
let stream;
try {
stream = query({ prompt, options });
logger.debug('query() returned stream successfully');
} catch (queryError) {
logger.error('❌ query() threw an exception:');
logger.error('Error:', queryError);
throw queryError;
}
logger.info('Starting to iterate over stream...');
try {
for await (const msg of stream) {
messageCount++;
logger.info(
`Stream message #${messageCount}: type=${msg.type}, subtype=${(msg as any).subtype}`
);
if (msg.type === 'assistant') {
const msgAny = msg as any;
if (msgAny.message?.content) {
for (const block of msgAny.message.content) {
if (block.type === 'text') {
responseText += block.text;
logger.info(
`Text block received (${block.text.length} chars), total now: ${responseText.length} chars`
);
events.emit('spec-regeneration:event', {
type: 'spec_regeneration_progress',
content: block.text,
projectPath: projectPath,
});
} else if (block.type === 'tool_use') {
logger.info('Tool use:', block.name);
events.emit('spec-regeneration:event', {
type: 'spec_tool',
tool: block.name,
input: block.input,
});
}
}
}
} else if (msg.type === 'result' && (msg as any).subtype === 'success') {
logger.info('Received success result');
// Check for structured output - this is the reliable way to get spec data
const resultMsg = msg as any;
if (resultMsg.structured_output) {
structuredOutput = resultMsg.structured_output as SpecOutput;
logger.info('✅ Received structured output');
logger.debug('Structured output:', JSON.stringify(structuredOutput, null, 2));
} else {
logger.warn('⚠️ No structured output in result, will fall back to text parsing');
}
} else if (msg.type === 'result') {
// Handle error result types
const subtype = (msg as any).subtype;
logger.info(`Result message: subtype=${subtype}`);
if (subtype === 'error_max_turns') {
logger.error('❌ Hit max turns limit!');
} else if (subtype === 'error_max_structured_output_retries') {
logger.error('❌ Failed to produce valid structured output after retries');
throw new Error('Could not produce valid spec output');
}
} else if ((msg as { type: string }).type === 'error') {
logger.error('❌ Received error message from stream:');
logger.error('Error message:', JSON.stringify(msg, null, 2));
} else if (msg.type === 'user') {
// Log user messages (tool results)
logger.info(`User message (tool result): ${JSON.stringify(msg).substring(0, 500)}`);
}
}
} catch (streamError) {
logger.error('❌ Error while iterating stream:');
logger.error('Stream error:', streamError);
throw streamError;
}
}
logger.info(`Stream iteration complete. Total messages: ${messageCount}`);
// Use streamingQuery with event callbacks
const result = await streamingQuery({
prompt: finalPrompt,
model,
cwd: projectPath,
maxTurns: 250,
allowedTools: ['Read', 'Glob', 'Grep'],
abortController,
thinkingLevel,
readOnly: true, // Spec generation only reads code, we write the spec ourselves
settingSources: autoLoadClaudeMd ? ['user', 'project', 'local'] : undefined,
outputFormat: useStructuredOutput
? {
type: 'json_schema',
schema: specOutputSchema,
}
: undefined,
onText: (text) => {
responseText += text;
logger.info(
`Text block received (${text.length} chars), total now: ${responseText.length} chars`
);
events.emit('spec-regeneration:event', {
type: 'spec_regeneration_progress',
content: text,
projectPath: projectPath,
});
},
onToolUse: (tool, input) => {
logger.info('Tool use:', tool);
events.emit('spec-regeneration:event', {
type: 'spec_tool',
tool,
input,
});
},
});
// Get structured output if available
if (result.structured_output) {
structuredOutput = result.structured_output as unknown as SpecOutput;
logger.info('✅ Received structured output');
logger.debug('Structured output:', JSON.stringify(structuredOutput, null, 2));
} else if (!useStructuredOutput && responseText) {
// For non-Claude providers, parse JSON from response text
structuredOutput = extractJson<SpecOutput>(responseText, { logger });
}
logger.info(`Stream iteration complete.`);
logger.info(`Response text length: ${responseText.length} chars`);
// Determine XML content to save

View File

@@ -11,13 +11,11 @@
*/
import type { Request, Response } from 'express';
import { query } from '@anthropic-ai/claude-agent-sdk';
import { createLogger } from '@automaker/utils';
import { DEFAULT_PHASE_MODELS, isCursorModel, stripProviderPrefix } from '@automaker/types';
import { DEFAULT_PHASE_MODELS } from '@automaker/types';
import { PathNotAllowedError } from '@automaker/platform';
import { resolvePhaseModel } from '@automaker/model-resolver';
import { createCustomOptions } from '../../../lib/sdk-options.js';
import { ProviderFactory } from '../../../providers/provider-factory.js';
import { simpleQuery } from '../../../providers/simple-query-service.js';
import * as secureFs from '../../../lib/secure-fs.js';
import * as path from 'path';
import type { SettingsService } from '../../../services/settings-service.js';
@@ -49,31 +47,6 @@ interface DescribeFileErrorResponse {
error: string;
}
/**
* Extract text content from Claude SDK response messages
*/
async function extractTextFromStream(
// eslint-disable-next-line @typescript-eslint/no-explicit-any
stream: AsyncIterable<any>
): Promise<string> {
let responseText = '';
for await (const msg of stream) {
if (msg.type === 'assistant' && msg.message?.content) {
const blocks = msg.message.content as Array<{ type: string; text?: string }>;
for (const block of blocks) {
if (block.type === 'text' && block.text) {
responseText += block.text;
}
}
} else if (msg.type === 'result' && msg.subtype === 'success') {
responseText = msg.result || responseText;
}
}
return responseText;
}
/**
* Create the describe-file request handler
*
@@ -159,16 +132,14 @@ export function createDescribeFileHandler(
// Build prompt with file content passed as structured data
// The file content is included directly, not via tool invocation
const instructionText = `Analyze the following file and provide a 1-2 sentence description suitable for use as context in an AI coding assistant. Focus on what the file contains, its purpose, and why an AI agent might want to use this context in the future (e.g., "API documentation for the authentication endpoints", "Configuration file for database connections", "Coding style guidelines for the project").
const prompt = `Analyze the following file and provide a 1-2 sentence description suitable for use as context in an AI coding assistant. Focus on what the file contains, its purpose, and why an AI agent might want to use this context in the future (e.g., "API documentation for the authentication endpoints", "Configuration file for database connections", "Coding style guidelines for the project").
Respond with ONLY the description text, no additional formatting, preamble, or explanation.
File: ${fileName}${truncated ? ' (truncated)' : ''}`;
File: ${fileName}${truncated ? ' (truncated)' : ''}
const promptContent = [
{ type: 'text' as const, text: instructionText },
{ type: 'text' as const, text: `\n\n--- FILE CONTENT ---\n${contentToAnalyze}` },
];
--- FILE CONTENT ---
${contentToAnalyze}`;
// Use the file's directory as the working directory
const cwd = path.dirname(resolvedPath);
@@ -190,67 +161,19 @@ File: ${fileName}${truncated ? ' (truncated)' : ''}`;
logger.info(`Resolved model: ${model}, thinkingLevel: ${thinkingLevel}`);
let description: string;
// Use simpleQuery - provider abstraction handles routing to correct provider
const result = await simpleQuery({
prompt,
model,
cwd,
maxTurns: 1,
allowedTools: [],
thinkingLevel,
readOnly: true, // File description only reads, doesn't write
settingSources: autoLoadClaudeMd ? ['user', 'project', 'local'] : undefined,
});
// Route to appropriate provider based on model type
if (isCursorModel(model)) {
// Use Cursor provider for Cursor models
logger.info(`Using Cursor provider for model: ${model}`);
const provider = ProviderFactory.getProviderForModel(model);
// Strip provider prefix - providers expect bare model IDs
const bareModel = stripProviderPrefix(model);
// Build a simple text prompt for Cursor (no multi-part content blocks)
const cursorPrompt = `${instructionText}\n\n--- FILE CONTENT ---\n${contentToAnalyze}`;
let responseText = '';
for await (const msg of provider.executeQuery({
prompt: cursorPrompt,
model: bareModel,
cwd,
maxTurns: 1,
allowedTools: [],
readOnly: true, // File description only reads, doesn't write
})) {
if (msg.type === 'assistant' && msg.message?.content) {
for (const block of msg.message.content) {
if (block.type === 'text' && block.text) {
responseText += block.text;
}
}
}
}
description = responseText;
} else {
// Use Claude SDK for Claude models
logger.info(`Using Claude SDK for model: ${model}`);
// Use centralized SDK options with proper cwd validation
// No tools needed since we're passing file content directly
const sdkOptions = createCustomOptions({
cwd,
model,
maxTurns: 1,
allowedTools: [],
autoLoadClaudeMd,
thinkingLevel, // Pass thinking level for extended thinking
});
const promptGenerator = (async function* () {
yield {
type: 'user' as const,
session_id: '',
message: { role: 'user' as const, content: promptContent },
parent_tool_use_id: null,
};
})();
const stream = query({ prompt: promptGenerator, options: sdkOptions });
// Extract the description from the response
description = await extractTextFromStream(stream);
}
const description = result.text;
if (!description || description.trim().length === 0) {
logger.warn('Received empty response from Claude');

View File

@@ -12,12 +12,10 @@
*/
import type { Request, Response } from 'express';
import { query } from '@anthropic-ai/claude-agent-sdk';
import { createLogger, readImageAsBase64 } from '@automaker/utils';
import { DEFAULT_PHASE_MODELS, isCursorModel, stripProviderPrefix } from '@automaker/types';
import { DEFAULT_PHASE_MODELS, isCursorModel } from '@automaker/types';
import { resolvePhaseModel } from '@automaker/model-resolver';
import { createCustomOptions } from '../../../lib/sdk-options.js';
import { ProviderFactory } from '../../../providers/provider-factory.js';
import { simpleQuery } from '../../../providers/simple-query-service.js';
import * as secureFs from '../../../lib/secure-fs.js';
import * as path from 'path';
import type { SettingsService } from '../../../services/settings-service.js';
@@ -178,57 +176,10 @@ function mapDescribeImageError(rawMessage: string | undefined): {
return baseResponse;
}
/**
* Extract text content from Claude SDK response messages and log high-signal stream events.
*/
async function extractTextFromStream(
// eslint-disable-next-line @typescript-eslint/no-explicit-any
stream: AsyncIterable<any>,
requestId: string
): Promise<string> {
let responseText = '';
let messageCount = 0;
logger.info(`[${requestId}] [Stream] Begin reading SDK stream...`);
for await (const msg of stream) {
messageCount++;
const msgType = msg?.type;
const msgSubtype = msg?.subtype;
// Keep this concise but informative. Full error object is logged in catch blocks.
logger.info(
`[${requestId}] [Stream] #${messageCount} type=${String(msgType)} subtype=${String(msgSubtype ?? '')}`
);
if (msgType === 'assistant' && msg.message?.content) {
const blocks = msg.message.content as Array<{ type: string; text?: string }>;
logger.info(`[${requestId}] [Stream] assistant blocks=${blocks.length}`);
for (const block of blocks) {
if (block.type === 'text' && block.text) {
responseText += block.text;
}
}
}
if (msgType === 'result' && msgSubtype === 'success') {
if (typeof msg.result === 'string' && msg.result.length > 0) {
responseText = msg.result;
}
}
}
logger.info(
`[${requestId}] [Stream] End of stream. messages=${messageCount} textLength=${responseText.length}`
);
return responseText;
}
/**
* Create the describe-image request handler
*
* Uses Claude SDK query with multi-part content blocks to include the image (base64),
* Uses the provider abstraction with multi-part content blocks to include the image (base64),
* matching the agent runner behavior.
*
* @param settingsService - Optional settings service for loading autoLoadClaudeMd setting
@@ -309,27 +260,6 @@ export function createDescribeImageHandler(
`[${requestId}] image meta filename=${imageData.filename} mime=${imageData.mimeType} base64Len=${base64Length} estBytes=${estimatedBytes}`
);
// Build multi-part prompt with image block (no Read tool required)
const instructionText =
`Describe this image in 1-2 sentences suitable for use as context in an AI coding assistant. ` +
`Focus on what the image shows and its purpose (e.g., "UI mockup showing login form with email/password fields", ` +
`"Architecture diagram of microservices", "Screenshot of error message in terminal").\n\n` +
`Respond with ONLY the description text, no additional formatting, preamble, or explanation.`;
const promptContent = [
{ type: 'text' as const, text: instructionText },
{
type: 'image' as const,
source: {
type: 'base64' as const,
media_type: imageData.mimeType,
data: imageData.base64,
},
},
];
logger.info(`[${requestId}] Built multi-part prompt blocks=${promptContent.length}`);
const cwd = path.dirname(actualPath);
logger.info(`[${requestId}] Using cwd=${cwd}`);
@@ -348,85 +278,59 @@ export function createDescribeImageHandler(
logger.info(`[${requestId}] Using model: ${model}`);
let description: string;
// Build the instruction text
const instructionText =
`Describe this image in 1-2 sentences suitable for use as context in an AI coding assistant. ` +
`Focus on what the image shows and its purpose (e.g., "UI mockup showing login form with email/password fields", ` +
`"Architecture diagram of microservices", "Screenshot of error message in terminal").\n\n` +
`Respond with ONLY the description text, no additional formatting, preamble, or explanation.`;
// Build prompt based on provider capability
// Some providers (like Cursor) may not support image content blocks
let prompt: string | Array<{ type: string; text?: string; source?: object }>;
// Route to appropriate provider based on model type
if (isCursorModel(model)) {
// Use Cursor provider for Cursor models
// Note: Cursor may have limited support for image content blocks
logger.info(`[${requestId}] Using Cursor provider for model: ${model}`);
const provider = ProviderFactory.getProviderForModel(model);
// Strip provider prefix - providers expect bare model IDs
const bareModel = stripProviderPrefix(model);
// Build prompt with image reference for Cursor
// Note: Cursor CLI may not support base64 image blocks directly,
// so we include the image path as context
const cursorPrompt = `${instructionText}\n\nImage file: ${actualPath}\nMIME type: ${imageData.mimeType}`;
let responseText = '';
const queryStart = Date.now();
for await (const msg of provider.executeQuery({
prompt: cursorPrompt,
model: bareModel,
cwd,
maxTurns: 1,
allowedTools: ['Read'], // Allow Read tool so Cursor can read the image if needed
readOnly: true, // Image description only reads, doesn't write
})) {
if (msg.type === 'assistant' && msg.message?.content) {
for (const block of msg.message.content) {
if (block.type === 'text' && block.text) {
responseText += block.text;
}
}
}
}
logger.info(`[${requestId}] Cursor query completed in ${Date.now() - queryStart}ms`);
description = responseText;
// Cursor may not support base64 image blocks directly
// Use text prompt with image path reference
logger.info(`[${requestId}] Using text prompt for Cursor model`);
prompt = `${instructionText}\n\nImage file: ${actualPath}\nMIME type: ${imageData.mimeType}`;
} else {
// Use Claude SDK for Claude models (supports image content blocks)
logger.info(`[${requestId}] Using Claude SDK for model: ${model}`);
// Use the same centralized option builder used across the server (validates cwd)
const sdkOptions = createCustomOptions({
cwd,
model,
maxTurns: 1,
allowedTools: [],
autoLoadClaudeMd,
thinkingLevel, // Pass thinking level for extended thinking
});
logger.info(
`[${requestId}] SDK options model=${sdkOptions.model} maxTurns=${sdkOptions.maxTurns} allowedTools=${JSON.stringify(
sdkOptions.allowedTools
)}`
);
const promptGenerator = (async function* () {
yield {
type: 'user' as const,
session_id: '',
message: { role: 'user' as const, content: promptContent },
parent_tool_use_id: null,
};
})();
logger.info(`[${requestId}] Calling query()...`);
const queryStart = Date.now();
const stream = query({ prompt: promptGenerator, options: sdkOptions });
logger.info(`[${requestId}] query() returned stream in ${Date.now() - queryStart}ms`);
// Extract the description from the response
const extractStart = Date.now();
description = await extractTextFromStream(stream, requestId);
logger.info(`[${requestId}] extractMs=${Date.now() - extractStart}`);
// Claude and other vision-capable models support multi-part prompts with images
logger.info(`[${requestId}] Using multi-part prompt with image block`);
prompt = [
{ type: 'text', text: instructionText },
{
type: 'image',
source: {
type: 'base64',
media_type: imageData.mimeType,
data: imageData.base64,
},
},
];
}
logger.info(`[${requestId}] Calling simpleQuery...`);
const queryStart = Date.now();
// Use simpleQuery - provider abstraction handles routing
const result = await simpleQuery({
prompt,
model,
cwd,
maxTurns: 1,
allowedTools: isCursorModel(model) ? ['Read'] : [], // Allow Read for Cursor to read image if needed
thinkingLevel,
readOnly: true, // Image description only reads, doesn't write
settingSources: autoLoadClaudeMd ? ['user', 'project', 'local'] : undefined,
});
logger.info(`[${requestId}] simpleQuery completed in ${Date.now() - queryStart}ms`);
const description = result.text;
if (!description || description.trim().length === 0) {
logger.warn(`[${requestId}] Received empty response from Claude`);
logger.warn(`[${requestId}] Received empty response from AI`);
const response: DescribeImageErrorResponse = {
success: false,
error: 'Failed to generate description - empty response',

View File

@@ -1,23 +1,16 @@
/**
* POST /enhance-prompt endpoint - Enhance user input text
*
* Uses Claude AI or Cursor to enhance text based on the specified enhancement mode.
* Supports modes: improve, technical, simplify, acceptance
* Uses the provider abstraction to enhance text based on the specified
* enhancement mode. Works with any configured provider (Claude, Cursor, etc.).
* Supports modes: improve, technical, simplify, acceptance, ux-reviewer
*/
import type { Request, Response } from 'express';
import { query } from '@anthropic-ai/claude-agent-sdk';
import { createLogger } from '@automaker/utils';
import { resolveModelString } from '@automaker/model-resolver';
import {
CLAUDE_MODEL_MAP,
isCursorModel,
isOpencodeModel,
stripProviderPrefix,
ThinkingLevel,
getThinkingTokenBudget,
} from '@automaker/types';
import { ProviderFactory } from '../../../providers/provider-factory.js';
import { CLAUDE_MODEL_MAP, type ThinkingLevel } from '@automaker/types';
import { simpleQuery } from '../../../providers/simple-query-service.js';
import type { SettingsService } from '../../../services/settings-service.js';
import { getPromptCustomization } from '../../../lib/settings-helpers.js';
import {
@@ -38,7 +31,7 @@ interface EnhanceRequestBody {
enhancementMode: string;
/** Optional model override */
model?: string;
/** Optional thinking level for Claude models (ignored for Cursor models) */
/** Optional thinking level for Claude models */
thinkingLevel?: ThinkingLevel;
}
@@ -58,80 +51,6 @@ interface EnhanceErrorResponse {
error: string;
}
/**
* Extract text content from Claude SDK response messages
*
* @param stream - The async iterable from the query function
* @returns The extracted text content
*/
async function extractTextFromStream(
stream: AsyncIterable<{
type: string;
subtype?: string;
result?: string;
message?: {
content?: Array<{ type: string; text?: string }>;
};
}>
): Promise<string> {
let responseText = '';
for await (const msg of stream) {
if (msg.type === 'assistant' && msg.message?.content) {
for (const block of msg.message.content) {
if (block.type === 'text' && block.text) {
responseText += block.text;
}
}
} else if (msg.type === 'result' && msg.subtype === 'success') {
responseText = msg.result || responseText;
}
}
return responseText;
}
/**
* Execute enhancement using a provider (Cursor, OpenCode, etc.)
*
* @param prompt - The enhancement prompt
* @param model - The model to use
* @returns The enhanced text
*/
async function executeWithProvider(prompt: string, model: string): Promise<string> {
const provider = ProviderFactory.getProviderForModel(model);
// Strip provider prefix - providers expect bare model IDs
const bareModel = stripProviderPrefix(model);
let responseText = '';
for await (const msg of provider.executeQuery({
prompt,
model: bareModel,
cwd: process.cwd(), // Enhancement doesn't need a specific working directory
readOnly: true, // Prompt enhancement only generates text, doesn't write files
})) {
if (msg.type === 'error') {
// Throw error with the message from the provider
const errorMessage = msg.error || 'Provider returned an error';
throw new Error(errorMessage);
} else if (msg.type === 'assistant' && msg.message?.content) {
for (const block of msg.message.content) {
if (block.type === 'text' && block.text) {
responseText += block.text;
}
}
} else if (msg.type === 'result' && msg.subtype === 'success' && msg.result) {
// Use result if it's a final accumulated message
if (msg.result.length > responseText.length) {
responseText = msg.result;
}
}
}
return responseText;
}
/**
* Create the enhance request handler
*
@@ -200,7 +119,6 @@ export function createEnhanceHandler(
logger.debug(`Using ${validMode} system prompt (length: ${systemPrompt.length} chars)`);
// Build the user prompt with few-shot examples
// This helps the model understand this is text transformation, not a coding task
const userPrompt = buildUserPrompt(validMode, trimmedText, true);
// Resolve the model - use the passed model, default to sonnet for quality
@@ -208,47 +126,20 @@ export function createEnhanceHandler(
logger.debug(`Using model: ${resolvedModel}`);
let enhancedText: string;
// Use simpleQuery - provider abstraction handles routing to correct provider
// The system prompt is combined with user prompt since some providers
// don't have a separate system prompt concept
const result = await simpleQuery({
prompt: `${systemPrompt}\n\n${userPrompt}`,
model: resolvedModel,
cwd: process.cwd(), // Enhancement doesn't need a specific working directory
maxTurns: 1,
allowedTools: [],
thinkingLevel,
readOnly: true, // Prompt enhancement only generates text, doesn't write files
});
// Route to appropriate provider based on model
if (isCursorModel(resolvedModel)) {
// Use Cursor provider for Cursor models
logger.info(`Using Cursor provider for model: ${resolvedModel}`);
// Cursor doesn't have a separate system prompt concept, so combine them
const combinedPrompt = `${systemPrompt}\n\n${userPrompt}`;
enhancedText = await executeWithProvider(combinedPrompt, resolvedModel);
} else if (isOpencodeModel(resolvedModel)) {
// Use OpenCode provider for OpenCode models (static and dynamic)
logger.info(`Using OpenCode provider for model: ${resolvedModel}`);
// OpenCode CLI handles the system prompt, so combine them
const combinedPrompt = `${systemPrompt}\n\n${userPrompt}`;
enhancedText = await executeWithProvider(combinedPrompt, resolvedModel);
} else {
// Use Claude SDK for Claude models
logger.info(`Using Claude provider for model: ${resolvedModel}`);
// Convert thinkingLevel to maxThinkingTokens for SDK
const maxThinkingTokens = getThinkingTokenBudget(thinkingLevel);
const queryOptions: Parameters<typeof query>[0]['options'] = {
model: resolvedModel,
systemPrompt,
maxTurns: 1,
allowedTools: [],
permissionMode: 'acceptEdits',
};
if (maxThinkingTokens) {
queryOptions.maxThinkingTokens = maxThinkingTokens;
}
const stream = query({
prompt: userPrompt,
options: queryOptions,
});
enhancedText = await extractTextFromStream(stream);
}
const enhancedText = result.text;
if (!enhancedText || enhancedText.trim().length === 0) {
logger.warn('Received empty response from AI');

View File

@@ -1,13 +1,14 @@
/**
* POST /features/generate-title endpoint - Generate a concise title from description
*
* Uses Claude Haiku to generate a short, descriptive title from feature description.
* Uses the provider abstraction to generate a short, descriptive title
* from a feature description. Works with any configured provider (Claude, Cursor, etc.).
*/
import type { Request, Response } from 'express';
import { query } from '@anthropic-ai/claude-agent-sdk';
import { createLogger } from '@automaker/utils';
import { CLAUDE_MODEL_MAP } from '@automaker/model-resolver';
import { simpleQuery } from '../../../providers/simple-query-service.js';
const logger = createLogger('GenerateTitle');
@@ -34,33 +35,6 @@ Rules:
- No quotes, periods, or extra formatting
- Capture the essence of the feature in a scannable way`;
async function extractTextFromStream(
stream: AsyncIterable<{
type: string;
subtype?: string;
result?: string;
message?: {
content?: Array<{ type: string; text?: string }>;
};
}>
): Promise<string> {
let responseText = '';
for await (const msg of stream) {
if (msg.type === 'assistant' && msg.message?.content) {
for (const block of msg.message.content) {
if (block.type === 'text' && block.text) {
responseText += block.text;
}
}
} else if (msg.type === 'result' && msg.subtype === 'success') {
responseText = msg.result || responseText;
}
}
return responseText;
}
export function createGenerateTitleHandler(): (req: Request, res: Response) => Promise<void> {
return async (req: Request, res: Response): Promise<void> => {
try {
@@ -89,21 +63,19 @@ export function createGenerateTitleHandler(): (req: Request, res: Response) => P
const userPrompt = `Generate a concise title for this feature:\n\n${trimmedDescription}`;
const stream = query({
prompt: userPrompt,
options: {
model: CLAUDE_MODEL_MAP.haiku,
systemPrompt: SYSTEM_PROMPT,
maxTurns: 1,
allowedTools: [],
permissionMode: 'default',
},
// Use simpleQuery - provider abstraction handles all the streaming/extraction
const result = await simpleQuery({
prompt: `${SYSTEM_PROMPT}\n\n${userPrompt}`,
model: CLAUDE_MODEL_MAP.haiku,
cwd: process.cwd(),
maxTurns: 1,
allowedTools: [],
});
const title = await extractTextFromStream(stream);
const title = result.text;
if (!title || title.trim().length === 0) {
logger.warn('Received empty response from Claude');
logger.warn('Received empty response from AI');
const response: GenerateTitleErrorResponse = {
success: false,
error: 'Failed to generate title - empty response',

View File

@@ -1,5 +1,5 @@
/**
* POST /validate-issue endpoint - Validate a GitHub issue using Claude SDK or Cursor (async)
* POST /validate-issue endpoint - Validate a GitHub issue using provider abstraction (async)
*
* Scans the codebase to determine if an issue is valid, invalid, or needs clarification.
* Runs asynchronously and emits events for progress and completion.
@@ -7,7 +7,6 @@
*/
import type { Request, Response } from 'express';
import { query } from '@anthropic-ai/claude-agent-sdk';
import type { EventEmitter } from '../../../lib/events.js';
import type {
IssueValidationResult,
@@ -18,12 +17,11 @@ import type {
LinkedPRInfo,
ThinkingLevel,
} from '@automaker/types';
import { isCursorModel, DEFAULT_PHASE_MODELS, stripProviderPrefix } from '@automaker/types';
import { isCursorModel, DEFAULT_PHASE_MODELS } from '@automaker/types';
import { resolvePhaseModel } from '@automaker/model-resolver';
import { createSuggestionsOptions } from '../../../lib/sdk-options.js';
import { extractJson } from '../../../lib/json-extractor.js';
import { writeValidation } from '../../../lib/validation-storage.js';
import { ProviderFactory } from '../../../providers/provider-factory.js';
import { streamingQuery } from '../../../providers/simple-query-service.js';
import {
issueValidationSchema,
ISSUE_VALIDATION_SYSTEM_PROMPT,
@@ -102,7 +100,7 @@ async function runValidation(
try {
// Build the prompt (include comments and linked PRs if provided)
const prompt = buildValidationPrompt(
const basePrompt = buildValidationPrompt(
issueNumber,
issueTitle,
issueBody,
@@ -111,20 +109,15 @@ async function runValidation(
linkedPRs
);
let validationResult: IssueValidationResult | null = null;
let responseText = '';
// Route to appropriate provider based on model
if (isCursorModel(model)) {
// Use Cursor provider for Cursor models
logger.info(`Using Cursor provider for validation with model: ${model}`);
// Determine if we should use structured output (Claude supports it, Cursor doesn't)
const useStructuredOutput = !isCursorModel(model);
const provider = ProviderFactory.getProviderForModel(model);
// Strip provider prefix - providers expect bare model IDs
const bareModel = stripProviderPrefix(model);
// For Cursor, include the system prompt and schema in the user prompt
const cursorPrompt = `${ISSUE_VALIDATION_SYSTEM_PROMPT}
// Build the final prompt - for Cursor, include system prompt and JSON schema instructions
let finalPrompt = basePrompt;
if (!useStructuredOutput) {
finalPrompt = `${ISSUE_VALIDATION_SYSTEM_PROMPT}
CRITICAL INSTRUCTIONS:
1. DO NOT write any files. Return the JSON in your response only.
@@ -135,121 +128,71 @@ ${JSON.stringify(issueValidationSchema, null, 2)}
Your entire response should be valid JSON starting with { and ending with }. No text before or after.
${prompt}`;
for await (const msg of provider.executeQuery({
prompt: cursorPrompt,
model: bareModel,
cwd: projectPath,
readOnly: true, // Issue validation only reads code, doesn't write
})) {
if (msg.type === 'assistant' && msg.message?.content) {
for (const block of msg.message.content) {
if (block.type === 'text' && block.text) {
responseText += block.text;
// Emit progress event
const progressEvent: IssueValidationEvent = {
type: 'issue_validation_progress',
issueNumber,
content: block.text,
projectPath,
};
events.emit('issue-validation:event', progressEvent);
}
}
} else if (msg.type === 'result' && msg.subtype === 'success' && msg.result) {
// Use result if it's a final accumulated message
if (msg.result.length > responseText.length) {
responseText = msg.result;
}
}
}
// Parse JSON from the response text using shared utility
if (responseText) {
validationResult = extractJson<IssueValidationResult>(responseText, { logger });
}
} else {
// Use Claude SDK for Claude models
logger.info(`Using Claude provider for validation with model: ${model}`);
// Load autoLoadClaudeMd setting
const autoLoadClaudeMd = await getAutoLoadClaudeMdSetting(
projectPath,
settingsService,
'[ValidateIssue]'
);
// Use thinkingLevel from request if provided, otherwise fall back to settings
let effectiveThinkingLevel: ThinkingLevel | undefined = thinkingLevel;
if (!effectiveThinkingLevel) {
const settings = await settingsService?.getGlobalSettings();
const phaseModelEntry =
settings?.phaseModels?.validationModel || DEFAULT_PHASE_MODELS.validationModel;
const resolved = resolvePhaseModel(phaseModelEntry);
effectiveThinkingLevel = resolved.thinkingLevel;
}
// Create SDK options with structured output and abort controller
const options = createSuggestionsOptions({
cwd: projectPath,
model: model as ModelAlias,
systemPrompt: ISSUE_VALIDATION_SYSTEM_PROMPT,
abortController,
autoLoadClaudeMd,
thinkingLevel: effectiveThinkingLevel,
outputFormat: {
type: 'json_schema',
schema: issueValidationSchema as Record<string, unknown>,
},
});
// Execute the query
const stream = query({ prompt, options });
for await (const msg of stream) {
// Collect assistant text for debugging and emit progress
if (msg.type === 'assistant' && msg.message?.content) {
for (const block of msg.message.content) {
if (block.type === 'text') {
responseText += block.text;
// Emit progress event
const progressEvent: IssueValidationEvent = {
type: 'issue_validation_progress',
issueNumber,
content: block.text,
projectPath,
};
events.emit('issue-validation:event', progressEvent);
}
}
}
// Extract structured output on success
if (msg.type === 'result' && msg.subtype === 'success') {
const resultMsg = msg as { structured_output?: IssueValidationResult };
if (resultMsg.structured_output) {
validationResult = resultMsg.structured_output;
logger.debug('Received structured output:', validationResult);
}
}
// Handle errors
if (msg.type === 'result') {
const resultMsg = msg as { subtype?: string };
if (resultMsg.subtype === 'error_max_structured_output_retries') {
logger.error('Failed to produce valid structured output after retries');
throw new Error('Could not produce valid validation output');
}
}
}
${basePrompt}`;
}
// Load autoLoadClaudeMd setting
const autoLoadClaudeMd = await getAutoLoadClaudeMdSetting(
projectPath,
settingsService,
'[ValidateIssue]'
);
// Use thinkingLevel from request if provided, otherwise fall back to settings
let effectiveThinkingLevel: ThinkingLevel | undefined = thinkingLevel;
if (!effectiveThinkingLevel) {
const settings = await settingsService?.getGlobalSettings();
const phaseModelEntry =
settings?.phaseModels?.validationModel || DEFAULT_PHASE_MODELS.validationModel;
const resolved = resolvePhaseModel(phaseModelEntry);
effectiveThinkingLevel = resolved.thinkingLevel;
}
logger.info(`Using model: ${model}`);
// Use streamingQuery with event callbacks
const result = await streamingQuery({
prompt: finalPrompt,
model: model as string,
cwd: projectPath,
systemPrompt: useStructuredOutput ? ISSUE_VALIDATION_SYSTEM_PROMPT : undefined,
abortController,
thinkingLevel: effectiveThinkingLevel,
readOnly: true, // Issue validation only reads code, doesn't write
settingSources: autoLoadClaudeMd ? ['user', 'project', 'local'] : undefined,
outputFormat: useStructuredOutput
? {
type: 'json_schema',
schema: issueValidationSchema as Record<string, unknown>,
}
: undefined,
onText: (text) => {
responseText += text;
// Emit progress event
const progressEvent: IssueValidationEvent = {
type: 'issue_validation_progress',
issueNumber,
content: text,
projectPath,
};
events.emit('issue-validation:event', progressEvent);
},
});
// Clear timeout
clearTimeout(timeoutId);
// Get validation result from structured output or parse from text
let validationResult: IssueValidationResult | null = null;
if (result.structured_output) {
validationResult = result.structured_output as unknown as IssueValidationResult;
logger.debug('Received structured output:', validationResult);
} else if (responseText) {
// Parse JSON from response text
validationResult = extractJson<IssueValidationResult>(responseText, { logger });
}
// Require validation result
if (!validationResult) {
logger.error('No validation result received from AI provider');
@@ -299,7 +242,7 @@ ${prompt}`;
/**
* Creates the handler for validating GitHub issues against the codebase.
*
* Uses Claude SDK with:
* Uses the provider abstraction with:
* - Read-only tools (Read, Glob, Grep) for codebase analysis
* - JSON schema structured output for reliable parsing
* - System prompt guiding the validation process

View File

@@ -5,19 +5,12 @@
* (AI Suggestions in the UI). Supports both Claude and Cursor models.
*/
import { query } from '@anthropic-ai/claude-agent-sdk';
import type { EventEmitter } from '../../lib/events.js';
import { createLogger } from '@automaker/utils';
import {
DEFAULT_PHASE_MODELS,
isCursorModel,
stripProviderPrefix,
type ThinkingLevel,
} from '@automaker/types';
import { DEFAULT_PHASE_MODELS, isCursorModel, type ThinkingLevel } from '@automaker/types';
import { resolvePhaseModel } from '@automaker/model-resolver';
import { createSuggestionsOptions } from '../../lib/sdk-options.js';
import { extractJsonWithArray } from '../../lib/json-extractor.js';
import { ProviderFactory } from '../../providers/provider-factory.js';
import { streamingQuery } from '../../providers/simple-query-service.js';
import { FeatureLoader } from '../../services/feature-loader.js';
import { getAppSpecPath } from '@automaker/platform';
import * as secureFs from '../../lib/secure-fs.js';
@@ -204,19 +197,14 @@ The response will be automatically formatted as structured JSON.`;
logger.info('[Suggestions] Using model:', model);
let responseText = '';
let structuredOutput: { suggestions: Array<Record<string, unknown>> } | null = null;
// Route to appropriate provider based on model type
if (isCursorModel(model)) {
// Use Cursor provider for Cursor models
logger.info('[Suggestions] Using Cursor provider');
// Determine if we should use structured output (Claude supports it, Cursor doesn't)
const useStructuredOutput = !isCursorModel(model);
const provider = ProviderFactory.getProviderForModel(model);
// Strip provider prefix - providers expect bare model IDs
const bareModel = stripProviderPrefix(model);
// For Cursor, include the JSON schema in the prompt with clear instructions
const cursorPrompt = `${prompt}
// Build the final prompt - for Cursor, include JSON schema instructions
let finalPrompt = prompt;
if (!useStructuredOutput) {
finalPrompt = `${prompt}
CRITICAL INSTRUCTIONS:
1. DO NOT write any files. Return the JSON in your response only.
@@ -226,104 +214,60 @@ CRITICAL INSTRUCTIONS:
${JSON.stringify(suggestionsSchema, null, 2)}
Your entire response should be valid JSON starting with { and ending with }. No text before or after.`;
for await (const msg of provider.executeQuery({
prompt: cursorPrompt,
model: bareModel,
cwd: projectPath,
maxTurns: 250,
allowedTools: ['Read', 'Glob', 'Grep'],
abortController,
readOnly: true, // Suggestions only reads code, doesn't write
})) {
if (msg.type === 'assistant' && msg.message?.content) {
for (const block of msg.message.content) {
if (block.type === 'text' && block.text) {
responseText += block.text;
events.emit('suggestions:event', {
type: 'suggestions_progress',
content: block.text,
});
} else if (block.type === 'tool_use') {
events.emit('suggestions:event', {
type: 'suggestions_tool',
tool: block.name,
input: block.input,
});
}
}
} else if (msg.type === 'result' && msg.subtype === 'success' && msg.result) {
// Use result if it's a final accumulated message (from Cursor provider)
logger.info('[Suggestions] Received result from Cursor, length:', msg.result.length);
logger.info('[Suggestions] Previous responseText length:', responseText.length);
if (msg.result.length > responseText.length) {
logger.info('[Suggestions] Using Cursor result (longer than accumulated text)');
responseText = msg.result;
} else {
logger.info('[Suggestions] Keeping accumulated text (longer than Cursor result)');
}
}
}
} else {
// Use Claude SDK for Claude models
logger.info('[Suggestions] Using Claude SDK');
const options = createSuggestionsOptions({
cwd: projectPath,
abortController,
autoLoadClaudeMd,
model, // Pass the model from settings
thinkingLevel, // Pass thinking level for extended thinking
outputFormat: {
type: 'json_schema',
schema: suggestionsSchema,
},
});
const stream = query({ prompt, options });
for await (const msg of stream) {
if (msg.type === 'assistant' && msg.message.content) {
for (const block of msg.message.content) {
if (block.type === 'text') {
responseText += block.text;
events.emit('suggestions:event', {
type: 'suggestions_progress',
content: block.text,
});
} else if (block.type === 'tool_use') {
events.emit('suggestions:event', {
type: 'suggestions_tool',
tool: block.name,
input: block.input,
});
}
}
} else if (msg.type === 'result' && msg.subtype === 'success') {
// Check for structured output
const resultMsg = msg as any;
if (resultMsg.structured_output) {
structuredOutput = resultMsg.structured_output as {
suggestions: Array<Record<string, unknown>>;
};
logger.debug('Received structured output:', structuredOutput);
}
} else if (msg.type === 'result') {
const resultMsg = msg as any;
if (resultMsg.subtype === 'error_max_structured_output_retries') {
logger.error('Failed to produce valid structured output after retries');
throw new Error('Could not produce valid suggestions output');
} else if (resultMsg.subtype === 'error_max_turns') {
logger.error('Hit max turns limit before completing suggestions generation');
logger.warn(`Response text length: ${responseText.length} chars`);
// Still try to parse what we have
}
}
}
}
// Use streamingQuery with event callbacks
const result = await streamingQuery({
prompt: finalPrompt,
model,
cwd: projectPath,
maxTurns: 250,
allowedTools: ['Read', 'Glob', 'Grep'],
abortController,
thinkingLevel,
readOnly: true, // Suggestions only reads code, doesn't write
settingSources: autoLoadClaudeMd ? ['user', 'project', 'local'] : undefined,
outputFormat: useStructuredOutput
? {
type: 'json_schema',
schema: suggestionsSchema,
}
: undefined,
onText: (text) => {
responseText += text;
events.emit('suggestions:event', {
type: 'suggestions_progress',
content: text,
});
},
onToolUse: (tool, input) => {
events.emit('suggestions:event', {
type: 'suggestions_tool',
tool,
input,
});
},
});
// Use structured output if available, otherwise fall back to parsing text
try {
let structuredOutput: { suggestions: Array<Record<string, unknown>> } | null = null;
if (result.structured_output) {
structuredOutput = result.structured_output as {
suggestions: Array<Record<string, unknown>>;
};
logger.debug('Received structured output:', structuredOutput);
} else if (responseText) {
// Fallback: try to parse from text using shared extraction utility
logger.warn('No structured output received, attempting to parse from text');
structuredOutput = extractJsonWithArray<{ suggestions: Array<Record<string, unknown>> }>(
responseText,
'suggestions',
{ logger }
);
}
if (structuredOutput && structuredOutput.suggestions) {
// Use structured output directly
events.emit('suggestions:event', {
@@ -334,24 +278,7 @@ Your entire response should be valid JSON starting with { and ending with }. No
})),
});
} else {
// Fallback: try to parse from text using shared extraction utility
logger.warn('No structured output received, attempting to parse from text');
const parsed = extractJsonWithArray<{ suggestions: Array<Record<string, unknown>> }>(
responseText,
'suggestions',
{ logger }
);
if (parsed && parsed.suggestions) {
events.emit('suggestions:event', {
type: 'suggestions_complete',
suggestions: parsed.suggestions.map((s: Record<string, unknown>, i: number) => ({
...s,
id: s.id || `suggestion-${Date.now()}-${i}`,
})),
});
} else {
throw new Error('No valid JSON found in response');
}
throw new Error('No valid JSON found in response');
}
} catch (error) {
// Log the parsing error for debugging

View File

@@ -10,11 +10,14 @@
*/
import { ProviderFactory } from '../providers/provider-factory.js';
import { simpleQuery } from '../providers/simple-query-service.js';
import type {
ExecuteOptions,
Feature,
ModelProvider,
PipelineStep,
FeatureStatusWithPipeline,
PipelineConfig,
ThinkingLevel,
PlanningMode,
} from '@automaker/types';
@@ -83,6 +86,26 @@ interface PlanSpec {
tasks?: ParsedTask[];
}
/**
* Information about pipeline status when resuming a feature.
* Used to determine how to handle features stuck in pipeline execution.
*
* @property {boolean} isPipeline - Whether the feature is in a pipeline step
* @property {string | null} stepId - ID of the current pipeline step (e.g., 'step_123')
* @property {number} stepIndex - Index of the step in the sorted pipeline steps (-1 if not found)
* @property {number} totalSteps - Total number of steps in the pipeline
* @property {PipelineStep | null} step - The pipeline step configuration, or null if step not found
* @property {PipelineConfig | null} config - The full pipeline configuration, or null if no pipeline
*/
interface PipelineStatusInfo {
isPipeline: boolean;
stepId: string | null;
stepIndex: number;
totalSteps: number;
step: PipelineStep | null;
config: PipelineConfig | null;
}
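// Illustrative sketch (not part of this commit, values assumed): the literal shape
// detectPipelineStatus() returns for a feature that is not in any pipeline step. The other
// branches reuse the same shape, with stepIndex -1 signaling a step that no longer exists
// in the config, and a non-null step/config pair for the happy path.
const notInPipeline: PipelineStatusInfo = {
  isPipeline: false,
  stepId: null,
  stepIndex: -1,
  totalSteps: 0,
  step: null,
  config: null,
};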
/**
* Parse tasks from generated spec content
* Looks for the ```tasks code block and extracts task lines
@@ -917,6 +940,25 @@ Complete the pipeline step instructions above. Review the previous work and appl
throw new Error('already running');
}
// Load feature to check status
const feature = await this.loadFeature(projectPath, featureId);
if (!feature) {
throw new Error(`Feature ${featureId} not found`);
}
// Check if feature is stuck in a pipeline step
const pipelineInfo = await this.detectPipelineStatus(
projectPath,
featureId,
(feature.status || '') as FeatureStatusWithPipeline
);
if (pipelineInfo.isPipeline) {
// Feature stuck in pipeline - use pipeline resume
return this.resumePipelineFeature(projectPath, feature, useWorktrees, pipelineInfo);
}
// Normal resume flow for non-pipeline features
// Check if context exists in .automaker directory
const featureDir = getFeatureDir(projectPath, featureId);
const contextPath = path.join(featureDir, 'agent-output.md');
@@ -936,11 +978,252 @@ Complete the pipeline step instructions above. Review the previous work and appl
}
// No context, start fresh - executeFeature will handle adding to runningFeatures
// Remove the temporary entry we added
this.runningFeatures.delete(featureId);
return this.executeFeature(projectPath, featureId, useWorktrees, false);
}
/**
* Resume a feature that crashed during pipeline execution.
* Handles multiple edge cases to ensure robust recovery:
* - No context file: Restart entire pipeline from beginning
* - Step deleted from config: Complete feature without remaining pipeline steps
* - Valid step exists: Resume from the crashed step and continue
*
* @param {string} projectPath - Absolute path to the project directory
* @param {Feature} feature - The feature object (already loaded to avoid redundant reads)
* @param {boolean} useWorktrees - Whether to use git worktrees for isolation
* @param {PipelineStatusInfo} pipelineInfo - Information about the pipeline status from detectPipelineStatus()
* @returns {Promise<void>} Resolves when resume operation completes or throws on error
* @throws {Error} If pipeline config is null but stepIndex is valid (should never happen)
* @private
*/
private async resumePipelineFeature(
projectPath: string,
feature: Feature,
useWorktrees: boolean,
pipelineInfo: PipelineStatusInfo
): Promise<void> {
const featureId = feature.id;
console.log(
`[AutoMode] Resuming feature ${featureId} from pipeline step ${pipelineInfo.stepId}`
);
// Check for context file
const featureDir = getFeatureDir(projectPath, featureId);
const contextPath = path.join(featureDir, 'agent-output.md');
let hasContext = false;
try {
await secureFs.access(contextPath);
hasContext = true;
} catch {
// No context
}
// Edge Case 1: No context file - restart entire pipeline from beginning
if (!hasContext) {
console.warn(
`[AutoMode] No context found for pipeline feature ${featureId}, restarting from beginning`
);
// Reset status to in_progress and start fresh
await this.updateFeatureStatus(projectPath, featureId, 'in_progress');
return this.executeFeature(projectPath, featureId, useWorktrees, false);
}
// Edge Case 2: Step no longer exists in pipeline config
if (pipelineInfo.stepIndex === -1) {
console.warn(
`[AutoMode] Step ${pipelineInfo.stepId} no longer exists in pipeline, completing feature without pipeline`
);
const finalStatus = feature.skipTests ? 'waiting_approval' : 'verified';
await this.updateFeatureStatus(projectPath, featureId, finalStatus);
this.emitAutoModeEvent('auto_mode_feature_complete', {
featureId,
passes: true,
message:
'Pipeline step no longer exists - feature completed without remaining pipeline steps',
projectPath,
});
return;
}
// Normal case: a valid pipeline step exists and context is available
// Resume from the stuck step (re-execute the step that crashed)
if (!pipelineInfo.config) {
throw new Error('Pipeline config is null but stepIndex is valid - this should not happen');
}
return this.resumeFromPipelineStep(
projectPath,
feature,
useWorktrees,
pipelineInfo.stepIndex,
pipelineInfo.config
);
}
/**
* Resume pipeline execution from a specific step index.
* Re-executes the step that crashed (to handle partial completion),
* then continues executing all remaining pipeline steps in order.
*
* This method handles the complete pipeline resume workflow:
* - Validates feature and step index
* - Locates or creates git worktree if needed
* - Executes remaining steps starting from the crashed step
* - Updates feature status to verified/waiting_approval when complete
* - Emits progress events throughout execution
*
* @param {string} projectPath - Absolute path to the project directory
* @param {Feature} feature - The feature object (already loaded to avoid redundant reads)
* @param {boolean} useWorktrees - Whether to use git worktrees for isolation
* @param {number} startFromStepIndex - Zero-based index of the step to resume from
* @param {PipelineConfig} pipelineConfig - Pipeline config passed from detectPipelineStatus to avoid re-reading
* @returns {Promise<void>} Resolves when pipeline execution completes successfully
* @throws {Error} If feature not found, step index invalid, or pipeline execution fails
* @private
*/
private async resumeFromPipelineStep(
projectPath: string,
feature: Feature,
useWorktrees: boolean,
startFromStepIndex: number,
pipelineConfig: PipelineConfig
): Promise<void> {
const featureId = feature.id;
const sortedSteps = [...pipelineConfig.steps].sort((a, b) => a.order - b.order);
// Validate step index
if (startFromStepIndex < 0 || startFromStepIndex >= sortedSteps.length) {
throw new Error(`Invalid step index: ${startFromStepIndex}`);
}
// Get steps to execute (from startFromStepIndex onwards)
const stepsToExecute = sortedSteps.slice(startFromStepIndex);
console.log(
`[AutoMode] Resuming pipeline for feature ${featureId} from step ${startFromStepIndex + 1}/${sortedSteps.length}`
);
// Add to running features immediately
const abortController = new AbortController();
this.runningFeatures.set(featureId, {
featureId,
projectPath,
worktreePath: null, // Will be set below
branchName: feature.branchName ?? null,
abortController,
isAutoMode: false,
startTime: Date.now(),
});
try {
// Validate project path
validateWorkingDirectory(projectPath);
// Derive workDir from feature.branchName
let worktreePath: string | null = null;
const branchName = feature.branchName;
if (useWorktrees && branchName) {
worktreePath = await this.findExistingWorktreeForBranch(projectPath, branchName);
if (worktreePath) {
console.log(`[AutoMode] Using worktree for branch "${branchName}": ${worktreePath}`);
} else {
console.warn(
`[AutoMode] Worktree for branch "${branchName}" not found, using project path`
);
}
}
const workDir = worktreePath ? path.resolve(worktreePath) : path.resolve(projectPath);
validateWorkingDirectory(workDir);
// Update running feature with worktree info
const runningFeature = this.runningFeatures.get(featureId);
if (runningFeature) {
runningFeature.worktreePath = worktreePath;
runningFeature.branchName = branchName ?? null;
}
// Emit resume event
this.emitAutoModeEvent('auto_mode_feature_start', {
featureId,
projectPath,
feature: {
id: featureId,
title: feature.title || 'Resuming Pipeline',
description: feature.description,
},
});
this.emitAutoModeEvent('auto_mode_progress', {
featureId,
content: `Resuming from pipeline step ${startFromStepIndex + 1}/${sortedSteps.length}`,
projectPath,
});
// Load autoLoadClaudeMd setting
const autoLoadClaudeMd = await getAutoLoadClaudeMdSetting(
projectPath,
this.settingsService,
'[AutoMode]'
);
// Execute remaining pipeline steps (starting from crashed step)
await this.executePipelineSteps(
projectPath,
featureId,
feature,
stepsToExecute,
workDir,
abortController,
autoLoadClaudeMd
);
// Determine final status
const finalStatus = feature.skipTests ? 'waiting_approval' : 'verified';
await this.updateFeatureStatus(projectPath, featureId, finalStatus);
console.log('[AutoMode] Pipeline resume completed successfully');
this.emitAutoModeEvent('auto_mode_feature_complete', {
featureId,
passes: true,
message: 'Pipeline resumed and completed successfully',
projectPath,
});
} catch (error) {
const errorInfo = classifyError(error);
if (errorInfo.isAbort) {
this.emitAutoModeEvent('auto_mode_feature_complete', {
featureId,
passes: false,
message: 'Pipeline resume stopped by user',
projectPath,
});
} else {
console.error(`[AutoMode] Pipeline resume failed for feature ${featureId}:`, error);
await this.updateFeatureStatus(projectPath, featureId, 'backlog');
this.emitAutoModeEvent('auto_mode_error', {
featureId,
error: errorInfo.message,
errorType: errorInfo.type,
projectPath,
});
}
} finally {
this.runningFeatures.delete(featureId);
}
}
/**
* Follow up on a feature with additional instructions
*/
@@ -2885,6 +3168,111 @@ Review the previous work and continue the implementation. If the feature appears
});
}
/**
* Detect if a feature is stuck in a pipeline step and extract step information.
* Parses the feature status to determine if it's a pipeline status (e.g., 'pipeline_step_xyz'),
* loads the pipeline configuration, and validates that the step still exists.
*
* This method handles several scenarios:
* - Non-pipeline status: Returns default PipelineStatusInfo with isPipeline=false
* - Invalid pipeline status format: Returns isPipeline=true but null step info
* - Step deleted from config: Returns stepIndex=-1 to signal missing step
* - Valid pipeline step: Returns full step information and config
*
* @param {string} projectPath - Absolute path to the project directory
* @param {string} featureId - Unique identifier of the feature
* @param {FeatureStatusWithPipeline} currentStatus - Current feature status (may include pipeline step info)
* @returns {Promise<PipelineStatusInfo>} Information about the pipeline status and step
* @private
*/
private async detectPipelineStatus(
projectPath: string,
featureId: string,
currentStatus: FeatureStatusWithPipeline
): Promise<PipelineStatusInfo> {
// Check if status is pipeline format using PipelineService
const isPipeline = pipelineService.isPipelineStatus(currentStatus);
if (!isPipeline) {
return {
isPipeline: false,
stepId: null,
stepIndex: -1,
totalSteps: 0,
step: null,
config: null,
};
}
// Extract step ID using PipelineService
const stepId = pipelineService.getStepIdFromStatus(currentStatus);
if (!stepId) {
console.warn(
`[AutoMode] Feature ${featureId} has invalid pipeline status format: ${currentStatus}`
);
return {
isPipeline: true,
stepId: null,
stepIndex: -1,
totalSteps: 0,
step: null,
config: null,
};
}
// Load pipeline config
const config = await pipelineService.getPipelineConfig(projectPath);
if (!config || config.steps.length === 0) {
// Pipeline config doesn't exist or is empty - feature is stuck with an invalid pipeline status
console.warn(
`[AutoMode] Feature ${featureId} has pipeline status but no pipeline config exists`
);
return {
isPipeline: true,
stepId,
stepIndex: -1,
totalSteps: 0,
step: null,
config: null,
};
}
// Find the step directly from config (already loaded, avoid redundant file read)
const sortedSteps = [...config.steps].sort((a, b) => a.order - b.order);
const stepIndex = sortedSteps.findIndex((s) => s.id === stepId);
const step = stepIndex === -1 ? null : sortedSteps[stepIndex];
if (!step) {
// Step not found in current config - step was deleted/changed
console.warn(
`[AutoMode] Feature ${featureId} stuck in step ${stepId} which no longer exists in pipeline config`
);
return {
isPipeline: true,
stepId,
stepIndex: -1,
totalSteps: sortedSteps.length,
step: null,
config,
};
}
console.log(
`[AutoMode] Detected pipeline status for feature ${featureId}: step ${stepIndex + 1}/${sortedSteps.length} (${step.name})`
);
return {
isPipeline: true,
stepId,
stepIndex,
totalSteps: sortedSteps.length,
step,
config,
};
}
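// Rough sketch of the status convention the pipeline helpers appear to rely on. This is an
// assumption: pipelineService's real isPipelineStatus/getStepIdFromStatus are not part of
// this diff; the sketch only mirrors the 'pipeline_' prefix checked in the UI and the
// 'pipeline_step_xyz' example in the doc comment above.
const PIPELINE_STATUS_PREFIX = 'pipeline_';

function sketchIsPipelineStatus(status: string): boolean {
  return status.startsWith(PIPELINE_STATUS_PREFIX);
}

function sketchGetStepIdFromStatus(status: string): string | null {
  if (!sketchIsPipelineStatus(status)) return null;
  const stepId = status.slice(PIPELINE_STATUS_PREFIX.length);
  return stepId.length > 0 ? stepId : null; // e.g. 'pipeline_step_123' -> 'step_123'
}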
/**
* Build a focused prompt for executing a single task.
* Each task gets minimal context to keep the agent focused.
@@ -3193,40 +3581,23 @@ IMPORTANT: Only include NON-OBVIOUS learnings with real reasoning. Skip trivial
If nothing notable: {"learnings": []}`;
try {
// Import query dynamically to avoid circular dependencies
const { query } = await import('@anthropic-ai/claude-agent-sdk');
// Get model from phase settings
const settings = await this.settingsService?.getGlobalSettings();
const phaseModelEntry =
settings?.phaseModels?.memoryExtractionModel || DEFAULT_PHASE_MODELS.memoryExtractionModel;
const { model } = resolvePhaseModel(phaseModelEntry);
const stream = query({
const result = await simpleQuery({
prompt: userPrompt,
options: {
model,
maxTurns: 1,
allowedTools: [],
permissionMode: 'acceptEdits',
systemPrompt:
'You are a JSON extraction assistant. You MUST respond with ONLY valid JSON, no explanations, no markdown, no other text. Extract learnings from the provided implementation context and return them as JSON.',
},
model,
cwd: projectPath,
maxTurns: 1,
allowedTools: [],
systemPrompt:
'You are a JSON extraction assistant. You MUST respond with ONLY valid JSON, no explanations, no markdown, no other text. Extract learnings from the provided implementation context and return them as JSON.',
});
// Extract text from stream
let responseText = '';
for await (const msg of stream) {
if (msg.type === 'assistant' && msg.message?.content) {
for (const block of msg.message.content) {
if (block.type === 'text' && block.text) {
responseText += block.text;
}
}
} else if (msg.type === 'result' && msg.subtype === 'success') {
responseText = msg.result || responseText;
}
}
const responseText = result.text;
console.log(`[AutoMode] Learning extraction response: ${responseText.length} chars`);
console.log(`[AutoMode] Response preview: ${responseText.substring(0, 300)}`);
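// For comparison, a minimal self-contained sketch of the simpleQuery pattern used above.
// Option names are taken from this diff; any other SimpleQueryOptions fields, and the exact
// return type, are assumptions.
import { simpleQuery } from '../providers/simple-query-service.js';

async function sketchExtractJsonText(
  prompt: string,
  model: string,
  cwd: string
): Promise<string> {
  const result = await simpleQuery({
    prompt,
    model,
    cwd,
    maxTurns: 1,      // single-shot extraction, no tool loop
    allowedTools: [], // no tools, text-only response
    systemPrompt: 'Respond with ONLY valid JSON, no explanations or markdown.',
  });
  // result.text is the accumulated response, replacing the old manual
  // iteration over the Claude SDK message stream.
  return result.text;
}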

View File

@@ -117,73 +117,90 @@ export function CardActions({
)}
</>
)}
{!isCurrentAutoTask && feature.status === 'in_progress' && (
<>
{/* Approve Plan button - shows when plan is generated and waiting for approval */}
{feature.planSpec?.status === 'generated' && onApprovePlan && (
<Button
variant="default"
size="sm"
className="flex-1 h-7 text-[11px] bg-purple-600 hover:bg-purple-700 text-white animate-pulse"
onClick={(e) => {
e.stopPropagation();
onApprovePlan();
}}
onPointerDown={(e) => e.stopPropagation()}
data-testid={`approve-plan-${feature.id}`}
>
<FileText className="w-3 h-3 mr-1" />
Approve Plan
</Button>
)}
{feature.skipTests && onManualVerify ? (
<Button
variant="default"
size="sm"
className="flex-1 h-7 text-[11px]"
onClick={(e) => {
e.stopPropagation();
onManualVerify();
}}
onPointerDown={(e) => e.stopPropagation()}
data-testid={`manual-verify-${feature.id}`}
>
<CheckCircle2 className="w-3 h-3 mr-1" />
Verify
</Button>
) : onResume ? (
<Button
variant="default"
size="sm"
className="flex-1 h-7 text-[11px] bg-[var(--status-success)] hover:bg-[var(--status-success)]/90"
onClick={(e) => {
e.stopPropagation();
onResume();
}}
onPointerDown={(e) => e.stopPropagation()}
data-testid={`resume-feature-${feature.id}`}
>
<RotateCcw className="w-3 h-3 mr-1" />
Resume
</Button>
) : null}
{onViewOutput && !feature.skipTests && (
<Button
variant="secondary"
size="sm"
className="h-7 text-[11px] px-2"
onClick={(e) => {
e.stopPropagation();
onViewOutput();
}}
onPointerDown={(e) => e.stopPropagation()}
data-testid={`view-output-inprogress-${feature.id}`}
>
<FileText className="w-3 h-3" />
</Button>
)}
</>
)}
{!isCurrentAutoTask &&
(feature.status === 'in_progress' ||
(typeof feature.status === 'string' && feature.status.startsWith('pipeline_'))) && (
<>
{/* Approve Plan button - shows when plan is generated and waiting for approval */}
{feature.planSpec?.status === 'generated' && onApprovePlan && (
<Button
variant="default"
size="sm"
className="flex-1 h-7 text-[11px] bg-purple-600 hover:bg-purple-700 text-white animate-pulse"
onClick={(e) => {
e.stopPropagation();
onApprovePlan();
}}
onPointerDown={(e) => e.stopPropagation()}
data-testid={`approve-plan-${feature.id}`}
>
<FileText className="w-3 h-3 mr-1" />
Approve Plan
</Button>
)}
{feature.skipTests && onManualVerify ? (
<Button
variant="default"
size="sm"
className="flex-1 h-7 text-[11px]"
onClick={(e) => {
e.stopPropagation();
onManualVerify();
}}
onPointerDown={(e) => e.stopPropagation()}
data-testid={`manual-verify-${feature.id}`}
>
<CheckCircle2 className="w-3 h-3 mr-1" />
Verify
</Button>
) : onResume ? (
<Button
variant="default"
size="sm"
className="flex-1 h-7 text-[11px] bg-[var(--status-success)] hover:bg-[var(--status-success)]/90"
onClick={(e) => {
e.stopPropagation();
onResume();
}}
onPointerDown={(e) => e.stopPropagation()}
data-testid={`resume-feature-${feature.id}`}
>
<RotateCcw className="w-3 h-3 mr-1" />
Resume
</Button>
) : onVerify ? (
<Button
variant="default"
size="sm"
className="flex-1 h-7 text-[11px] bg-[var(--status-success)] hover:bg-[var(--status-success)]/90"
onClick={(e) => {
e.stopPropagation();
onVerify();
}}
onPointerDown={(e) => e.stopPropagation()}
data-testid={`verify-feature-${feature.id}`}
>
<CheckCircle2 className="w-3 h-3 mr-1" />
Verify
</Button>
) : null}
{onViewOutput && !feature.skipTests && (
<Button
variant="secondary"
size="sm"
className="h-7 text-[11px] px-2"
onClick={(e) => {
e.stopPropagation();
onViewOutput();
}}
onPointerDown={(e) => e.stopPropagation()}
data-testid={`view-output-inprogress-${feature.id}`}
>
<FileText className="w-3 h-3" />
</Button>
)}
</>
)}
{!isCurrentAutoTask && feature.status === 'verified' && (
<>
{/* Logs button */}

View File

@@ -21,7 +21,8 @@ import {
FeatureTextFilePath as DescriptionTextFilePath,
ImagePreviewMap,
} from '@/components/ui/description-image-dropzone';
import { Play, Cpu, FolderKanban } from 'lucide-react';
import { Play, Cpu, FolderKanban, Settings2 } from 'lucide-react';
import { useNavigate } from '@tanstack/react-router';
import { toast } from 'sonner';
import { cn } from '@/lib/utils';
import { modelSupportsThinking } from '@/lib/utils';
@@ -33,7 +34,7 @@ import {
PlanningMode,
Feature,
} from '@/store/app-store';
import type { ReasoningEffort, PhaseModelEntry } from '@automaker/types';
import type { ReasoningEffort, PhaseModelEntry, AgentModel } from '@automaker/types';
import { supportsReasoningEffort, isClaudeModel } from '@automaker/types';
import {
TestingTabContent,
@@ -152,6 +153,7 @@ export function AddFeatureDialog({
forceCurrentBranchMode,
}: AddFeatureDialogProps) {
const isSpawnMode = !!parentFeature;
const navigate = useNavigate();
const [workMode, setWorkMode] = useState<WorkMode>('current');
// Form state
@@ -187,7 +189,8 @@ export function AddFeatureDialog({
const [selectedAncestorIds, setSelectedAncestorIds] = useState<Set<string>>(new Set());
// Get defaults from store
const { defaultPlanningMode, defaultRequirePlanApproval, useWorktrees } = useAppStore();
const { defaultPlanningMode, defaultRequirePlanApproval, useWorktrees, defaultFeatureModel } =
useAppStore();
// Track previous open state to detect when dialog opens
const wasOpenRef = useRef(false);
@@ -207,7 +210,7 @@ export function AddFeatureDialog({
);
setPlanningMode(defaultPlanningMode);
setRequirePlanApproval(defaultRequirePlanApproval);
setModelEntry({ model: 'opus' });
setModelEntry(defaultFeatureModel);
// Initialize description history (empty for new feature)
setDescriptionHistory([]);
@@ -228,6 +231,7 @@ export function AddFeatureDialog({
defaultBranch,
defaultPlanningMode,
defaultRequirePlanApproval,
defaultFeatureModel,
useWorktrees,
selectedNonMainWorktreeBranch,
forceCurrentBranchMode,
@@ -318,7 +322,7 @@ export function AddFeatureDialog({
// When a non-main worktree is selected, use its branch name for custom mode
setBranchName(selectedNonMainWorktreeBranch || '');
setPriority(2);
setModelEntry({ model: 'opus' });
setModelEntry(defaultFeatureModel);
setWorkMode(
getDefaultWorkMode(useWorktrees, selectedNonMainWorktreeBranch, forceCurrentBranchMode)
);
@@ -473,9 +477,31 @@ export function AddFeatureDialog({
{/* AI & Execution Section */}
<div className={cardClass}>
<div className={sectionHeaderClass}>
<Cpu className="w-4 h-4 text-muted-foreground" />
<span>AI & Execution</span>
<div className="flex items-center justify-between">
<div className={sectionHeaderClass}>
<Cpu className="w-4 h-4 text-muted-foreground" />
<span>AI & Execution</span>
</div>
<TooltipProvider>
<Tooltip>
<TooltipTrigger asChild>
<button
type="button"
onClick={() => {
onOpenChange(false);
navigate({ to: '/settings', search: { view: 'defaults' } });
}}
className="flex items-center gap-1 text-xs text-muted-foreground hover:text-foreground transition-colors"
>
<Settings2 className="w-3.5 h-3.5" />
<span>Edit Defaults</span>
</button>
</TooltipTrigger>
<TooltipContent>
<p>Change default model and planning settings for new features</p>
</TooltipContent>
</Tooltip>
</TooltipProvider>
</div>
<div className="space-y-1.5">

View File

@@ -21,7 +21,8 @@ import {
FeatureTextFilePath as DescriptionTextFilePath,
ImagePreviewMap,
} from '@/components/ui/description-image-dropzone';
import { GitBranch, Cpu, FolderKanban } from 'lucide-react';
import { GitBranch, Cpu, FolderKanban, Settings2 } from 'lucide-react';
import { useNavigate } from '@tanstack/react-router';
import { toast } from 'sonner';
import { cn, modelSupportsThinking } from '@/lib/utils';
import { Feature, ModelAlias, ThinkingLevel, useAppStore, PlanningMode } from '@/store/app-store';
@@ -86,6 +87,7 @@ export function EditFeatureDialog({
isMaximized,
allFeatures,
}: EditFeatureDialogProps) {
const navigate = useNavigate();
const [editingFeature, setEditingFeature] = useState<Feature | null>(feature);
// Derive initial workMode from feature's branchName
const [workMode, setWorkMode] = useState<WorkMode>(() => {
@@ -363,9 +365,31 @@ export function EditFeatureDialog({
{/* AI & Execution Section */}
<div className={cardClass}>
<div className={sectionHeaderClass}>
<Cpu className="w-4 h-4 text-muted-foreground" />
<span>AI & Execution</span>
<div className="flex items-center justify-between">
<div className={sectionHeaderClass}>
<Cpu className="w-4 h-4 text-muted-foreground" />
<span>AI & Execution</span>
</div>
<TooltipProvider>
<Tooltip>
<TooltipTrigger asChild>
<button
type="button"
onClick={() => {
onClose();
navigate({ to: '/settings', search: { view: 'defaults' } });
}}
className="flex items-center gap-1 text-xs text-muted-foreground hover:text-foreground transition-colors"
>
<Settings2 className="w-3.5 h-3.5" />
<span>Edit Defaults</span>
</button>
</TooltipTrigger>
<TooltipContent>
<p>Change default model and planning settings for new features</p>
</TooltipContent>
</Tooltip>
</TooltipProvider>
</div>
<div className="space-y-1.5">

View File

@@ -102,7 +102,10 @@ export function useBoardEffects({
const checkAllContexts = async () => {
const featuresWithPotentialContext = features.filter(
(f) =>
f.status === 'in_progress' || f.status === 'waiting_approval' || f.status === 'verified'
f.status === 'in_progress' ||
f.status === 'waiting_approval' ||
f.status === 'verified' ||
(typeof f.status === 'string' && f.status.startsWith('pipeline_'))
);
const contextChecks = await Promise.all(
featuresWithPotentialContext.map(async (f) => ({

View File

@@ -4,7 +4,6 @@ import { Badge } from '@/components/ui/badge';
import { Brain, AlertTriangle } from 'lucide-react';
import { AnthropicIcon, CursorIcon, OpenAIIcon } from '@/components/ui/provider-icon';
import { cn } from '@/lib/utils';
import type { ModelAlias } from '@/store/app-store';
import { useAppStore } from '@/store/app-store';
import { useSetupStore } from '@/store/setup-store';
import { getModelProvider, PROVIDER_PREFIXES, stripProviderPrefix } from '@automaker/types';
@@ -19,6 +18,10 @@ interface ModelSelectorProps {
testIdPrefix?: string;
}
const CODEX_EMPTY_AVAILABLE_MESSAGE = 'No Codex models available';
const CODEX_EMPTY_ENABLED_MESSAGE =
'No Codex models enabled. Enable models in Settings → AI Providers.';
export function ModelSelector({
selectedModel,
onModelSelect,
@@ -27,6 +30,8 @@ export function ModelSelector({
const {
enabledCursorModels,
cursorDefaultModel,
enabledCodexModels,
codexDefaultModel,
codexModels,
codexModelsLoading,
codexModelsError,
@@ -49,8 +54,10 @@ export function ModelSelector({
}
}, [isCodexAvailable, codexModels.length, codexModelsLoading, fetchCodexModels]);
const enabledCodexModelIds = new Set(enabledCodexModels);
// Transform codex models from store to ModelOption format
const dynamicCodexModels: ModelOption[] = codexModels.map((model) => {
const codexModelOptions: ModelOption[] = codexModels.map((model) => {
// Infer badge based on tier
let badge: string | undefined;
if (model.tier === 'premium') badge = 'Premium';
@@ -67,6 +74,10 @@ export function ModelSelector({
};
});
const enabledCodexModelOptions = codexModelOptions.filter((model) =>
enabledCodexModelIds.has(model.id)
);
// Filter Cursor models based on enabled models from global settings
const filteredCursorModels = CURSOR_MODELS.filter((model) => {
// Extract the cursor model ID from the prefixed ID (e.g., "cursor-auto" -> "auto")
@@ -74,21 +85,36 @@ export function ModelSelector({
return enabledCursorModels.includes(cursorModelId as any);
});
const hasEnabledCodexModels = enabledCodexModelOptions.length > 0;
const codexDefaultSelection =
codexModelOptions.find((model) => model.id === codexDefaultModel)?.id ||
enabledCodexModelOptions[0]?.id ||
codexModelOptions[0]?.id;
const handleProviderChange = (provider: ModelProvider) => {
if (provider === 'cursor' && selectedProvider !== 'cursor') {
// Switch to Cursor's default model (from global settings)
onModelSelect(`${PROVIDER_PREFIXES.cursor}${cursorDefaultModel}`);
} else if (provider === 'codex' && selectedProvider !== 'codex') {
// Switch to Codex's default model (use isDefault flag from dynamic models)
const defaultModel = codexModels.find((m) => m.isDefault);
const defaultModelId = defaultModel?.id || codexModels[0]?.id || 'codex-gpt-5.2-codex';
onModelSelect(defaultModelId);
// Switch to Codex's default model (from global settings)
if (codexDefaultSelection) {
onModelSelect(codexDefaultSelection);
}
} else if (provider === 'claude' && selectedProvider !== 'claude') {
// Switch to Claude's default model
onModelSelect('sonnet');
}
};
const showCodexAvailableEmpty =
!codexModelsLoading && !codexModelsError && codexModelOptions.length === 0;
const showCodexEnabledEmpty =
!codexModelsLoading &&
!codexModelsError &&
codexModelOptions.length > 0 &&
!hasEnabledCodexModels;
const showCodexList = !codexModelsLoading && !codexModelsError && hasEnabledCodexModels;
return (
<div className="space-y-4">
{/* Provider Selection */}
@@ -272,7 +298,7 @@ export function ModelSelector({
</div>
{/* Loading state */}
{codexModelsLoading && dynamicCodexModels.length === 0 && (
{codexModelsLoading && codexModelOptions.length === 0 && (
<div className="flex items-center justify-center gap-2 p-6 text-sm text-muted-foreground">
<RefreshCw className="w-4 h-4 animate-spin" />
Loading models...
@@ -297,15 +323,21 @@ export function ModelSelector({
)}
{/* Model list */}
{!codexModelsLoading && !codexModelsError && dynamicCodexModels.length === 0 && (
{showCodexAvailableEmpty && (
<div className="text-sm text-muted-foreground p-3 border border-dashed rounded-md text-center">
No Codex models available
{CODEX_EMPTY_AVAILABLE_MESSAGE}
</div>
)}
{!codexModelsLoading && dynamicCodexModels.length > 0 && (
{showCodexEnabledEmpty && (
<div className="text-sm text-muted-foreground p-3 border border-dashed rounded-md text-center">
{CODEX_EMPTY_ENABLED_MESSAGE}
</div>
)}
{showCodexList && (
<div className="flex flex-col gap-2">
{dynamicCodexModels.map((option) => {
{enabledCodexModelOptions.map((option) => {
const isSelected = selectedModel === option.id;
return (
<button

View File

@@ -1,6 +1,6 @@
import { useState } from 'react';
import { useSearch } from '@tanstack/react-router';
import { useAppStore } from '@/store/app-store';
import { useSetupStore } from '@/store/setup-store';
import { useSettingsView, type SettingsViewId } from './settings-view/hooks';
import { NAV_ITEMS } from './settings-view/config/navigation';
@@ -51,6 +51,8 @@ export function SettingsView() {
setDefaultPlanningMode,
defaultRequirePlanApproval,
setDefaultRequirePlanApproval,
defaultFeatureModel,
setDefaultFeatureModel,
autoLoadClaudeMd,
setAutoLoadClaudeMd,
promptCustomization,
@@ -86,8 +88,11 @@ export function SettingsView() {
}
};
// Get initial view from URL search params
const { view: initialView } = useSearch({ from: '/settings' });
// Use settings view navigation hook
const { activeView, navigateTo } = useSettingsView();
const { activeView, navigateTo } = useSettingsView({ initialView });
// Handle navigation - if navigating to 'providers', default to 'claude-provider'
const handleNavigate = (viewId: SettingsViewId) => {
@@ -152,11 +157,13 @@ export function SettingsView() {
skipVerificationInAutoMode={skipVerificationInAutoMode}
defaultPlanningMode={defaultPlanningMode}
defaultRequirePlanApproval={defaultRequirePlanApproval}
defaultFeatureModel={defaultFeatureModel}
onDefaultSkipTestsChange={setDefaultSkipTests}
onEnableDependencyBlockingChange={setEnableDependencyBlocking}
onSkipVerificationInAutoModeChange={setSkipVerificationInAutoMode}
onDefaultPlanningModeChange={setDefaultPlanningMode}
onDefaultRequirePlanApprovalChange={setDefaultRequirePlanApproval}
onDefaultFeatureModelChange={setDefaultFeatureModel}
/>
);
case 'worktrees':

View File

@@ -37,8 +37,8 @@ export const GLOBAL_NAV_GROUPS: NavigationGroup[] = [
{
label: 'Model & Prompts',
items: [
{ id: 'model-defaults', label: 'Model Defaults', icon: Workflow },
{ id: 'defaults', label: 'Feature Defaults', icon: FlaskConical },
{ id: 'model-defaults', label: 'Model Defaults', icon: Workflow },
{ id: 'worktrees', label: 'Worktrees', icon: GitBranch },
{ id: 'prompts', label: 'Prompt Customization', icon: MessageSquareText },
{ id: 'api-keys', label: 'API Keys', icon: Key },

View File

@@ -10,6 +10,7 @@ import {
ScrollText,
ShieldCheck,
FastForward,
Cpu,
} from 'lucide-react';
import { cn } from '@/lib/utils';
import {
@@ -19,6 +20,8 @@ import {
SelectTrigger,
SelectValue,
} from '@/components/ui/select';
import type { PhaseModelEntry } from '@automaker/types';
import { PhaseModelSelector } from '../model-defaults/phase-model-selector';
type PlanningMode = 'skip' | 'lite' | 'spec' | 'full';
@@ -28,11 +31,13 @@ interface FeatureDefaultsSectionProps {
skipVerificationInAutoMode: boolean;
defaultPlanningMode: PlanningMode;
defaultRequirePlanApproval: boolean;
defaultFeatureModel: PhaseModelEntry;
onDefaultSkipTestsChange: (value: boolean) => void;
onEnableDependencyBlockingChange: (value: boolean) => void;
onSkipVerificationInAutoModeChange: (value: boolean) => void;
onDefaultPlanningModeChange: (value: PlanningMode) => void;
onDefaultRequirePlanApprovalChange: (value: boolean) => void;
onDefaultFeatureModelChange: (value: PhaseModelEntry) => void;
}
export function FeatureDefaultsSection({
@@ -41,11 +46,13 @@ export function FeatureDefaultsSection({
skipVerificationInAutoMode,
defaultPlanningMode,
defaultRequirePlanApproval,
defaultFeatureModel,
onDefaultSkipTestsChange,
onEnableDependencyBlockingChange,
onSkipVerificationInAutoModeChange,
onDefaultPlanningModeChange,
onDefaultRequirePlanApprovalChange,
onDefaultFeatureModelChange,
}: FeatureDefaultsSectionProps) {
return (
<div
@@ -68,6 +75,30 @@ export function FeatureDefaultsSection({
</p>
</div>
<div className="p-6 space-y-5">
{/* Default Feature Model Setting */}
<div className="group flex items-start space-x-3 p-3 rounded-xl hover:bg-accent/30 transition-colors duration-200 -mx-3">
<div className="w-10 h-10 mt-0.5 rounded-xl flex items-center justify-center shrink-0 bg-brand-500/10">
<Cpu className="w-5 h-5 text-brand-500" />
</div>
<div className="flex-1 space-y-2">
<div className="flex items-center justify-between">
<Label className="text-foreground font-medium">Default Model</Label>
<PhaseModelSelector
value={defaultFeatureModel}
onChange={onDefaultFeatureModelChange}
compact
align="end"
/>
</div>
<p className="text-xs text-muted-foreground/80 leading-relaxed">
The default AI model and thinking level used when creating new feature cards.
</p>
</div>
</div>
{/* Separator */}
<div className="border-t border-border/30" />
{/* Planning Mode Default */}
<div className="group flex items-start space-x-3 p-3 rounded-xl hover:bg-accent/30 transition-colors duration-200 -mx-3">
<div
@@ -165,12 +196,11 @@ export function FeatureDefaultsSection({
</p>
</div>
</div>
<div className="border-t border-border/30" />
</>
)}
{/* Separator */}
{defaultPlanningMode === 'skip' && <div className="border-t border-border/30" />}
<div className="border-t border-border/30" />
{/* Automated Testing Setting */}
<div className="group flex items-start space-x-3 p-3 rounded-xl hover:bg-accent/30 transition-colors duration-200 -mx-3">

View File

@@ -159,6 +159,9 @@ export function PhaseModelSelector({
const expandedCodexTriggerRef = useRef<HTMLDivElement>(null);
const {
enabledCursorModels,
enabledCodexModels,
enabledOpencodeModels,
enabledDynamicModelIds,
favoriteModels,
toggleFavoriteModel,
codexModels,
@@ -261,6 +264,14 @@ export function PhaseModelSelector({
}));
}, [codexModels]);
const availableCodexModels = useMemo(
() =>
transformedCodexModels.filter((model) =>
enabledCodexModels.includes(model.id as CodexModelId)
),
[transformedCodexModels, enabledCodexModels]
);
// Filter Cursor models to only show enabled ones
const availableCursorModels = CURSOR_MODELS.filter((model) => {
const cursorId = stripProviderPrefix(model.id) as CursorModelId;
@@ -366,16 +377,20 @@ export function PhaseModelSelector({
// Combine static and dynamic OpenCode models
const allOpencodeModels: ModelOption[] = useMemo(() => {
// Start with static models
const staticModels = [...OPENCODE_MODELS];
const staticModels = OPENCODE_MODELS.filter((model) =>
enabledOpencodeModels.includes(model.id)
);
// Add dynamic models (convert ModelDefinition to ModelOption)
const dynamicModelOptions: ModelOption[] = dynamicOpencodeModels.map((model) => ({
id: model.id,
label: model.name,
description: model.description,
badge: model.tier === 'premium' ? 'Premium' : model.tier === 'basic' ? 'Free' : undefined,
provider: 'opencode' as const,
}));
const dynamicModelOptions: ModelOption[] = dynamicOpencodeModels
.filter((model) => enabledDynamicModelIds.includes(model.id))
.map((model) => ({
id: model.id,
label: model.name,
description: model.description,
badge: model.tier === 'premium' ? 'Premium' : model.tier === 'basic' ? 'Free' : undefined,
provider: 'opencode' as const,
}));
// Merge, avoiding duplicates (static models take precedence for same ID)
// In practice, static and dynamic IDs don't overlap
@@ -383,14 +398,14 @@ export function PhaseModelSelector({
const uniqueDynamic = dynamicModelOptions.filter((m) => !staticIds.has(m.id));
return [...staticModels, ...uniqueDynamic];
}, [dynamicOpencodeModels]);
}, [dynamicOpencodeModels, enabledOpencodeModels, enabledDynamicModelIds]);
// Group models
const { favorites, claude, cursor, codex, opencode } = useMemo(() => {
const favs: typeof CLAUDE_MODELS = [];
const cModels: typeof CLAUDE_MODELS = [];
const curModels: typeof CURSOR_MODELS = [];
const codModels: typeof transformedCodexModels = [];
const codModels: typeof availableCodexModels = [];
const ocModels: ModelOption[] = [];
// Process Claude Models
@@ -412,7 +427,7 @@ export function PhaseModelSelector({
});
// Process Codex Models
transformedCodexModels.forEach((model) => {
availableCodexModels.forEach((model) => {
if (favoriteModels.includes(model.id)) {
favs.push(model);
} else {
@@ -436,7 +451,7 @@ export function PhaseModelSelector({
codex: codModels,
opencode: ocModels,
};
}, [favoriteModels, availableCursorModels, transformedCodexModels, allOpencodeModels]);
}, [favoriteModels, availableCursorModels, availableCodexModels, allOpencodeModels]);
// Group OpenCode models by model type for better organization
const opencodeSections = useMemo(() => {
@@ -453,8 +468,11 @@ export function PhaseModelSelector({
free: {},
dynamic: {},
};
const enabledDynamicProviders = dynamicOpencodeModels.filter((model) =>
enabledDynamicModelIds.includes(model.id)
);
const dynamicProviderById = new Map(
dynamicOpencodeModels.map((model) => [model.id, model.provider])
enabledDynamicProviders.map((model) => [model.id, model.provider])
);
const resolveProviderKey = (modelId: string): string => {
@@ -524,10 +542,10 @@ export function PhaseModelSelector({
}).filter(Boolean) as OpencodeSection[];
return builtSections;
}, [opencode, dynamicOpencodeModels]);
}, [opencode, dynamicOpencodeModels, enabledDynamicModelIds]);
// Render Codex model item with secondary popover for reasoning effort (only for models that support it)
const renderCodexModelItem = (model: (typeof transformedCodexModels)[0]) => {
const renderCodexModelItem = (model: (typeof availableCodexModels)[0]) => {
const isSelected = selectedModel === model.id;
const isFavorite = favoriteModels.includes(model.id);
const hasReasoning = codexModelHasThinking(model.id as CodexModelId);
@@ -708,7 +726,7 @@ export function PhaseModelSelector({
};
// Render OpenCode model item (simple selector, no thinking/reasoning options)
const renderOpencodeModelItem = (model: (typeof OPENCODE_MODELS)[0]) => {
const renderOpencodeModelItem = (model: ModelOption) => {
const isSelected = selectedModel === model.id;
const isFavorite = favoriteModels.includes(model.id);
@@ -1154,7 +1172,7 @@ export function PhaseModelSelector({
}
// Codex model
if (model.provider === 'codex') {
return renderCodexModelItem(model as (typeof transformedCodexModels)[0]);
return renderCodexModelItem(model as (typeof availableCodexModels)[0]);
}
// OpenCode model
if (model.provider === 'opencode') {

View File

@@ -562,6 +562,7 @@ export function hydrateStoreFromSettings(settings: GlobalSettings): void {
useWorktrees: settings.useWorktrees ?? true,
defaultPlanningMode: settings.defaultPlanningMode ?? 'skip',
defaultRequirePlanApproval: settings.defaultRequirePlanApproval ?? false,
defaultFeatureModel: settings.defaultFeatureModel ?? { model: 'opus' },
muteDoneSound: settings.muteDoneSound ?? false,
enhancementModel: settings.enhancementModel ?? 'sonnet',
validationModel: settings.validationModel ?? 'opus',

View File

@@ -42,6 +42,7 @@ const SETTINGS_FIELDS_TO_SYNC = [
'useWorktrees',
'defaultPlanningMode',
'defaultRequirePlanApproval',
'defaultFeatureModel',
'muteDoneSound',
'enhancementModel',
'validationModel',
@@ -466,6 +467,7 @@ export async function refreshSettingsFromServer(): Promise<boolean> {
useWorktrees: serverSettings.useWorktrees,
defaultPlanningMode: serverSettings.defaultPlanningMode,
defaultRequirePlanApproval: serverSettings.defaultRequirePlanApproval,
defaultFeatureModel: serverSettings.defaultFeatureModel ?? { model: 'opus' },
muteDoneSound: serverSettings.muteDoneSound,
enhancementModel: serverSettings.enhancementModel,
validationModel: serverSettings.validationModel,

View File

@@ -1,6 +1,16 @@
import { createFileRoute } from '@tanstack/react-router';
import { SettingsView } from '@/components/views/settings-view';
import type { SettingsViewId } from '@/components/views/settings-view/hooks';
interface SettingsSearchParams {
view?: SettingsViewId;
}
export const Route = createFileRoute('/settings')({
component: SettingsView,
validateSearch: (search: Record<string, unknown>): SettingsSearchParams => {
return {
view: search.view as SettingsViewId | undefined,
};
},
});

View File

@@ -657,6 +657,7 @@ export interface AppState {
defaultPlanningMode: PlanningMode;
defaultRequirePlanApproval: boolean;
defaultFeatureModel: PhaseModelEntry;
// Plan Approval State
// When a plan requires user approval, this holds the pending approval details
@@ -689,6 +690,7 @@ export interface AppState {
codexModelsLoading: boolean;
codexModelsError: string | null;
codexModelsLastFetched: number | null;
codexModelsLastFailedAt: number | null;
// Pipeline Configuration (per-project, keyed by project path)
pipelineConfigByProject: Record<string, PipelineConfig>;
@@ -1103,6 +1105,7 @@ export interface AppActions {
setDefaultPlanningMode: (mode: PlanningMode) => void;
setDefaultRequirePlanApproval: (require: boolean) => void;
setDefaultFeatureModel: (entry: PhaseModelEntry) => void;
// Plan Approval actions
setPendingPlanApproval: (
@@ -1276,6 +1279,7 @@ const initialState: AppState = {
specCreatingForProject: null,
defaultPlanningMode: 'skip' as PlanningMode,
defaultRequirePlanApproval: false,
defaultFeatureModel: { model: 'opus' } as PhaseModelEntry,
pendingPlanApproval: null,
claudeRefreshInterval: 60,
claudeUsage: null,
@@ -1286,6 +1290,7 @@ const initialState: AppState = {
codexModelsLoading: false,
codexModelsError: null,
codexModelsLastFetched: null,
codexModelsLastFailedAt: null,
pipelineConfigByProject: {},
worktreePanelVisibleByProject: {},
showInitScriptIndicatorByProject: {},
@@ -3091,6 +3096,7 @@ export const useAppStore = create<AppState & AppActions>()((set, get) => ({
setDefaultPlanningMode: (mode) => set({ defaultPlanningMode: mode }),
setDefaultRequirePlanApproval: (require) => set({ defaultRequirePlanApproval: require }),
setDefaultFeatureModel: (entry) => set({ defaultFeatureModel: entry }),
// Plan Approval actions
setPendingPlanApproval: (approval) => set({ pendingPlanApproval: approval }),
@@ -3113,13 +3119,29 @@ export const useAppStore = create<AppState & AppActions>()((set, get) => ({
// Codex Models actions
fetchCodexModels: async (forceRefresh = false) => {
const { codexModelsLastFetched, codexModelsLoading } = get();
const FAILURE_COOLDOWN_MS = 30 * 1000; // 30 seconds
const SUCCESS_CACHE_MS = 5 * 60 * 1000; // 5 minutes
const { codexModelsLastFetched, codexModelsLoading, codexModelsLastFailedAt } = get();
// Skip if already loading
if (codexModelsLoading) return;
// Skip if recently fetched (< 5 minutes ago) and not forcing refresh
if (!forceRefresh && codexModelsLastFetched && Date.now() - codexModelsLastFetched < 300000) {
// Skip if recently failed and not forcing refresh
if (
!forceRefresh &&
codexModelsLastFailedAt &&
Date.now() - codexModelsLastFailedAt < FAILURE_COOLDOWN_MS
) {
return;
}
// Skip if recently fetched successfully and not forcing refresh
if (
!forceRefresh &&
codexModelsLastFetched &&
Date.now() - codexModelsLastFetched < SUCCESS_CACHE_MS
) {
return;
}
@@ -3142,12 +3164,14 @@ export const useAppStore = create<AppState & AppActions>()((set, get) => ({
codexModelsLastFetched: Date.now(),
codexModelsLoading: false,
codexModelsError: null,
codexModelsLastFailedAt: null, // Clear failure on success
});
} catch (error) {
const errorMessage = error instanceof Error ? error.message : 'Unknown error';
set({
codexModelsError: errorMessage,
codexModelsLoading: false,
codexModelsLastFailedAt: Date.now(), // Record failure time for cooldown
});
}
},

View File

@@ -123,8 +123,17 @@ test.describe('Feature Manual Review Flow', () => {
await waitForNetworkIdle(page);
await expect(page.locator('[data-testid="board-view"]')).toBeVisible({ timeout: 10000 });
// Verify we're on the correct project
await expect(page.getByText(projectName).first()).toBeVisible({ timeout: 10000 });
// Expand sidebar if collapsed to see project name
const expandSidebarButton = page.locator('button:has-text("Expand sidebar")');
if (await expandSidebarButton.isVisible()) {
await expandSidebarButton.click();
await page.waitForTimeout(300);
}
// Verify we're on the correct project (project name appears in sidebar button)
await expect(page.getByRole('button', { name: new RegExp(projectName) })).toBeVisible({
timeout: 10000,
});
// Create the feature via HTTP API (writes to disk)
const feature = {

View File

@@ -122,9 +122,7 @@ test.describe('Board Background Persistence', () => {
],
});
await authenticateForTests(page);
// Intercept settings API to use our test projects and clear currentProjectId
// Intercept settings API BEFORE authenticateForTests (which navigates to the page)
// This ensures the app shows the welcome view with our test projects
await page.route('**/api/settings/global', async (route) => {
const response = await route.fetch();
@@ -151,6 +149,8 @@ test.describe('Board Background Persistence', () => {
await route.fulfill({ response, json });
});
await authenticateForTests(page);
// Track API calls to /api/settings/project to verify settings are being loaded
const settingsApiCalls: Array<{ url: string; method: string; body: string }> = [];
page.on('request', (request) => {
@@ -163,24 +163,31 @@ test.describe('Board Background Persistence', () => {
}
});
// Navigate to the app
await page.goto('/');
// Navigate directly to dashboard to avoid auto-open which would bypass the project selection
await page.goto('/dashboard');
await page.waitForLoadState('load');
await handleLoginScreenIfPresent(page);
// Wait for welcome view
await expect(page.locator('[data-testid="welcome-view"]')).toBeVisible({ timeout: 10000 });
// Wait for dashboard view
await expect(page.locator('[data-testid="dashboard-view"]')).toBeVisible({ timeout: 10000 });
// Open project A (has background settings)
const projectACard = page.locator(`[data-testid="recent-project-${projectAId}"]`);
const projectACard = page.locator(`[data-testid="project-card-${projectAId}"]`);
await expect(projectACard).toBeVisible();
await projectACard.click();
// Wait for board view
await expect(page.locator('[data-testid="board-view"]')).toBeVisible({ timeout: 15000 });
// Verify project A is current (check header paragraph which is always visible)
await expect(page.locator('[data-testid="board-view"]').getByText(projectAName)).toBeVisible({
// Ensure sidebar is expanded before checking project name
const expandSidebarButton = page.locator('button:has-text("Expand sidebar")');
if (await expandSidebarButton.isVisible()) {
await expandSidebarButton.click();
await page.waitForTimeout(300);
}
// Verify project A is current (project name appears in sidebar button)
await expect(page.getByRole('button', { name: new RegExp(projectAName) })).toBeVisible({
timeout: 5000,
});
@@ -196,13 +203,6 @@ test.describe('Board Background Persistence', () => {
// Wait for initial project load to stabilize
await page.waitForTimeout(500);
// Ensure sidebar is expanded before interacting with project selector
const expandSidebarButton = page.locator('button:has-text("Expand sidebar")');
if (await expandSidebarButton.isVisible()) {
await expandSidebarButton.click();
await page.waitForTimeout(300);
}
// Switch to project B (no background)
const projectSelector = page.locator('[data-testid="project-selector"]');
await expect(projectSelector).toBeVisible({ timeout: 5000 });

View File

@@ -33,27 +33,29 @@ test.describe('Project Creation', () => {
const projectName = `test-project-${Date.now()}`;
await setupWelcomeView(page, { workspaceDir: TEST_TEMP_DIR });
await authenticateForTests(page);
// Intercept settings API to ensure it doesn't return a currentProjectId
// This prevents settings hydration from restoring a project
// Intercept settings API BEFORE authenticateForTests (which navigates to the page)
// This prevents settings hydration from restoring a project and disables auto-open
await page.route('**/api/settings/global', async (route) => {
const response = await route.fetch();
const json = await response.json();
// Remove currentProjectId to prevent restoring a project
// Remove currentProjectId and clear projects to prevent auto-open
if (json.settings) {
json.settings.currentProjectId = null;
json.settings.projects = [];
}
await route.fulfill({ response, json });
});
// Navigate to root
await page.goto('/');
await authenticateForTests(page);
// Navigate directly to dashboard to avoid auto-open logic
await page.goto('/dashboard');
await page.waitForLoadState('load');
await handleLoginScreenIfPresent(page);
// Wait for welcome view to be visible
await expect(page.locator('[data-testid="welcome-view"]')).toBeVisible({ timeout: 15000 });
// Wait for dashboard view
await expect(page.locator('[data-testid="dashboard-view"]')).toBeVisible({ timeout: 15000 });
await page.locator('[data-testid="create-new-project"]').click();
await page.locator('[data-testid="quick-setup-option"]').click();
@@ -67,10 +69,18 @@ test.describe('Project Creation', () => {
await expect(page.locator('[data-testid="board-view"]')).toBeVisible({ timeout: 15000 });
// Expand sidebar if collapsed to see project name
const expandSidebarButton = page.locator('button:has-text("Expand sidebar")');
if (await expandSidebarButton.isVisible()) {
await expandSidebarButton.click();
await page.waitForTimeout(300);
}
// Wait for project to be set as current and visible on the page
// The project name appears in multiple places: project-selector, board header paragraph, etc.
// Check any element containing the project name
await expect(page.getByText(projectName).first()).toBeVisible({ timeout: 15000 });
// The project name appears in the sidebar project selector button
await expect(page.getByRole('button', { name: new RegExp(projectName) })).toBeVisible({
timeout: 15000,
});
// Project was created successfully if we're on board view with project name visible
// Note: The actual project directory is created in the server's default workspace,

View File

@@ -113,12 +113,13 @@ test.describe('Open Project', () => {
// Now navigate to the app
await authenticateForTests(page);
await page.goto('/');
// Navigate directly to dashboard to avoid auto-open which would bypass the project selection
await page.goto('/dashboard');
await page.waitForLoadState('load');
await handleLoginScreenIfPresent(page);
// Wait for welcome view to be visible
await expect(page.locator('[data-testid="welcome-view"]')).toBeVisible({ timeout: 15000 });
// Wait for dashboard view
await expect(page.locator('[data-testid="dashboard-view"]')).toBeVisible({ timeout: 15000 });
// Verify we see the "Recent Projects" section
await expect(page.getByText('Recent Projects')).toBeVisible({ timeout: 5000 });
@@ -135,7 +136,7 @@ test.describe('Open Project', () => {
if (!isOurProjectVisible) {
// Our project isn't visible - use the first available recent project card instead
// This tests the "open recent project" flow even if our specific project didn't get injected
const firstProjectCard = page.locator('[data-testid^="recent-project-"]').first();
const firstProjectCard = page.locator('[data-testid^="project-card-"]').first();
await expect(firstProjectCard).toBeVisible({ timeout: 5000 });
// Get the project name from the card to verify later
targetProjectName = (await firstProjectCard.locator('p').first().textContent()) || '';
@@ -147,10 +148,19 @@ test.describe('Open Project', () => {
// Wait for the board view to appear (project was opened)
await expect(page.locator('[data-testid="board-view"]')).toBeVisible({ timeout: 15000 });
// Expand sidebar if collapsed to see project name
const expandSidebarButton = page.locator('button:has-text("Expand sidebar")');
if (await expandSidebarButton.isVisible()) {
await expandSidebarButton.click();
await page.waitForTimeout(300);
}
// Wait for a project to be set as current and visible on the page
// The project name appears in multiple places: project-selector, board header paragraph, etc.
// The project name appears in the sidebar project selector button
if (targetProjectName) {
await expect(page.getByText(targetProjectName).first()).toBeVisible({ timeout: 15000 });
await expect(page.getByRole('button', { name: new RegExp(targetProjectName) })).toBeVisible({
timeout: 15000,
});
}
// Only verify filesystem if we opened our specific test project

View File

@@ -93,7 +93,9 @@ test.describe('Settings startup sync race', () => {
// App should eventually render a main view after settings hydration.
await page
.locator('[data-testid="welcome-view"], [data-testid="board-view"]')
.locator(
'[data-testid="welcome-view"], [data-testid="dashboard-view"], [data-testid="board-view"]'
)
.first()
.waitFor({ state: 'visible', timeout: 30000 });
@@ -112,7 +114,9 @@ test.describe('Settings startup sync race', () => {
await page.waitForLoadState('load');
await handleLoginScreenIfPresent(page);
await page
.locator('[data-testid="welcome-view"], [data-testid="board-view"]')
.locator(
'[data-testid="welcome-view"], [data-testid="dashboard-view"], [data-testid="board-view"]'
)
.first()
.waitFor({ state: 'visible', timeout: 30000 });

View File

@@ -89,6 +89,7 @@ export const TEST_IDS = {
agentView: 'agent-view',
settingsView: 'settings-view',
welcomeView: 'welcome-view',
dashboardView: 'dashboard-view',
setupView: 'setup-view',
// Board View Components

View File

@@ -75,28 +75,44 @@ export async function handleLoginScreenIfPresent(page: Page): Promise<boolean> {
.locator('[data-testid="login-api-key-input"], input[type="password"][placeholder*="API key"]')
.first();
const appContent = page.locator(
'[data-testid="welcome-view"], [data-testid="board-view"], [data-testid="context-view"], [data-testid="agent-view"]'
'[data-testid="welcome-view"], [data-testid="dashboard-view"], [data-testid="board-view"], [data-testid="context-view"], [data-testid="agent-view"]'
);
const loggedOutPage = page.getByRole('heading', { name: /logged out/i });
const goToLoginButton = page.locator('button:has-text("Go to login")');
const maxWaitMs = 15000;
// Race between login screen, a delayed redirect to /login, and actual content
const loginVisible = await Promise.race([
// Race between login screen, logged-out page, a delayed redirect to /login, and actual content
const result = await Promise.race([
page
.waitForURL((url) => url.pathname.includes('/login'), { timeout: maxWaitMs })
.then(() => true)
.catch(() => false),
.then(() => 'login-redirect' as const)
.catch(() => null),
loginInput
.waitFor({ state: 'visible', timeout: maxWaitMs })
.then(() => true)
.catch(() => false),
.then(() => 'login-input' as const)
.catch(() => null),
loggedOutPage
.waitFor({ state: 'visible', timeout: maxWaitMs })
.then(() => 'logged-out' as const)
.catch(() => null),
appContent
.first()
.waitFor({ state: 'visible', timeout: maxWaitMs })
.then(() => false)
.catch(() => false),
.then(() => 'app-content' as const)
.catch(() => null),
]);
// Handle logged-out page - click the "Go to login" button and then log in
if (result === 'logged-out') {
await goToLoginButton.click();
await page.waitForLoadState('load');
// Now handle the login screen
return handleLoginScreenIfPresent(page);
}
const loginVisible = result === 'login-redirect' || result === 'login-input';
if (loginVisible) {
const apiKey = process.env.AUTOMAKER_API_KEY || 'test-api-key-for-e2e-tests';
await loginInput.fill(apiKey);

View File

@@ -152,7 +152,8 @@ export async function navigateToSetup(page: Page): Promise<void> {
}
/**
* Navigate to the welcome view (clear project selection)
* Navigate to the welcome/dashboard view (clear project selection)
* Note: The app redirects from / to /dashboard when no project is selected
*/
export async function navigateToWelcome(page: Page): Promise<void> {
// Authenticate before navigating
@@ -167,7 +168,11 @@ export async function navigateToWelcome(page: Page): Promise<void> {
// Handle login redirect if needed
await handleLoginScreenIfPresent(page);
await waitForElement(page, 'welcome-view', { timeout: 10000 });
// Wait for either welcome-view or dashboard-view (app redirects to /dashboard when no project)
await page
.locator('[data-testid="welcome-view"], [data-testid="dashboard-view"]')
.first()
.waitFor({ state: 'visible', timeout: 10000 });
}
/**

View File

@@ -17,6 +17,11 @@ services:
build:
context: .
dockerfile: Dockerfile.dev
args:
# Match container user to host user for mounted volume permissions
# Override with: UID=$(id -u) GID=$(id -g) docker-compose build
UID: ${UID:-1001}
GID: ${GID:-1001}
container_name: automaker-dev-server-only
restart: unless-stopped
ports:

View File

@@ -18,6 +18,11 @@ services:
build:
context: .
dockerfile: Dockerfile.dev
args:
# Match container user to host user for mounted volume permissions
# Override with: UID=$(id -u) GID=$(id -g) docker-compose build
UID: ${UID:-1001}
GID: ${GID:-1001}
container_name: automaker-dev-server
restart: unless-stopped
ports:
@@ -94,6 +99,9 @@ services:
build:
context: .
dockerfile: Dockerfile.dev
args:
UID: ${UID:-1001}
GID: ${GID:-1001}
container_name: automaker-dev-ui
restart: unless-stopped
ports:

View File

@@ -28,6 +28,11 @@ services:
context: .
dockerfile: Dockerfile
target: server
args:
# Match container user to host user for mounted volume permissions
# Override with: UID=$(id -u) GID=$(id -g) docker-compose build
UID: ${UID:-1001}
GID: ${GID:-1001}
container_name: automaker-server
restart: unless-stopped
ports:

View File

@@ -57,6 +57,20 @@ docker-compose -f docker-compose.yml -f docker-compose.project.yml up -d
**Tip**: Use `:ro` (read-only) when possible for extra safety.
### Fixing File Permission Issues
When mounting host directories, files created by the container may be owned by UID 1001 (the default container user), causing permission mismatches with your host user. To fix this, rebuild the image with your host UID/GID:
```bash
# Rebuild with your user's UID/GID
UID=$(id -u) GID=$(id -g) docker-compose build
# Then start normally
docker-compose up -d
```
This creates the container user with the same UID/GID as your host user, so files in mounted volumes have correct ownership.
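A quick way to confirm the rebuild took effect (a sketch; `automaker-server` is the container name from `docker-compose.yml`, and the mounted path below is a placeholder):
```bash
# The container user should now report your host UID/GID
docker exec automaker-server id
# e.g. uid=1000(automaker) gid=1000(automaker) groups=1000(automaker)

# Files the container writes into a mounted volume should now belong to your host user
ls -ln /path/to/your/mounted/project
```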
## CLI Authentication (macOS)
On macOS, OAuth tokens are stored in the Keychain (Claude) and in SQLite (Cursor). Use these scripts to extract and pass them to the container:
@@ -129,3 +143,5 @@ volumes:
| Need a fresh start | Run `docker-compose down && docker volume rm automaker-data && docker-compose up -d --build` |
| Cursor auth fails | Re-extract token with `./scripts/get-cursor-token.sh` - tokens expire periodically. Make sure you've run `cursor-agent login` on your host first. |
| OpenCode not detected | Mount `~/.local/share/opencode` to `/home/automaker/.local/share/opencode`. Make sure you've run `opencode auth login` on your host first. |
| File permission errors | Rebuild with `UID=$(id -u) GID=$(id -g) docker-compose build` to match the container user to your host user. See [Fixing File Permission Issues](#fixing-file-permission-issues). |

View File

@@ -174,7 +174,7 @@ export interface ContentBlock {
*/
export interface ProviderMessage {
type: 'assistant' | 'user' | 'error' | 'result';
subtype?: 'success' | 'error';
subtype?: 'success' | 'error' | 'error_max_turns' | 'error_max_structured_output_retries';
session_id?: string;
message?: {
role: 'user' | 'assistant';
@@ -183,6 +183,8 @@ export interface ProviderMessage {
result?: string;
error?: string;
parent_tool_use_id?: string | null;
/** Structured output from SDK when using outputFormat */
structured_output?: Record<string, unknown>;
}
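A minimal sketch of how a caller might consume the new result fields (the import path and function name are illustrative assumptions, not the repository's actual code; only the `ProviderMessage` fields shown above are taken from the diff):
```typescript
import type { ProviderMessage } from './types.js';

function handleResultMessage(msg: ProviderMessage): Record<string, unknown> | string | undefined {
  if (msg.type !== 'result') return undefined;

  // The new subtypes surface turn-limit and structured-output retry failures explicitly
  if (msg.subtype === 'error_max_turns' || msg.subtype === 'error_max_structured_output_retries') {
    throw new Error(msg.error ?? `Provider result failed: ${msg.subtype}`);
  }

  // When outputFormat was requested, prefer the parsed structured output over the raw text result
  return msg.structured_output ?? msg.result;
}
```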
/**

View File

@@ -375,6 +375,8 @@ export interface GlobalSettings {
defaultPlanningMode: PlanningMode;
/** Default: require manual approval before generating */
defaultRequirePlanApproval: boolean;
/** Default model and thinking level for new feature cards */
defaultFeatureModel: PhaseModelEntry;
// Audio Preferences
/** Mute completion notification sound */
@@ -698,6 +700,7 @@ export const DEFAULT_GLOBAL_SETTINGS: GlobalSettings = {
useWorktrees: true,
defaultPlanningMode: 'skip',
defaultRequirePlanApproval: false,
defaultFeatureModel: { model: 'opus' },
muteDoneSound: false,
phaseModels: DEFAULT_PHASE_MODELS,
enhancementModel: 'sonnet',
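A minimal sketch of how the new default might be applied when creating a feature card (the helper name and import path are assumptions; only `GlobalSettings`, `PhaseModelEntry`, and the `{ model: 'opus' }` default come from the diff above):
```typescript
import type { GlobalSettings, PhaseModelEntry } from './settings.js';

/** Pick the model/thinking level to seed a new feature card with. */
function resolveFeatureModel(
  settings: GlobalSettings,
  cardOverride?: PhaseModelEntry
): PhaseModelEntry {
  // A per-card selection wins; otherwise fall back to the user's configured default,
  // which DEFAULT_GLOBAL_SETTINGS initializes to { model: 'opus' }.
  return cardOverride ?? settings.defaultFeatureModel;
}
```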

93 package-lock.json generated
View File

@@ -679,6 +679,7 @@
"integrity": "sha512-e7jT4DxYvIDLk1ZHmU/m/mB19rex9sv0c2ftBtjSBv+kVM/902eh0fINUzD7UwLLNR+jU585GxUJ8/EBfAM5fw==",
"dev": true,
"license": "MIT",
"peer": true,
"dependencies": {
"@babel/code-frame": "^7.27.1",
"@babel/generator": "^7.28.5",
@@ -1271,6 +1272,7 @@
"resolved": "https://registry.npmjs.org/@codemirror/view/-/view-6.39.4.tgz",
"integrity": "sha512-xMF6OfEAUVY5Waega4juo1QGACfNkNF+aJLqpd8oUJz96ms2zbfQ9Gh35/tI3y8akEV31FruKfj7hBnIU/nkqA==",
"license": "MIT",
"peer": true,
"dependencies": {
"@codemirror/state": "^6.5.0",
"crelt": "^1.0.6",
@@ -1313,6 +1315,7 @@
"resolved": "https://registry.npmjs.org/@dnd-kit/core/-/core-6.3.1.tgz",
"integrity": "sha512-xkGBRQQab4RLwgXxoqETICr6S5JlogafbhNsidmrkVv2YRs5MLwpjoF2qpiGjQt8S9AoxtIV603s0GIUpY5eYQ==",
"license": "MIT",
"peer": true,
"dependencies": {
"@dnd-kit/accessibility": "^3.1.1",
"@dnd-kit/utilities": "^3.2.2",
@@ -1479,7 +1482,7 @@
},
"node_modules/@electron/node-gyp": {
"version": "10.2.0-electron.1",
"resolved": "git+ssh://git@github.com/electron/node-gyp.git#06b29aafb7708acef8b3669835c8a7857ebc92d2",
"resolved": "git+https://github.com/electron/node-gyp.git#06b29aafb7708acef8b3669835c8a7857ebc92d2",
"integrity": "sha512-4MSBTT8y07YUDqf69/vSh80Hh791epYqGtWHO3zSKhYFwQg+gx9wi1PqbqP6YqC4WMsNxZ5l9oDmnWdK5pfCKQ==",
"dev": true,
"license": "MIT",
@@ -2133,7 +2136,6 @@
"dev": true,
"license": "BSD-2-Clause",
"optional": true,
"peer": true,
"dependencies": {
"cross-dirname": "^0.1.0",
"debug": "^4.3.4",
@@ -2155,7 +2157,6 @@
"dev": true,
"license": "MIT",
"optional": true,
"peer": true,
"dependencies": {
"graceful-fs": "^4.2.0",
"jsonfile": "^6.0.1",
@@ -2172,7 +2173,6 @@
"dev": true,
"license": "MIT",
"optional": true,
"peer": true,
"dependencies": {
"universalify": "^2.0.0"
},
@@ -2187,7 +2187,6 @@
"dev": true,
"license": "MIT",
"optional": true,
"peer": true,
"engines": {
"node": ">= 10.0.0"
}
@@ -2955,7 +2954,6 @@
"integrity": "sha512-A5P/LfWGFSl6nsckYtjw9da+19jB8hkJ6ACTGcDfEJ0aE+l2n2El7dsVM7UVHZQ9s2lmYMWlrS21YLy2IR1LUw==",
"license": "MIT",
"optional": true,
"peer": true,
"engines": {
"node": ">=18"
}
@@ -3080,7 +3078,6 @@
"os": [
"linux"
],
"peer": true,
"funding": {
"url": "https://opencollective.com/libvips"
}
@@ -3097,7 +3094,6 @@
"os": [
"linux"
],
"peer": true,
"funding": {
"url": "https://opencollective.com/libvips"
}
@@ -3114,7 +3110,6 @@
"os": [
"linux"
],
"peer": true,
"funding": {
"url": "https://opencollective.com/libvips"
}
@@ -3223,7 +3218,6 @@
"os": [
"linux"
],
"peer": true,
"engines": {
"node": "^18.17.0 || ^20.3.0 || >=21.0.0"
},
@@ -3246,7 +3240,6 @@
"os": [
"linux"
],
"peer": true,
"engines": {
"node": "^18.17.0 || ^20.3.0 || >=21.0.0"
},
@@ -3269,7 +3262,6 @@
"os": [
"linux"
],
"peer": true,
"engines": {
"node": "^18.17.0 || ^20.3.0 || >=21.0.0"
},
@@ -3355,7 +3347,6 @@
],
"license": "Apache-2.0 AND LGPL-3.0-or-later AND MIT",
"optional": true,
"peer": true,
"dependencies": {
"@emnapi/runtime": "^1.7.0"
},
@@ -3378,7 +3369,6 @@
"os": [
"win32"
],
"peer": true,
"engines": {
"node": "^18.17.0 || ^20.3.0 || >=21.0.0"
},
@@ -3398,7 +3388,6 @@
"os": [
"win32"
],
"peer": true,
"engines": {
"node": "^18.17.0 || ^20.3.0 || >=21.0.0"
},
@@ -3798,8 +3787,7 @@
"version": "16.0.10",
"resolved": "https://registry.npmjs.org/@next/env/-/env-16.0.10.tgz",
"integrity": "sha512-8tuaQkyDVgeONQ1MeT9Mkk8pQmZapMKFh5B+OrFUlG3rVmYTXcXlBetBgTurKXGaIZvkoqRT9JL5K3phXcgang==",
"license": "MIT",
"peer": true
"license": "MIT"
},
"node_modules/@next/swc-darwin-arm64": {
"version": "16.0.10",
@@ -3813,7 +3801,6 @@
"os": [
"darwin"
],
"peer": true,
"engines": {
"node": ">= 10"
}
@@ -3830,7 +3817,6 @@
"os": [
"darwin"
],
"peer": true,
"engines": {
"node": ">= 10"
}
@@ -3847,7 +3833,6 @@
"os": [
"linux"
],
"peer": true,
"engines": {
"node": ">= 10"
}
@@ -3864,7 +3849,6 @@
"os": [
"linux"
],
"peer": true,
"engines": {
"node": ">= 10"
}
@@ -3881,7 +3865,6 @@
"os": [
"linux"
],
"peer": true,
"engines": {
"node": ">= 10"
}
@@ -3898,7 +3881,6 @@
"os": [
"linux"
],
"peer": true,
"engines": {
"node": ">= 10"
}
@@ -3915,7 +3897,6 @@
"os": [
"win32"
],
"peer": true,
"engines": {
"node": ">= 10"
}
@@ -3932,7 +3913,6 @@
"os": [
"win32"
],
"peer": true,
"engines": {
"node": ">= 10"
}
@@ -4032,6 +4012,7 @@
"integrity": "sha512-6TyEnHgd6SArQO8UO2OMTxshln3QMWBtPGrOCgs3wVEmQmwyuNtB10IZMfmYDE0riwNR1cu4q+pPcxMVtaG3TA==",
"devOptional": true,
"license": "Apache-2.0",
"peer": true,
"dependencies": {
"playwright": "1.57.0"
},
@@ -5472,7 +5453,6 @@
"resolved": "https://registry.npmjs.org/@swc/helpers/-/helpers-0.5.15.tgz",
"integrity": "sha512-JQ5TuMi45Owi4/BIMAJBoSQoOJu12oOk/gADqlcUL9JEdHB8vyjUSsxqeNXnmXHjYKMi2WcYtezGEEhqUI/E2g==",
"license": "Apache-2.0",
"peer": true,
"dependencies": {
"tslib": "^2.8.0"
}
@@ -5806,6 +5786,7 @@
"resolved": "https://registry.npmjs.org/@tanstack/react-router/-/react-router-1.141.6.tgz",
"integrity": "sha512-qWFxi2D6eGc1L03RzUuhyEOplZ7Q6q62YOl7Of9Y0q4YjwQwxRm4zxwDVtvUIoy4RLVCpqp5UoE+Nxv2PY9trg==",
"license": "MIT",
"peer": true,
"dependencies": {
"@tanstack/history": "1.141.0",
"@tanstack/react-store": "^0.8.0",
@@ -6232,6 +6213,7 @@
"integrity": "sha512-sKYVuV7Sv9fbPIt/442koC7+IIwK5olP1KWeD88e/idgoJqDm3JV/YUiPwkoKK92ylff2MGxSz1CSjsXelx0YA==",
"dev": true,
"license": "MIT",
"peer": true,
"dependencies": {
"@types/body-parser": "*",
"@types/express-serve-static-core": "^5.0.0",
@@ -6374,6 +6356,7 @@
"resolved": "https://registry.npmjs.org/@types/react/-/react-19.2.7.tgz",
"integrity": "sha512-MWtvHrGZLFttgeEj28VXHxpmwYbor/ATPYbBfSFZEIRK0ecCFLl2Qo55z52Hss+UV9CRN7trSeq1zbgx7YDWWg==",
"license": "MIT",
"peer": true,
"dependencies": {
"csstype": "^3.2.2"
}
@@ -6384,6 +6367,7 @@
"integrity": "sha512-jp2L/eY6fn+KgVVQAOqYItbF0VY/YApe5Mz2F0aykSO8gx31bYCZyvSeYxCHKvzHG5eZjc+zyaS5BrBWya2+kQ==",
"devOptional": true,
"license": "MIT",
"peer": true,
"peerDependencies": {
"@types/react": "^19.2.0"
}
@@ -6489,6 +6473,7 @@
"integrity": "sha512-6/cmF2piao+f6wSxUsJLZjck7OQsYyRtcOZS02k7XINSNlz93v6emM8WutDQSXnroG2xwYlEVHJI+cPA7CPM3Q==",
"dev": true,
"license": "MIT",
"peer": true,
"dependencies": {
"@typescript-eslint/scope-manager": "8.50.0",
"@typescript-eslint/types": "8.50.0",
@@ -6982,7 +6967,8 @@
"version": "5.5.0",
"resolved": "https://registry.npmjs.org/@xterm/xterm/-/xterm-5.5.0.tgz",
"integrity": "sha512-hqJHYaQb5OptNunnyAnkHyM8aCjZ1MEIDTQu1iIbbTD/xops91NB5yq1ZK/dC2JDbVWtF23zUtl9JE2NqwT87A==",
"license": "MIT"
"license": "MIT",
"peer": true
},
"node_modules/@xyflow/react": {
"version": "12.10.0",
@@ -7080,6 +7066,7 @@
"integrity": "sha512-NZyJarBfL7nWwIq+FDL6Zp/yHEhePMNnnJ0y3qfieCrmNvYct8uvtiV41UvlSe6apAfk0fY1FbWx+NwfmpvtTg==",
"dev": true,
"license": "MIT",
"peer": true,
"bin": {
"acorn": "bin/acorn"
},
@@ -7140,6 +7127,7 @@
"integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==",
"dev": true,
"license": "MIT",
"peer": true,
"dependencies": {
"fast-deep-equal": "^3.1.1",
"fast-json-stable-stringify": "^2.0.0",
@@ -7738,6 +7726,7 @@
}
],
"license": "MIT",
"peer": true,
"dependencies": {
"baseline-browser-mapping": "^2.9.0",
"caniuse-lite": "^1.0.30001759",
@@ -8269,8 +8258,7 @@
"version": "0.0.1",
"resolved": "https://registry.npmjs.org/client-only/-/client-only-0.0.1.tgz",
"integrity": "sha512-IV3Ou0jSMzZrd3pZ48nLkT9DA7Ag1pnPzaiQhpW7c3RbcqqzvzzVu+L8gfqMp/8IM2MQtSiqaCxrrcfu8I8rMA==",
"license": "MIT",
"peer": true
"license": "MIT"
},
"node_modules/cliui": {
"version": "8.0.1",
@@ -8575,8 +8563,7 @@
"integrity": "sha512-+R08/oI0nl3vfPcqftZRpytksBXDzOUveBq/NBVx0sUp1axwzPQrKinNx5yd5sxPu8j1wIy8AfnVQ+5eFdha6Q==",
"dev": true,
"license": "MIT",
"optional": true,
"peer": true
"optional": true
},
"node_modules/cross-env": {
"version": "10.1.0",
@@ -8673,6 +8660,7 @@
"resolved": "https://registry.npmjs.org/d3-selection/-/d3-selection-3.0.0.tgz",
"integrity": "sha512-fmTRWbNMmsmWq6xJV8D19U/gw/bwrHfNXxrIN+HfZgnzqTHp9jOmKMhsTUjXOJnZOdZY9Q28y4yebKzqDKlxlQ==",
"license": "ISC",
"peer": true,
"engines": {
"node": ">=12"
}
@@ -8974,6 +8962,7 @@
"integrity": "sha512-59CAAjAhTaIMCN8y9kD573vDkxbs1uhDcrFLHSgutYdPcGOU35Rf95725snvzEOy4BFB7+eLJ8djCNPmGwG67w==",
"dev": true,
"license": "MIT",
"peer": true,
"dependencies": {
"app-builder-lib": "26.0.12",
"builder-util": "26.0.11",
@@ -9300,7 +9289,6 @@
"dev": true,
"hasInstallScript": true,
"license": "MIT",
"peer": true,
"dependencies": {
"@electron/asar": "^3.2.1",
"debug": "^4.1.1",
@@ -9321,7 +9309,6 @@
"integrity": "sha512-YJDaCJZEnBmcbw13fvdAM9AwNOJwOzrE4pqMqBq5nFiEqXUqHwlK4B+3pUw6JNvfSPtX05xFHtYy/1ni01eGCw==",
"dev": true,
"license": "MIT",
"peer": true,
"dependencies": {
"graceful-fs": "^4.1.2",
"jsonfile": "^4.0.0",
@@ -9572,6 +9559,7 @@
"integrity": "sha512-LEyamqS7W5HB3ujJyvi0HQK/dtVINZvd5mAAp9eT5S/ujByGjiZLCzPcHVzuXbpJDJF/cxwHlfceVUDZ2lnSTw==",
"dev": true,
"license": "MIT",
"peer": true,
"dependencies": {
"@eslint-community/eslint-utils": "^4.8.0",
"@eslint-community/regexpp": "^4.12.1",
@@ -9886,6 +9874,7 @@
"resolved": "https://registry.npmjs.org/express/-/express-5.2.1.tgz",
"integrity": "sha512-hIS4idWWai69NezIdRt2xFVofaF4j+6INOpJlVOLDO8zXGpUVEVzIYk12UUi2JzjEzWL3IOAxcTubgz9Po0yXw==",
"license": "MIT",
"peer": true,
"dependencies": {
"accepts": "^2.0.0",
"body-parser": "^2.2.1",
@@ -11553,7 +11542,6 @@
"os": [
"android"
],
"peer": true,
"engines": {
"node": ">= 12.0.0"
},
@@ -11619,7 +11607,6 @@
"os": [
"freebsd"
],
"peer": true,
"engines": {
"node": ">= 12.0.0"
},
@@ -14061,7 +14048,6 @@
}
],
"license": "MIT",
"peer": true,
"dependencies": {
"nanoid": "^3.3.6",
"picocolors": "^1.0.0",
@@ -14078,7 +14064,6 @@
"dev": true,
"license": "MIT",
"optional": true,
"peer": true,
"dependencies": {
"commander": "^9.4.0"
},
@@ -14096,7 +14081,6 @@
"dev": true,
"license": "MIT",
"optional": true,
"peer": true,
"engines": {
"node": "^12.20.0 || >=14"
}
@@ -14285,6 +14269,7 @@
"resolved": "https://registry.npmjs.org/react/-/react-19.2.3.tgz",
"integrity": "sha512-Ku/hhYbVjOQnXDZFv2+RibmLFGwFdeeKHFcOTlrt7xplBnya5OGn/hIRDsqDiSUcfORsDC7MPxwork8jBwsIWA==",
"license": "MIT",
"peer": true,
"engines": {
"node": ">=0.10.0"
}
@@ -14294,6 +14279,7 @@
"resolved": "https://registry.npmjs.org/react-dom/-/react-dom-19.2.3.tgz",
"integrity": "sha512-yELu4WmLPw5Mr/lmeEpox5rw3RETacE++JgHqQzd2dg+YbJuat3jH4ingc+WPZhxaoFzdv9y33G+F7Nl5O0GBg==",
"license": "MIT",
"peer": true,
"dependencies": {
"scheduler": "^0.27.0"
},
@@ -14652,7 +14638,6 @@
"deprecated": "Rimraf versions prior to v4 are no longer supported",
"dev": true,
"license": "ISC",
"peer": true,
"dependencies": {
"glob": "^7.1.3"
},
@@ -14841,6 +14826,7 @@
"resolved": "https://registry.npmjs.org/seroval/-/seroval-1.4.0.tgz",
"integrity": "sha512-BdrNXdzlofomLTiRnwJTSEAaGKyHHZkbMXIywOh7zlzp4uZnXErEwl9XZ+N1hJSNpeTtNxWvVwN0wUzAIQ4Hpg==",
"license": "MIT",
"peer": true,
"engines": {
"node": ">=10"
}
@@ -14889,7 +14875,6 @@
"hasInstallScript": true,
"license": "Apache-2.0",
"optional": true,
"peer": true,
"dependencies": {
"@img/colour": "^1.0.0",
"detect-libc": "^2.1.2",
@@ -14940,7 +14925,6 @@
"os": [
"darwin"
],
"peer": true,
"engines": {
"node": "^18.17.0 || ^20.3.0 || >=21.0.0"
},
@@ -14963,7 +14947,6 @@
"os": [
"darwin"
],
"peer": true,
"engines": {
"node": "^18.17.0 || ^20.3.0 || >=21.0.0"
},
@@ -14986,7 +14969,6 @@
"os": [
"darwin"
],
"peer": true,
"funding": {
"url": "https://opencollective.com/libvips"
}
@@ -15003,7 +14985,6 @@
"os": [
"darwin"
],
"peer": true,
"funding": {
"url": "https://opencollective.com/libvips"
}
@@ -15020,7 +15001,6 @@
"os": [
"linux"
],
"peer": true,
"funding": {
"url": "https://opencollective.com/libvips"
}
@@ -15037,7 +15017,6 @@
"os": [
"linux"
],
"peer": true,
"funding": {
"url": "https://opencollective.com/libvips"
}
@@ -15054,7 +15033,6 @@
"os": [
"linux"
],
"peer": true,
"funding": {
"url": "https://opencollective.com/libvips"
}
@@ -15071,7 +15049,6 @@
"os": [
"linux"
],
"peer": true,
"funding": {
"url": "https://opencollective.com/libvips"
}
@@ -15088,7 +15065,6 @@
"os": [
"linux"
],
"peer": true,
"funding": {
"url": "https://opencollective.com/libvips"
}
@@ -15105,7 +15081,6 @@
"os": [
"linux"
],
"peer": true,
"engines": {
"node": "^18.17.0 || ^20.3.0 || >=21.0.0"
},
@@ -15128,7 +15103,6 @@
"os": [
"linux"
],
"peer": true,
"engines": {
"node": "^18.17.0 || ^20.3.0 || >=21.0.0"
},
@@ -15151,7 +15125,6 @@
"os": [
"linux"
],
"peer": true,
"engines": {
"node": "^18.17.0 || ^20.3.0 || >=21.0.0"
},
@@ -15174,7 +15147,6 @@
"os": [
"linux"
],
"peer": true,
"engines": {
"node": "^18.17.0 || ^20.3.0 || >=21.0.0"
},
@@ -15197,7 +15169,6 @@
"os": [
"linux"
],
"peer": true,
"engines": {
"node": "^18.17.0 || ^20.3.0 || >=21.0.0"
},
@@ -15220,7 +15191,6 @@
"os": [
"win32"
],
"peer": true,
"engines": {
"node": "^18.17.0 || ^20.3.0 || >=21.0.0"
},
@@ -15689,7 +15659,6 @@
"resolved": "https://registry.npmjs.org/styled-jsx/-/styled-jsx-5.1.6.tgz",
"integrity": "sha512-qSVyDTeMotdvQYoHWLNGwRFJHC+i+ZvdBRYosOFgC+Wg1vx4frN2/RG/NA7SYqqvKNLf39P2LSRA2pu6n0XYZA==",
"license": "MIT",
"peer": true,
"dependencies": {
"client-only": "0.0.1"
},
@@ -15859,7 +15828,6 @@
"integrity": "sha512-yYrrsWnrXMcdsnu/7YMYAofM1ktpL5By7vZhf15CrXijWWrEYZks5AXBudalfSWJLlnen/QUJUB5aoB0kqZUGA==",
"dev": true,
"license": "MIT",
"peer": true,
"dependencies": {
"mkdirp": "^0.5.1",
"rimraf": "~2.6.2"
@@ -15923,7 +15891,6 @@
"integrity": "sha512-FP+p8RB8OWpF3YZBCrP5gtADmtXApB5AMLn+vdyA+PyxCjrCs00mjyUozssO33cwDeT3wNGdLxJ5M//YqtHAJw==",
"dev": true,
"license": "MIT",
"peer": true,
"dependencies": {
"minimist": "^1.2.6"
},
@@ -16021,6 +15988,7 @@
"integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==",
"dev": true,
"license": "MIT",
"peer": true,
"engines": {
"node": ">=12"
},
@@ -16225,6 +16193,7 @@
"integrity": "sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw==",
"dev": true,
"license": "Apache-2.0",
"peer": true,
"bin": {
"tsc": "bin/tsc",
"tsserver": "bin/tsserver"
@@ -16596,6 +16565,7 @@
"integrity": "sha512-dZwN5L1VlUBewiP6H9s2+B3e3Jg96D0vzN+Ry73sOefebhYr9f94wwkMNN/9ouoU8pV1BqA1d1zGk8928cx0rg==",
"dev": true,
"license": "MIT",
"peer": true,
"dependencies": {
"esbuild": "^0.27.0",
"fdir": "^6.5.0",
@@ -16685,7 +16655,8 @@
"resolved": "https://registry.npmjs.org/vite-plugin-electron-renderer/-/vite-plugin-electron-renderer-0.14.6.tgz",
"integrity": "sha512-oqkWFa7kQIkvHXG7+Mnl1RTroA4sP0yesKatmAy0gjZC4VwUqlvF9IvOpHd1fpLWsqYX/eZlVxlhULNtaQ78Jw==",
"dev": true,
"license": "MIT"
"license": "MIT",
"peer": true
},
"node_modules/vite/node_modules/fdir": {
"version": "6.5.0",
@@ -16711,6 +16682,7 @@
"integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==",
"dev": true,
"license": "MIT",
"peer": true,
"engines": {
"node": ">=12"
},
@@ -16753,6 +16725,7 @@
"integrity": "sha512-E4t7DJ9pESL6E3I8nFjPa4xGUd3PmiWDLsDztS2qXSJWfHtbQnwAWylaBvSNY48I3vr8PTqIZlyK8TE3V3CA4Q==",
"dev": true,
"license": "MIT",
"peer": true,
"dependencies": {
"@vitest/expect": "4.0.16",
"@vitest/mocker": "4.0.16",
@@ -17010,6 +16983,7 @@
"integrity": "sha512-mplynKqc1C2hTVYxd0PU2xQAc22TI1vShAYGksCCfxbn/dFwnHTNi1bvYsBTkhdUNtGIf5xNOg938rrSSYvS9A==",
"dev": true,
"license": "ISC",
"peer": true,
"bin": {
"yaml": "bin.mjs"
},
@@ -17078,6 +17052,7 @@
"resolved": "https://registry.npmjs.org/zod/-/zod-4.2.1.tgz",
"integrity": "sha512-0wZ1IRqGGhMP76gLqz8EyfBXKk0J2qo2+H3fi4mcUP/KtTocoX08nmIAHl1Z2kJIZbZee8KOpBCSNPRgauucjw==",
"license": "MIT",
"peer": true,
"funding": {
"url": "https://github.com/sponsors/colinhacks"
}