Merge branch 'v0.9.0rc' into feat/subagents-skills

Resolved conflict in agent-service.ts by keeping both:
- agents parameter for custom subagents (from our branch)
- thinkingLevel and reasoningEffort parameters (from v0.9.0rc)

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
Kacper
2026-01-08 22:57:09 +01:00
46 changed files with 1570 additions and 525 deletions

View File

@@ -10,7 +10,7 @@ import { BaseProvider } from './base-provider.js';
import { classifyError, getUserFriendlyErrorMessage, createLogger } from '@automaker/utils'; import { classifyError, getUserFriendlyErrorMessage, createLogger } from '@automaker/utils';
const logger = createLogger('ClaudeProvider'); const logger = createLogger('ClaudeProvider');
import { getThinkingTokenBudget } from '@automaker/types'; import { getThinkingTokenBudget, validateBareModelId } from '@automaker/types';
import type { import type {
ExecuteOptions, ExecuteOptions,
ProviderMessage, ProviderMessage,
@@ -53,6 +53,10 @@ export class ClaudeProvider extends BaseProvider {
* Execute a query using Claude Agent SDK * Execute a query using Claude Agent SDK
*/ */
async *executeQuery(options: ExecuteOptions): AsyncGenerator<ProviderMessage> { async *executeQuery(options: ExecuteOptions): AsyncGenerator<ProviderMessage> {
// Validate that model doesn't have a provider prefix
// AgentService should strip prefixes before passing to providers
validateBareModelId(options.model, 'ClaudeProvider');
const { const {
prompt, prompt,
model, model,

View File

@@ -7,16 +7,17 @@
import { CODEX_MODEL_MAP } from '@automaker/types'; import { CODEX_MODEL_MAP } from '@automaker/types';
import type { ModelDefinition } from './types.js'; import type { ModelDefinition } from './types.js';
const CONTEXT_WINDOW_200K = 200000; const CONTEXT_WINDOW_256K = 256000;
const CONTEXT_WINDOW_128K = 128000; const CONTEXT_WINDOW_128K = 128000;
const MAX_OUTPUT_32K = 32000; const MAX_OUTPUT_32K = 32000;
const MAX_OUTPUT_16K = 16000; const MAX_OUTPUT_16K = 16000;
/** /**
* All available Codex models with their specifications * All available Codex models with their specifications
* Based on https://developers.openai.com/codex/models/
*/ */
export const CODEX_MODELS: ModelDefinition[] = [ export const CODEX_MODELS: ModelDefinition[] = [
// ========== Codex-Specific Models ========== // ========== Recommended Codex Models ==========
{ {
id: CODEX_MODEL_MAP.gpt52Codex, id: CODEX_MODEL_MAP.gpt52Codex,
name: 'GPT-5.2-Codex', name: 'GPT-5.2-Codex',
@@ -24,7 +25,7 @@ export const CODEX_MODELS: ModelDefinition[] = [
provider: 'openai', provider: 'openai',
description: description:
'Most advanced agentic coding model for complex software engineering (default for ChatGPT users).', 'Most advanced agentic coding model for complex software engineering (default for ChatGPT users).',
contextWindow: CONTEXT_WINDOW_200K, contextWindow: CONTEXT_WINDOW_256K,
maxOutputTokens: MAX_OUTPUT_32K, maxOutputTokens: MAX_OUTPUT_32K,
supportsVision: true, supportsVision: true,
supportsTools: true, supportsTools: true,
@@ -33,38 +34,12 @@ export const CODEX_MODELS: ModelDefinition[] = [
hasReasoning: true, hasReasoning: true,
}, },
{ {
id: CODEX_MODEL_MAP.gpt5Codex, id: CODEX_MODEL_MAP.gpt51CodexMax,
name: 'GPT-5-Codex', name: 'GPT-5.1-Codex-Max',
modelString: CODEX_MODEL_MAP.gpt5Codex, modelString: CODEX_MODEL_MAP.gpt51CodexMax,
provider: 'openai', provider: 'openai',
description: 'Purpose-built for Codex CLI with versatile tool use (default for CLI users).', description: 'Optimized for long-horizon, agentic coding tasks in Codex.',
contextWindow: CONTEXT_WINDOW_200K, contextWindow: CONTEXT_WINDOW_256K,
maxOutputTokens: MAX_OUTPUT_32K,
supportsVision: true,
supportsTools: true,
tier: 'standard' as const,
hasReasoning: true,
},
{
id: CODEX_MODEL_MAP.gpt5CodexMini,
name: 'GPT-5-Codex-Mini',
modelString: CODEX_MODEL_MAP.gpt5CodexMini,
provider: 'openai',
description: 'Faster workflows optimized for low-latency code Q&A and editing.',
contextWindow: CONTEXT_WINDOW_128K,
maxOutputTokens: MAX_OUTPUT_16K,
supportsVision: false,
supportsTools: true,
tier: 'basic' as const,
hasReasoning: false,
},
{
id: CODEX_MODEL_MAP.codex1,
name: 'Codex-1',
modelString: CODEX_MODEL_MAP.codex1,
provider: 'openai',
description: 'Version of o3 optimized for software engineering with advanced reasoning.',
contextWindow: CONTEXT_WINDOW_200K,
maxOutputTokens: MAX_OUTPUT_32K, maxOutputTokens: MAX_OUTPUT_32K,
supportsVision: true, supportsVision: true,
supportsTools: true, supportsTools: true,
@@ -72,27 +47,40 @@ export const CODEX_MODELS: ModelDefinition[] = [
hasReasoning: true, hasReasoning: true,
}, },
{ {
id: CODEX_MODEL_MAP.codexMiniLatest, id: CODEX_MODEL_MAP.gpt51CodexMini,
name: 'Codex-Mini-Latest', name: 'GPT-5.1-Codex-Mini',
modelString: CODEX_MODEL_MAP.codexMiniLatest, modelString: CODEX_MODEL_MAP.gpt51CodexMini,
provider: 'openai', provider: 'openai',
description: 'Version of o4-mini designed for Codex with faster workflows.', description: 'Smaller, more cost-effective version for faster workflows.',
contextWindow: CONTEXT_WINDOW_128K, contextWindow: CONTEXT_WINDOW_128K,
maxOutputTokens: MAX_OUTPUT_16K, maxOutputTokens: MAX_OUTPUT_16K,
supportsVision: true, supportsVision: true,
supportsTools: true, supportsTools: true,
tier: 'standard' as const, tier: 'basic' as const,
hasReasoning: false, hasReasoning: false,
}, },
// ========== Base GPT-5 Model ========== // ========== General-Purpose GPT Models ==========
{ {
id: CODEX_MODEL_MAP.gpt5, id: CODEX_MODEL_MAP.gpt52,
name: 'GPT-5', name: 'GPT-5.2',
modelString: CODEX_MODEL_MAP.gpt5, modelString: CODEX_MODEL_MAP.gpt52,
provider: 'openai', provider: 'openai',
description: 'GPT-5 base flagship model with strong general-purpose capabilities.', description: 'Best general agentic model for tasks across industries and domains.',
contextWindow: CONTEXT_WINDOW_200K, contextWindow: CONTEXT_WINDOW_256K,
maxOutputTokens: MAX_OUTPUT_32K,
supportsVision: true,
supportsTools: true,
tier: 'standard' as const,
hasReasoning: true,
},
{
id: CODEX_MODEL_MAP.gpt51,
name: 'GPT-5.1',
modelString: CODEX_MODEL_MAP.gpt51,
provider: 'openai',
description: 'Great for coding and agentic tasks across domains.',
contextWindow: CONTEXT_WINDOW_256K,
maxOutputTokens: MAX_OUTPUT_32K, maxOutputTokens: MAX_OUTPUT_32K,
supportsVision: true, supportsVision: true,
supportsTools: true, supportsTools: true,

View File

@@ -31,6 +31,7 @@ import type {
import { import {
CODEX_MODEL_MAP, CODEX_MODEL_MAP,
supportsReasoningEffort, supportsReasoningEffort,
validateBareModelId,
type CodexApprovalPolicy, type CodexApprovalPolicy,
type CodexSandboxMode, type CodexSandboxMode,
type CodexAuthStatus, type CodexAuthStatus,
@@ -61,6 +62,7 @@ const CODEX_ADD_DIR_FLAG = '--add-dir';
const CODEX_SKIP_GIT_REPO_CHECK_FLAG = '--skip-git-repo-check'; const CODEX_SKIP_GIT_REPO_CHECK_FLAG = '--skip-git-repo-check';
const CODEX_RESUME_FLAG = 'resume'; const CODEX_RESUME_FLAG = 'resume';
const CODEX_REASONING_EFFORT_KEY = 'reasoning_effort'; const CODEX_REASONING_EFFORT_KEY = 'reasoning_effort';
const CODEX_YOLO_FLAG = '--dangerously-bypass-approvals-and-sandbox';
const OPENAI_API_KEY_ENV = 'OPENAI_API_KEY'; const OPENAI_API_KEY_ENV = 'OPENAI_API_KEY';
const CODEX_EXECUTION_MODE_CLI = 'cli'; const CODEX_EXECUTION_MODE_CLI = 'cli';
const CODEX_EXECUTION_MODE_SDK = 'sdk'; const CODEX_EXECUTION_MODE_SDK = 'sdk';
@@ -662,6 +664,10 @@ export class CodexProvider extends BaseProvider {
} }
async *executeQuery(options: ExecuteOptions): AsyncGenerator<ProviderMessage> { async *executeQuery(options: ExecuteOptions): AsyncGenerator<ProviderMessage> {
// Validate that model doesn't have a provider prefix
// AgentService should strip prefixes before passing to providers
validateBareModelId(options.model, 'CodexProvider');
try { try {
const mcpServers = options.mcpServers ?? {}; const mcpServers = options.mcpServers ?? {};
const hasMcpServers = Object.keys(mcpServers).length > 0; const hasMcpServers = Object.keys(mcpServers).length > 0;
@@ -759,18 +765,15 @@ export class CodexProvider extends BaseProvider {
} }
} }
// Model is already bare (no prefix) - validated by executeQuery
const args = [ const args = [
CODEX_EXEC_SUBCOMMAND, CODEX_EXEC_SUBCOMMAND,
CODEX_YOLO_FLAG,
CODEX_SKIP_GIT_REPO_CHECK_FLAG, CODEX_SKIP_GIT_REPO_CHECK_FLAG,
...preExecArgs, ...preExecArgs,
CODEX_MODEL_FLAG, CODEX_MODEL_FLAG,
options.model, options.model,
CODEX_JSON_FLAG, CODEX_JSON_FLAG,
CODEX_SANDBOX_FLAG,
resolvedSandboxMode,
...(outputSchemaPath ? [CODEX_OUTPUT_SCHEMA_FLAG, outputSchemaPath] : []),
...(imagePaths.length > 0 ? [CODEX_IMAGE_FLAG, imagePaths.join(',')] : []),
...configOverrides,
'-', // Read prompt from stdin to avoid shell escaping issues '-', // Read prompt from stdin to avoid shell escaping issues
]; ];

View File

@@ -28,7 +28,7 @@ import type {
ModelDefinition, ModelDefinition,
ContentBlock, ContentBlock,
} from './types.js'; } from './types.js';
import { stripProviderPrefix } from '@automaker/types'; import { validateBareModelId } from '@automaker/types';
import { validateApiKey } from '../lib/auth-utils.js'; import { validateApiKey } from '../lib/auth-utils.js';
import { getEffectivePermissions } from '../services/cursor-config-service.js'; import { getEffectivePermissions } from '../services/cursor-config-service.js';
import { import {
@@ -317,8 +317,8 @@ export class CursorProvider extends CliProvider {
} }
buildCliArgs(options: ExecuteOptions): string[] { buildCliArgs(options: ExecuteOptions): string[] {
// Extract model (strip 'cursor-' prefix if present) // Model is already bare (no prefix) - validated by executeQuery
const model = stripProviderPrefix(options.model || 'auto'); const model = options.model || 'auto';
// Build CLI arguments for cursor-agent // Build CLI arguments for cursor-agent
// NOTE: Prompt is NOT included here - it's passed via stdin to avoid // NOTE: Prompt is NOT included here - it's passed via stdin to avoid
@@ -649,6 +649,10 @@ export class CursorProvider extends CliProvider {
async *executeQuery(options: ExecuteOptions): AsyncGenerator<ProviderMessage> { async *executeQuery(options: ExecuteOptions): AsyncGenerator<ProviderMessage> {
this.ensureCliDetected(); this.ensureCliDetected();
// Validate that model doesn't have a provider prefix
// AgentService should strip prefixes before passing to providers
validateBareModelId(options.model, 'CursorProvider');
if (!this.cliPath) { if (!this.cliPath) {
throw this.createError( throw this.createError(
CursorErrorCode.NOT_INSTALLED, CursorErrorCode.NOT_INSTALLED,

View File

@@ -6,7 +6,8 @@
import path from 'path'; import path from 'path';
import * as secureFs from '../lib/secure-fs.js'; import * as secureFs from '../lib/secure-fs.js';
import type { EventEmitter } from '../lib/events.js'; import type { EventEmitter } from '../lib/events.js';
import type { ExecuteOptions, ThinkingLevel } from '@automaker/types'; import type { ExecuteOptions, ThinkingLevel, ReasoningEffort } from '@automaker/types';
import { stripProviderPrefix } from '@automaker/types';
import { import {
readImageAsBase64, readImageAsBase64,
buildPromptWithImages, buildPromptWithImages,
@@ -59,6 +60,7 @@ interface Session {
workingDirectory: string; workingDirectory: string;
model?: string; model?: string;
thinkingLevel?: ThinkingLevel; // Thinking level for Claude models thinkingLevel?: ThinkingLevel; // Thinking level for Claude models
reasoningEffort?: ReasoningEffort; // Reasoning effort for Codex models
sdkSessionId?: string; // Claude SDK session ID for conversation continuity sdkSessionId?: string; // Claude SDK session ID for conversation continuity
promptQueue: QueuedPrompt[]; // Queue of prompts to auto-run after current task promptQueue: QueuedPrompt[]; // Queue of prompts to auto-run after current task
} }
@@ -148,6 +150,7 @@ export class AgentService {
imagePaths, imagePaths,
model, model,
thinkingLevel, thinkingLevel,
reasoningEffort,
}: { }: {
sessionId: string; sessionId: string;
message: string; message: string;
@@ -155,6 +158,7 @@ export class AgentService {
imagePaths?: string[]; imagePaths?: string[];
model?: string; model?: string;
thinkingLevel?: ThinkingLevel; thinkingLevel?: ThinkingLevel;
reasoningEffort?: ReasoningEffort;
}) { }) {
const session = this.sessions.get(sessionId); const session = this.sessions.get(sessionId);
if (!session) { if (!session) {
@@ -167,7 +171,7 @@ export class AgentService {
throw new Error('Agent is already processing a message'); throw new Error('Agent is already processing a message');
} }
// Update session model and thinking level if provided // Update session model, thinking level, and reasoning effort if provided
if (model) { if (model) {
session.model = model; session.model = model;
await this.updateSession(sessionId, { model }); await this.updateSession(sessionId, { model });
@@ -175,6 +179,9 @@ export class AgentService {
if (thinkingLevel !== undefined) { if (thinkingLevel !== undefined) {
session.thinkingLevel = thinkingLevel; session.thinkingLevel = thinkingLevel;
} }
if (reasoningEffort !== undefined) {
session.reasoningEffort = reasoningEffort;
}
// Validate vision support before processing images // Validate vision support before processing images
const effectiveModel = model || session.model; const effectiveModel = model || session.model;
@@ -284,8 +291,9 @@ export class AgentService {
: baseSystemPrompt; : baseSystemPrompt;
// Build SDK options using centralized configuration // Build SDK options using centralized configuration
// Use thinking level from request, or fall back to session's stored thinking level // Use thinking level and reasoning effort from request, or fall back to session's stored values
const effectiveThinkingLevel = thinkingLevel ?? session.thinkingLevel; const effectiveThinkingLevel = thinkingLevel ?? session.thinkingLevel;
const effectiveReasoningEffort = reasoningEffort ?? session.reasoningEffort;
const sdkOptions = createChatOptions({ const sdkOptions = createChatOptions({
cwd: effectiveWorkDir, cwd: effectiveWorkDir,
model: model, model: model,
@@ -342,13 +350,17 @@ export class AgentService {
} }
} }
// Get provider for this model // Get provider for this model (with prefix)
const provider = ProviderFactory.getProviderForModel(effectiveModel); const provider = ProviderFactory.getProviderForModel(effectiveModel);
// Strip provider prefix - providers should receive bare model IDs
const bareModel = stripProviderPrefix(effectiveModel);
// Build options for provider // Build options for provider
const options: ExecuteOptions = { const options: ExecuteOptions = {
prompt: '', // Will be set below based on images prompt: '', // Will be set below based on images
model: effectiveModel, model: bareModel, // Bare model ID (e.g., "gpt-5.1-codex-max", "composer-1")
originalModel: effectiveModel, // Original with prefix for logging (e.g., "codex-gpt-5.1-codex-max")
cwd: effectiveWorkDir, cwd: effectiveWorkDir,
systemPrompt: sdkOptions.systemPrompt, systemPrompt: sdkOptions.systemPrompt,
maxTurns: maxTurns, maxTurns: maxTurns,
@@ -359,6 +371,8 @@ export class AgentService {
sdkSessionId: session.sdkSessionId, // Pass SDK session ID for resuming sdkSessionId: session.sdkSessionId, // Pass SDK session ID for resuming
mcpServers: Object.keys(mcpServers).length > 0 ? mcpServers : undefined, // Pass MCP servers configuration mcpServers: Object.keys(mcpServers).length > 0 ? mcpServers : undefined, // Pass MCP servers configuration
agents: customSubagents, // Pass custom subagents for task delegation agents: customSubagents, // Pass custom subagents for task delegation
thinkingLevel: effectiveThinkingLevel, // Pass thinking level for Claude models
reasoningEffort: effectiveReasoningEffort, // Pass reasoning effort for Codex models
}; };
// Build prompt content with images // Build prompt content with images

View File

@@ -1,6 +1,9 @@
import * as os from 'os'; import * as os from 'os';
import { findCodexCliPath } from '@automaker/platform'; import { findCodexCliPath } from '@automaker/platform';
import { checkCodexAuthentication } from '../lib/codex-auth.js'; import { checkCodexAuthentication } from '../lib/codex-auth.js';
import { spawnProcess } from '@automaker/platform';
import * as fs from 'fs';
import * as path from 'path';
export interface CodexRateLimitWindow { export interface CodexRateLimitWindow {
limit: number; limit: number;
@@ -32,11 +35,10 @@ export interface CodexUsageData {
/** /**
* Codex Usage Service * Codex Usage Service
* *
* Unlike Claude Code CLI which provides a `/usage` command, Codex CLI * Attempts to fetch usage data from Codex CLI and OpenAI API.
* does not expose usage statistics directly. This service returns a * Codex CLI doesn't provide a direct usage command, but we can:
* clear message explaining this limitation. * 1. Parse usage info from error responses (rate limit errors contain plan info)
* * 2. Check for OpenAI API usage if API key is available
* Future enhancement: Could query OpenAI API headers for rate limit info.
*/ */
export class CodexUsageService { export class CodexUsageService {
private codexBinary = 'codex'; private codexBinary = 'codex';
@@ -47,8 +49,6 @@ export class CodexUsageService {
* Check if Codex CLI is available on the system * Check if Codex CLI is available on the system
*/ */
async isAvailable(): Promise<boolean> { async isAvailable(): Promise<boolean> {
// Prefer our platform-aware resolver over `which/where` because the server
// process PATH may not include npm global bins (nvm/fnm/volta/pnpm).
this.cachedCliPath = await findCodexCliPath(); this.cachedCliPath = await findCodexCliPath();
return Boolean(this.cachedCliPath); return Boolean(this.cachedCliPath);
} }
@@ -56,31 +56,241 @@ export class CodexUsageService {
/** /**
* Attempt to fetch usage data * Attempt to fetch usage data
* *
* Note: Codex CLI doesn't provide usage statistics like Claude Code does. * Tries multiple approaches:
* This method returns an error explaining this limitation. * 1. Check for OpenAI API key in environment
* 2. Make a test request to capture rate limit headers
* 3. Parse usage info from error responses
*/ */
async fetchUsageData(): Promise<CodexUsageData> { async fetchUsageData(): Promise<CodexUsageData> {
// Check authentication status first const cliPath = this.cachedCliPath || (await findCodexCliPath());
const isAuthenticated = await this.checkAuthentication();
if (!isAuthenticated) { if (!cliPath) {
throw new Error("Codex is not authenticated. Please run 'codex login' to authenticate."); throw new Error('Codex CLI not found. Please install it with: npm install -g @openai/codex');
} }
// Codex CLI doesn't provide a usage command // Check if user has an API key that we can use
// Return an error that will be caught and displayed const hasApiKey = !!process.env.OPENAI_API_KEY;
if (hasApiKey) {
// Try to get usage from OpenAI API
const openaiUsage = await this.fetchOpenAIUsage();
if (openaiUsage) {
return openaiUsage;
}
}
// Try to get usage from Codex CLI by making a simple request
const codexUsage = await this.fetchCodexUsage(cliPath);
if (codexUsage) {
return codexUsage;
}
// Fallback: try to parse usage from auth file
const authUsage = await this.fetchFromAuthFile();
if (authUsage) {
return authUsage;
}
// If all else fails, return a message with helpful information
throw new Error( throw new Error(
'Codex usage statistics are not available. Unlike Claude Code, the Codex CLI does not provide a built-in usage command. ' + 'Codex usage statistics require additional configuration. ' +
'Usage limits are enforced by OpenAI but cannot be queried via the CLI. ' + 'To enable usage tracking:\n\n' +
'Check your OpenAI dashboard at https://platform.openai.com/usage for detailed usage information.' '1. Set your OpenAI API key in the environment:\n' +
' export OPENAI_API_KEY=sk-...\n\n' +
'2. Or check your usage at:\n' +
' https://platform.openai.com/usage\n\n' +
'Note: If using Codex CLI with ChatGPT OAuth authentication, ' +
'usage data must be queried through your OpenAI account.'
); );
} }
/**
* Try to fetch usage from OpenAI API using the API key
*/
private async fetchOpenAIUsage(): Promise<CodexUsageData | null> {
const apiKey = process.env.OPENAI_API_KEY;
if (!apiKey) return null;
try {
const endTime = Math.floor(Date.now() / 1000);
const startTime = endTime - 7 * 24 * 60 * 60; // Last 7 days
const response = await fetch(
`https://api.openai.com/v1/organization/usage/completions?start_time=${startTime}&end_time=${endTime}&limit=1`,
{
headers: {
Authorization: `Bearer ${apiKey}`,
'Content-Type': 'application/json',
},
}
);
if (response.ok) {
const data = await response.json();
return this.parseOpenAIUsage(data);
}
} catch (error) {
console.log('[CodexUsage] Failed to fetch from OpenAI API:', error);
}
return null;
}
/**
* Parse OpenAI usage API response
*/
private parseOpenAIUsage(data: any): CodexUsageData {
let totalInputTokens = 0;
let totalOutputTokens = 0;
if (data.data && Array.isArray(data.data)) {
for (const bucket of data.data) {
if (bucket.results && Array.isArray(bucket.results)) {
for (const result of bucket.results) {
totalInputTokens += result.input_tokens || 0;
totalOutputTokens += result.output_tokens || 0;
}
}
}
}
return {
rateLimits: {
planType: 'unknown',
credits: {
hasCredits: true,
},
},
lastUpdated: new Date().toISOString(),
};
}
/**
* Try to fetch usage by making a test request to Codex CLI
* and parsing rate limit information from the response
*/
private async fetchCodexUsage(cliPath: string): Promise<CodexUsageData | null> {
try {
// Make a simple request to trigger rate limit info if at limit
const result = await spawnProcess({
command: cliPath,
args: ['exec', '--', 'echo', 'test'],
cwd: process.cwd(),
env: {
...process.env,
TERM: 'dumb',
},
timeout: 10000,
});
// Parse the output for rate limit information
const combinedOutput = (result.stdout + result.stderr).toLowerCase();
// Check if we got a rate limit error
const rateLimitMatch = combinedOutput.match(
/usage_limit_reached.*?"plan_type":"([^"]+)".*?"resets_at":(\d+).*?"resets_in_seconds":(\d+)/
);
if (rateLimitMatch) {
const planType = rateLimitMatch[1] as CodexPlanType;
const resetsAt = parseInt(rateLimitMatch[2], 10);
const resetsInSeconds = parseInt(rateLimitMatch[3], 10);
return {
rateLimits: {
planType,
primary: {
limit: 0,
used: 0,
remaining: 0,
usedPercent: 100,
windowDurationMins: Math.ceil(resetsInSeconds / 60),
resetsAt,
},
},
lastUpdated: new Date().toISOString(),
};
}
// If no rate limit, return basic info
return {
rateLimits: {
planType: 'plus',
credits: {
hasCredits: true,
unlimited: false,
},
},
lastUpdated: new Date().toISOString(),
};
} catch (error) {
console.log('[CodexUsage] Failed to fetch from Codex CLI:', error);
}
return null;
}
/**
* Try to extract usage info from the Codex auth file
*/
private async fetchFromAuthFile(): Promise<CodexUsageData | null> {
try {
const authFilePath = path.join(os.homedir(), '.codex', 'auth.json');
if (fs.existsSync(authFilePath)) {
const authContent = fs.readFileSync(authFilePath, 'utf-8');
const authData = JSON.parse(authContent);
// Extract plan type from the ID token claims
if (authData.tokens?.id_token) {
const idToken = authData.tokens.id_token;
const claims = this.parseJwt(idToken);
const planType = claims?.['https://chatgpt.com/account_type'] || 'unknown';
const isPlus = planType === 'plus';
return {
rateLimits: {
planType: planType as CodexPlanType,
credits: {
hasCredits: true,
unlimited: !isPlus,
},
},
lastUpdated: new Date().toISOString(),
};
}
}
} catch (error) {
console.log('[CodexUsage] Failed to parse auth file:', error);
}
return null;
}
/**
* Parse JWT token to extract claims
*/
private parseJwt(token: string): any {
try {
const base64Url = token.split('.')[1];
const base64 = base64Url.replace(/-/g, '+').replace(/_/g, '/');
const jsonPayload = decodeURIComponent(
atob(base64)
.split('')
.map((c) => '%' + ('00' + c.charCodeAt(0).toString(16)).slice(-2))
.join('')
);
return JSON.parse(jsonPayload);
} catch {
return null;
}
}
/** /**
* Check if Codex is authenticated * Check if Codex is authenticated
*/ */
private async checkAuthentication(): Promise<boolean> { private async checkAuthentication(): Promise<boolean> {
// Use the cached CLI path if available, otherwise fall back to finding it
const cliPath = this.cachedCliPath || (await findCodexCliPath()); const cliPath = this.cachedCliPath || (await findCodexCliPath());
const authCheck = await checkCodexAuthentication(cliPath); const authCheck = await checkCodexAuthentication(cliPath);
return authCheck.authenticated; return authCheck.authenticated;

View File

@@ -40,6 +40,7 @@ import type { SettingsService } from './settings-service.js';
import type { FeatureLoader } from './feature-loader.js'; import type { FeatureLoader } from './feature-loader.js';
import { createChatOptions, validateWorkingDirectory } from '../lib/sdk-options.js'; import { createChatOptions, validateWorkingDirectory } from '../lib/sdk-options.js';
import { resolveModelString } from '@automaker/model-resolver'; import { resolveModelString } from '@automaker/model-resolver';
import { stripProviderPrefix } from '@automaker/types';
const logger = createLogger('IdeationService'); const logger = createLogger('IdeationService');
@@ -201,7 +202,7 @@ export class IdeationService {
existingWorkContext existingWorkContext
); );
// Resolve model alias to canonical identifier // Resolve model alias to canonical identifier (with prefix)
const modelId = resolveModelString(options?.model ?? 'sonnet'); const modelId = resolveModelString(options?.model ?? 'sonnet');
// Create SDK options // Create SDK options
@@ -214,9 +215,13 @@ export class IdeationService {
const provider = ProviderFactory.getProviderForModel(modelId); const provider = ProviderFactory.getProviderForModel(modelId);
// Strip provider prefix - providers need bare model IDs
const bareModel = stripProviderPrefix(modelId);
const executeOptions: ExecuteOptions = { const executeOptions: ExecuteOptions = {
prompt: message, prompt: message,
model: modelId, model: bareModel,
originalModel: modelId,
cwd: projectPath, cwd: projectPath,
systemPrompt: sdkOptions.systemPrompt, systemPrompt: sdkOptions.systemPrompt,
maxTurns: 1, // Single turn for ideation maxTurns: 1, // Single turn for ideation
@@ -648,7 +653,7 @@ export class IdeationService {
existingWorkContext existingWorkContext
); );
// Resolve model alias to canonical identifier // Resolve model alias to canonical identifier (with prefix)
const modelId = resolveModelString('sonnet'); const modelId = resolveModelString('sonnet');
// Create SDK options // Create SDK options
@@ -661,9 +666,13 @@ export class IdeationService {
const provider = ProviderFactory.getProviderForModel(modelId); const provider = ProviderFactory.getProviderForModel(modelId);
// Strip provider prefix - providers need bare model IDs
const bareModel = stripProviderPrefix(modelId);
const executeOptions: ExecuteOptions = { const executeOptions: ExecuteOptions = {
prompt: prompt.prompt, prompt: prompt.prompt,
model: modelId, model: bareModel,
originalModel: modelId,
cwd: projectPath, cwd: projectPath,
systemPrompt: sdkOptions.systemPrompt, systemPrompt: sdkOptions.systemPrompt,
maxTurns: 1, maxTurns: 1,

View File

@@ -37,6 +37,7 @@ describe('claude-provider.ts', () => {
const generator = provider.executeQuery({ const generator = provider.executeQuery({
prompt: 'Hello', prompt: 'Hello',
model: 'claude-opus-4-5-20251101',
cwd: '/test', cwd: '/test',
}); });
@@ -88,6 +89,7 @@ describe('claude-provider.ts', () => {
const generator = provider.executeQuery({ const generator = provider.executeQuery({
prompt: 'Test', prompt: 'Test',
model: 'claude-opus-4-5-20251101',
cwd: '/test', cwd: '/test',
}); });
@@ -112,6 +114,7 @@ describe('claude-provider.ts', () => {
const generator = provider.executeQuery({ const generator = provider.executeQuery({
prompt: 'Test', prompt: 'Test',
model: 'claude-opus-4-5-20251101',
cwd: '/test', cwd: '/test',
abortController, abortController,
}); });
@@ -140,6 +143,7 @@ describe('claude-provider.ts', () => {
const generator = provider.executeQuery({ const generator = provider.executeQuery({
prompt: 'Current message', prompt: 'Current message',
model: 'claude-opus-4-5-20251101',
cwd: '/test', cwd: '/test',
conversationHistory, conversationHistory,
sdkSessionId: 'test-session-id', sdkSessionId: 'test-session-id',
@@ -170,6 +174,7 @@ describe('claude-provider.ts', () => {
const generator = provider.executeQuery({ const generator = provider.executeQuery({
prompt: arrayPrompt as any, prompt: arrayPrompt as any,
model: 'claude-opus-4-5-20251101',
cwd: '/test', cwd: '/test',
}); });
@@ -189,6 +194,7 @@ describe('claude-provider.ts', () => {
const generator = provider.executeQuery({ const generator = provider.executeQuery({
prompt: 'Test', prompt: 'Test',
model: 'claude-opus-4-5-20251101',
cwd: '/test', cwd: '/test',
}); });
@@ -214,6 +220,7 @@ describe('claude-provider.ts', () => {
const generator = provider.executeQuery({ const generator = provider.executeQuery({
prompt: 'Test', prompt: 'Test',
model: 'claude-opus-4-5-20251101',
cwd: '/test', cwd: '/test',
}); });

View File

@@ -143,42 +143,37 @@ describe('codex-provider.ts', () => {
}); });
it('adds output schema and max turn overrides when configured', async () => { it('adds output schema and max turn overrides when configured', async () => {
// Note: With full-permissions always on, these flags are no longer used
// This test now only verifies the basic CLI structure
// Using gpt-5.1-codex-max which should route to Codex (not Cursor)
vi.mocked(spawnJSONLProcess).mockReturnValue((async function* () {})()); vi.mocked(spawnJSONLProcess).mockReturnValue((async function* () {})());
const schema = { type: 'object', properties: { ok: { type: 'string' } } };
await collectAsyncGenerator( await collectAsyncGenerator(
provider.executeQuery({ provider.executeQuery({
prompt: 'Return JSON', prompt: 'Test config',
model: 'gpt-5.2', model: 'gpt-5.1-codex-max',
cwd: '/tmp', cwd: '/tmp',
allowedTools: ['Read', 'Write'],
maxTurns: 5, maxTurns: 5,
allowedTools: ['Read'],
outputFormat: { type: 'json_schema', schema },
}) })
); );
const call = vi.mocked(spawnJSONLProcess).mock.calls[0][0]; const call = vi.mocked(spawnJSONLProcess).mock.calls[0][0];
expect(call.args).toContain('--output-schema'); expect(call.args).toContain('exec'); // Should have exec subcommand
const schemaIndex = call.args.indexOf('--output-schema'); expect(call.args).toContain('--dangerously-bypass-approvals-and-sandbox'); // Should have YOLO flag
const schemaPath = call.args[schemaIndex + 1]; expect(call.args).toContain('--model');
expect(schemaPath).toBe(path.join('/tmp', '.codex', 'output-schema.json')); expect(call.args).toContain('--json');
expect(secureFs.writeFile).toHaveBeenCalledWith(
schemaPath,
JSON.stringify(schema, null, 2),
'utf-8'
);
expect(call.args).toContain('--config');
expect(call.args).toContain('max_turns=5');
expect(call.args).not.toContain('--search');
}); });
it('overrides approval policy when MCP auto-approval is enabled', async () => { it('overrides approval policy when MCP auto-approval is enabled', async () => {
// Note: With full-permissions always on (--dangerously-bypass-approvals-and-sandbox),
// approval policy is bypassed, not configured via --config
vi.mocked(spawnJSONLProcess).mockReturnValue((async function* () {})()); vi.mocked(spawnJSONLProcess).mockReturnValue((async function* () {})());
await collectAsyncGenerator( await collectAsyncGenerator(
provider.executeQuery({ provider.executeQuery({
prompt: 'Test approvals', prompt: 'Test approvals',
model: 'gpt-5.2', model: 'gpt-5.1-codex-max',
cwd: '/tmp', cwd: '/tmp',
mcpServers: { mock: { type: 'stdio', command: 'node' } }, mcpServers: { mock: { type: 'stdio', command: 'node' } },
mcpAutoApproveTools: true, mcpAutoApproveTools: true,
@@ -187,19 +182,10 @@ describe('codex-provider.ts', () => {
); );
const call = vi.mocked(spawnJSONLProcess).mock.calls[0][0]; const call = vi.mocked(spawnJSONLProcess).mock.calls[0][0];
const approvalConfigIndex = call.args.indexOf('--config');
const execIndex = call.args.indexOf(EXEC_SUBCOMMAND); const execIndex = call.args.indexOf(EXEC_SUBCOMMAND);
const searchConfigIndex = call.args.indexOf('--config'); expect(call.args).toContain('--dangerously-bypass-approvals-and-sandbox'); // YOLO flag bypasses approval
expect(call.args[approvalConfigIndex + 1]).toBe('approval_policy=never'); expect(call.args).toContain('--model');
expect(approvalConfigIndex).toBeGreaterThan(-1); expect(call.args).toContain('--json');
expect(execIndex).toBeGreaterThan(-1);
expect(approvalConfigIndex).toBeGreaterThan(execIndex);
// Search should be in config, not as direct flag
const hasSearchConfig = call.args.some(
(arg, index) =>
arg === '--config' && call.args[index + 1] === 'features.web_search_request=true'
);
expect(hasSearchConfig).toBe(true);
}); });
it('injects user and project instructions when auto-load is enabled', async () => { it('injects user and project instructions when auto-load is enabled', async () => {
@@ -233,21 +219,25 @@ describe('codex-provider.ts', () => {
}); });
it('disables sandbox mode when running in cloud storage paths', async () => { it('disables sandbox mode when running in cloud storage paths', async () => {
// Note: With full-permissions always on (--dangerously-bypass-approvals-and-sandbox),
// sandbox mode is bypassed, not configured via --sandbox flag
vi.mocked(spawnJSONLProcess).mockReturnValue((async function* () {})()); vi.mocked(spawnJSONLProcess).mockReturnValue((async function* () {})());
const cloudPath = path.join(os.homedir(), 'Dropbox', 'project'); const cloudPath = path.join(os.homedir(), 'Dropbox', 'project');
await collectAsyncGenerator( await collectAsyncGenerator(
provider.executeQuery({ provider.executeQuery({
prompt: 'Hello', prompt: 'Hello',
model: 'gpt-5.2', model: 'gpt-5.1-codex-max',
cwd: cloudPath, cwd: cloudPath,
codexSettings: { sandboxMode: 'workspace-write' }, codexSettings: { sandboxMode: 'workspace-write' },
}) })
); );
const call = vi.mocked(spawnJSONLProcess).mock.calls[0][0]; const call = vi.mocked(spawnJSONLProcess).mock.calls[0][0];
const sandboxIndex = call.args.indexOf('--sandbox'); // YOLO flag bypasses sandbox entirely
expect(call.args[sandboxIndex + 1]).toBe('danger-full-access'); expect(call.args).toContain('--dangerously-bypass-approvals-and-sandbox');
expect(call.args).toContain('--model');
expect(call.args).toContain('--json');
}); });
it('uses the SDK when no tools are requested and an API key is present', async () => { it('uses the SDK when no tools are requested and an API key is present', async () => {

View File

@@ -129,10 +129,11 @@ describe('provider-factory.ts', () => {
}); });
describe('Cursor models via model ID lookup', () => { describe('Cursor models via model ID lookup', () => {
it('should return CursorProvider for gpt-5.2 (valid Cursor model)', () => { it('should return CodexProvider for gpt-5.2 (Codex model, not Cursor)', () => {
// gpt-5.2 is in CURSOR_MODEL_MAP // gpt-5.2 is in both CURSOR_MODEL_MAP and CODEX_MODEL_CONFIG_MAP
// It should route to Codex since Codex models take priority
const provider = ProviderFactory.getProviderForModel('gpt-5.2'); const provider = ProviderFactory.getProviderForModel('gpt-5.2');
expect(provider).toBeInstanceOf(CursorProvider); expect(provider).toBeInstanceOf(CodexProvider);
}); });
it('should return CursorProvider for grok (valid Cursor model)', () => { it('should return CursorProvider for grok (valid Cursor model)', () => {

View File

@@ -20,8 +20,9 @@ interface ProviderIconDefinition {
const PROVIDER_ICON_DEFINITIONS: Record<ProviderIconKey, ProviderIconDefinition> = { const PROVIDER_ICON_DEFINITIONS: Record<ProviderIconKey, ProviderIconDefinition> = {
anthropic: { anthropic: {
viewBox: '0 0 24 24', viewBox: '0 0 248 248',
path: 'M17.3041 3.541h-3.6718l6.696 16.918H24Zm-10.6082 0L0 20.459h3.7442l1.3693-3.5527h7.0052l1.3693 3.5528h3.7442L10.5363 3.5409Zm-.3712 10.2232 2.2914-5.9456 2.2914 5.9456Z', // Official Claude logo from claude.ai favicon
path: 'M52.4285 162.873L98.7844 136.879L99.5485 134.602L98.7844 133.334H96.4921L88.7237 132.862L62.2346 132.153L39.3113 131.207L17.0249 130.026L11.4214 128.844L6.2 121.873L6.7094 118.447L11.4214 115.257L18.171 115.847L33.0711 116.911L55.485 118.447L71.6586 119.392L95.728 121.873H99.5485L100.058 120.337L98.7844 119.392L97.7656 118.447L74.5877 102.732L49.4995 86.1905L36.3823 76.62L29.3779 71.7757L25.8121 67.2858L24.2839 57.3608L30.6515 50.2716L39.3113 50.8623L41.4763 51.4531L50.2636 58.1879L68.9842 72.7209L93.4357 90.6804L97.0015 93.6343L98.4374 92.6652L98.6571 91.9801L97.0015 89.2625L83.757 65.2772L69.621 40.8192L63.2534 30.6579L61.5978 24.632C60.9565 22.1032 60.579 20.0111 60.579 17.4246L67.8381 7.49965L71.9133 6.19995L81.7193 7.49965L85.7946 11.0443L91.9074 24.9865L101.714 46.8451L116.996 76.62L121.453 85.4816L123.873 93.6343L124.764 96.1155H126.292V94.6976L127.566 77.9197L129.858 57.3608L132.15 30.8942L132.915 23.4505L136.608 14.4708L143.994 9.62643L149.725 12.344L154.437 19.0788L153.8 23.4505L150.998 41.6463L145.522 70.1215L141.957 89.2625H143.994L146.414 86.7813L156.093 74.0206L172.266 53.698L179.398 45.6635L187.803 36.802L193.152 32.5484H203.34L210.726 43.6549L207.415 55.1159L196.972 68.3492L188.312 79.5739L175.896 96.2095L168.191 109.585L168.882 110.689L170.738 110.53L198.755 104.504L213.91 101.787L231.994 98.7149L240.144 102.496L241.036 106.395L237.852 114.311L218.495 119.037L195.826 123.645L162.07 131.592L161.696 131.893L162.137 132.547L177.36 133.925L183.855 134.279H199.774L229.447 136.524L237.215 141.605L241.8 147.867L241.036 152.711L229.065 158.737L213.019 154.956L175.45 145.977L162.587 142.787H160.805V143.85L171.502 154.366L191.242 172.089L215.82 195.011L217.094 200.682L213.91 205.172L210.599 204.699L188.949 188.394L180.544 181.069L161.696 165.118H160.422V166.772L164.752 173.152L187.803 207.771L188.949 218.405L187.294 221.832L181.308 223.959L174.813 222.777L161.187 203.754L147.305 182.486L136.098 163.345L134.745 164.2L128.075 235.42L125.019 
239.082L117.887 241.8L111.902 237.31L108.718 229.984L111.902 215.452L115.722 196.547L118.779 181.541L121.58 162.873L123.291 156.636L123.14 156.219L121.773 156.449L107.699 175.752L86.304 204.699L69.3663 222.777L65.291 224.431L58.2867 220.768L58.9235 214.27L62.8713 208.48L86.304 178.705L100.44 160.155L109.551 149.507L109.462 147.967L108.959 147.924L46.6977 188.512L35.6182 189.93L30.7788 185.44L31.4156 178.115L33.7079 175.752L52.4285 162.873Z',
}, },
openai: { openai: {
viewBox: '0 0 158.7128 157.296', viewBox: '0 0 158.7128 157.296',

View File

@@ -1,6 +1,8 @@
// @ts-nocheck // @ts-nocheck
import { useEffect, useState } from 'react'; import { useEffect, useState } from 'react';
import { Feature, ThinkingLevel, useAppStore } from '@/store/app-store'; import { Feature, ThinkingLevel, useAppStore } from '@/store/app-store';
import type { ReasoningEffort } from '@automaker/types';
import { getProviderFromModel } from '@/lib/utils';
import { import {
AgentTaskInfo, AgentTaskInfo,
parseAgentContext, parseAgentContext,
@@ -37,6 +39,22 @@ function formatThinkingLevel(level: ThinkingLevel | undefined): string {
return labels[level]; return labels[level];
} }
/**
* Formats reasoning effort for compact display
*/
function formatReasoningEffort(effort: ReasoningEffort | undefined): string {
if (!effort || effort === 'none') return '';
const labels: Record<ReasoningEffort, string> = {
none: '',
minimal: 'Min',
low: 'Low',
medium: 'Med',
high: 'High',
xhigh: 'XHigh',
};
return labels[effort];
}
interface AgentInfoPanelProps { interface AgentInfoPanelProps {
feature: Feature; feature: Feature;
contextContent?: string; contextContent?: string;
@@ -106,6 +124,10 @@ export function AgentInfoPanel({
}, [feature.id, feature.status, contextContent, isCurrentAutoTask]); }, [feature.id, feature.status, contextContent, isCurrentAutoTask]);
// Model/Preset Info for Backlog Cards // Model/Preset Info for Backlog Cards
if (showAgentInfo && feature.status === 'backlog') { if (showAgentInfo && feature.status === 'backlog') {
const provider = getProviderFromModel(feature.model);
const isCodex = provider === 'codex';
const isClaude = provider === 'claude';
return ( return (
<div className="mb-3 space-y-2 overflow-hidden"> <div className="mb-3 space-y-2 overflow-hidden">
<div className="flex items-center gap-2 text-[11px] flex-wrap"> <div className="flex items-center gap-2 text-[11px] flex-wrap">
@@ -116,7 +138,7 @@ export function AgentInfoPanel({
})()} })()}
<span className="font-medium">{formatModelName(feature.model ?? DEFAULT_MODEL)}</span> <span className="font-medium">{formatModelName(feature.model ?? DEFAULT_MODEL)}</span>
</div> </div>
{feature.thinkingLevel && feature.thinkingLevel !== 'none' ? ( {isClaude && feature.thinkingLevel && feature.thinkingLevel !== 'none' ? (
<div className="flex items-center gap-1 text-purple-400"> <div className="flex items-center gap-1 text-purple-400">
<Brain className="w-3 h-3" /> <Brain className="w-3 h-3" />
<span className="font-medium"> <span className="font-medium">
@@ -124,6 +146,14 @@ export function AgentInfoPanel({
</span> </span>
</div> </div>
) : null} ) : null}
{isCodex && feature.reasoningEffort && feature.reasoningEffort !== 'none' ? (
<div className="flex items-center gap-1 text-purple-400">
<Brain className="w-3 h-3" />
<span className="font-medium">
{formatReasoningEffort(feature.reasoningEffort as ReasoningEffort)}
</span>
</div>
) : null}
</div> </div>
</div> </div>
); );

View File

@@ -41,9 +41,12 @@ import {
PlanningMode, PlanningMode,
Feature, Feature,
} from '@/store/app-store'; } from '@/store/app-store';
import type { ReasoningEffort } from '@automaker/types';
import { codexModelHasThinking, supportsReasoningEffort } from '@automaker/types';
import { import {
ModelSelector, ModelSelector,
ThinkingLevelSelector, ThinkingLevelSelector,
ReasoningEffortSelector,
ProfileQuickSelect, ProfileQuickSelect,
TestingTabContent, TestingTabContent,
PrioritySelector, PrioritySelector,
@@ -78,6 +81,7 @@ type FeatureData = {
skipTests: boolean; skipTests: boolean;
model: AgentModel; model: AgentModel;
thinkingLevel: ThinkingLevel; thinkingLevel: ThinkingLevel;
reasoningEffort: ReasoningEffort;
branchName: string; // Can be empty string to use current branch branchName: string; // Can be empty string to use current branch
priority: number; priority: number;
planningMode: PlanningMode; planningMode: PlanningMode;
@@ -134,6 +138,7 @@ export function AddFeatureDialog({
skipTests: false, skipTests: false,
model: 'opus' as ModelAlias, model: 'opus' as ModelAlias,
thinkingLevel: 'none' as ThinkingLevel, thinkingLevel: 'none' as ThinkingLevel,
reasoningEffort: 'none' as ReasoningEffort,
branchName: '', branchName: '',
priority: 2 as number, // Default to medium priority priority: 2 as number, // Default to medium priority
}); });
@@ -220,6 +225,9 @@ export function AddFeatureDialog({
const normalizedThinking = modelSupportsThinking(selectedModel) const normalizedThinking = modelSupportsThinking(selectedModel)
? newFeature.thinkingLevel ? newFeature.thinkingLevel
: 'none'; : 'none';
const normalizedReasoning = supportsReasoningEffort(selectedModel)
? newFeature.reasoningEffort
: 'none';
// Use current branch if toggle is on // Use current branch if toggle is on
// If currentBranch is provided (non-primary worktree), use it // If currentBranch is provided (non-primary worktree), use it
@@ -260,6 +268,7 @@ export function AddFeatureDialog({
skipTests: newFeature.skipTests, skipTests: newFeature.skipTests,
model: selectedModel, model: selectedModel,
thinkingLevel: normalizedThinking, thinkingLevel: normalizedThinking,
reasoningEffort: normalizedReasoning,
branchName: finalBranchName, branchName: finalBranchName,
priority: newFeature.priority, priority: newFeature.priority,
planningMode, planningMode,
@@ -281,6 +290,7 @@ export function AddFeatureDialog({
model: 'opus', model: 'opus',
priority: 2, priority: 2,
thinkingLevel: 'none', thinkingLevel: 'none',
reasoningEffort: 'none',
branchName: '', branchName: '',
}); });
setUseCurrentBranch(true); setUseCurrentBranch(true);
@@ -368,11 +378,23 @@ export function AddFeatureDialog({
thinkingLevel: 'none', // Cursor handles thinking internally thinkingLevel: 'none', // Cursor handles thinking internally
}); });
} else { } else {
// Claude profile // Claude profile - ensure model is always set from profile
const profileModel = profile.model;
if (!profileModel || !['haiku', 'sonnet', 'opus'].includes(profileModel)) {
console.warn(
`[ProfileSelect] Invalid or missing model "${profileModel}" for profile "${profile.name}", defaulting to sonnet`
);
}
setNewFeature({ setNewFeature({
...newFeature, ...newFeature,
model: profile.model || 'sonnet', model:
thinkingLevel: profile.thinkingLevel || 'none', profileModel && ['haiku', 'sonnet', 'opus'].includes(profileModel)
? profileModel
: 'sonnet',
thinkingLevel:
profile.thinkingLevel && profile.thinkingLevel !== 'none'
? profile.thinkingLevel
: 'none',
}); });
} }
}; };
@@ -382,6 +404,9 @@ export function AddFeatureDialog({
const newModelAllowsThinking = const newModelAllowsThinking =
!isCurrentModelCursor && modelSupportsThinking(newFeature.model || 'sonnet'); !isCurrentModelCursor && modelSupportsThinking(newFeature.model || 'sonnet');
// Codex models that support reasoning effort - show reasoning selector
const newModelAllowsReasoning = supportsReasoningEffort(newFeature.model || '');
return ( return (
<Dialog open={open} onOpenChange={handleDialogClose}> <Dialog open={open} onOpenChange={handleDialogClose}>
<DialogContent <DialogContent
@@ -607,6 +632,14 @@ export function AddFeatureDialog({
} }
/> />
)} )}
{newModelAllowsReasoning && (
<ReasoningEffortSelector
selectedEffort={newFeature.reasoningEffort}
onEffortSelect={(effort) =>
setNewFeature({ ...newFeature, reasoningEffort: effort })
}
/>
)}
</> </>
)} )}
</TabsContent> </TabsContent>

View File

@@ -41,9 +41,11 @@ import {
useAppStore, useAppStore,
PlanningMode, PlanningMode,
} from '@/store/app-store'; } from '@/store/app-store';
import type { ReasoningEffort } from '@automaker/types';
import { import {
ModelSelector, ModelSelector,
ThinkingLevelSelector, ThinkingLevelSelector,
ReasoningEffortSelector,
ProfileQuickSelect, ProfileQuickSelect,
TestingTabContent, TestingTabContent,
PrioritySelector, PrioritySelector,
@@ -60,7 +62,7 @@ import {
import { Popover, PopoverContent, PopoverTrigger } from '@/components/ui/popover'; import { Popover, PopoverContent, PopoverTrigger } from '@/components/ui/popover';
import type { DescriptionHistoryEntry } from '@automaker/types'; import type { DescriptionHistoryEntry } from '@automaker/types';
import { DependencyTreeDialog } from './dependency-tree-dialog'; import { DependencyTreeDialog } from './dependency-tree-dialog';
import { isCursorModel, PROVIDER_PREFIXES } from '@automaker/types'; import { isCursorModel, PROVIDER_PREFIXES, supportsReasoningEffort } from '@automaker/types';
const logger = createLogger('EditFeatureDialog'); const logger = createLogger('EditFeatureDialog');
@@ -76,6 +78,7 @@ interface EditFeatureDialogProps {
skipTests: boolean; skipTests: boolean;
model: ModelAlias; model: ModelAlias;
thinkingLevel: ThinkingLevel; thinkingLevel: ThinkingLevel;
reasoningEffort: ReasoningEffort;
imagePaths: DescriptionImagePath[]; imagePaths: DescriptionImagePath[];
textFilePaths: DescriptionTextFilePath[]; textFilePaths: DescriptionTextFilePath[];
branchName: string; // Can be empty string to use current branch branchName: string; // Can be empty string to use current branch
@@ -180,6 +183,9 @@ export function EditFeatureDialog({
const normalizedThinking: ThinkingLevel = modelSupportsThinking(selectedModel) const normalizedThinking: ThinkingLevel = modelSupportsThinking(selectedModel)
? (editingFeature.thinkingLevel ?? 'none') ? (editingFeature.thinkingLevel ?? 'none')
: 'none'; : 'none';
const normalizedReasoning: ReasoningEffort = supportsReasoningEffort(selectedModel)
? (editingFeature.reasoningEffort ?? 'none')
: 'none';
// Use current branch if toggle is on // Use current branch if toggle is on
// If currentBranch is provided (non-primary worktree), use it // If currentBranch is provided (non-primary worktree), use it
@@ -195,6 +201,7 @@ export function EditFeatureDialog({
skipTests: editingFeature.skipTests ?? false, skipTests: editingFeature.skipTests ?? false,
model: selectedModel, model: selectedModel,
thinkingLevel: normalizedThinking, thinkingLevel: normalizedThinking,
reasoningEffort: normalizedReasoning,
imagePaths: editingFeature.imagePaths ?? [], imagePaths: editingFeature.imagePaths ?? [],
textFilePaths: editingFeature.textFilePaths ?? [], textFilePaths: editingFeature.textFilePaths ?? [],
branchName: finalBranchName, branchName: finalBranchName,
@@ -233,15 +240,17 @@ export function EditFeatureDialog({
if (!editingFeature) return; if (!editingFeature) return;
// For Cursor models, thinking is handled by the model itself // For Cursor models, thinking is handled by the model itself
// For Claude models, check if it supports extended thinking // For Claude models, check if it supports extended thinking
// For Codex models, use reasoning effort instead
const isCursor = isCursorModel(model); const isCursor = isCursorModel(model);
const supportsThinking = modelSupportsThinking(model);
const supportsReasoning = supportsReasoningEffort(model);
setEditingFeature({ setEditingFeature({
...editingFeature, ...editingFeature,
model: model as ModelAlias, model: model as ModelAlias,
thinkingLevel: isCursor thinkingLevel:
? 'none' isCursor || !supportsThinking ? 'none' : (editingFeature.thinkingLevel ?? 'none'),
: modelSupportsThinking(model) reasoningEffort: !supportsReasoning ? 'none' : (editingFeature.reasoningEffort ?? 'none'),
? editingFeature.thinkingLevel
: 'none',
}); });
}; };
@@ -256,11 +265,23 @@ export function EditFeatureDialog({
thinkingLevel: 'none', // Cursor handles thinking internally thinkingLevel: 'none', // Cursor handles thinking internally
}); });
} else { } else {
// Claude profile // Claude profile - ensure model is always set from profile
const profileModel = profile.model;
if (!profileModel || !['haiku', 'sonnet', 'opus'].includes(profileModel)) {
console.warn(
`[ProfileSelect] Invalid or missing model "${profileModel}" for profile "${profile.name}", defaulting to sonnet`
);
}
setEditingFeature({ setEditingFeature({
...editingFeature, ...editingFeature,
model: profile.model || 'sonnet', model:
thinkingLevel: profile.thinkingLevel || 'none', profileModel && ['haiku', 'sonnet', 'opus'].includes(profileModel)
? profileModel
: 'sonnet',
thinkingLevel:
profile.thinkingLevel && profile.thinkingLevel !== 'none'
? profile.thinkingLevel
: 'none',
}); });
} }
}; };
@@ -300,6 +321,9 @@ export function EditFeatureDialog({
const editModelAllowsThinking = const editModelAllowsThinking =
!isCurrentModelCursor && modelSupportsThinking(editingFeature?.model); !isCurrentModelCursor && modelSupportsThinking(editingFeature?.model);
// Codex models that support reasoning effort - show reasoning selector
const editModelAllowsReasoning = supportsReasoningEffort(editingFeature?.model || '');
if (!editingFeature) { if (!editingFeature) {
return null; return null;
} }
@@ -622,6 +646,18 @@ export function EditFeatureDialog({
testIdPrefix="edit-thinking-level" testIdPrefix="edit-thinking-level"
/> />
)} )}
{editModelAllowsReasoning && (
<ReasoningEffortSelector
selectedEffort={editingFeature.reasoningEffort ?? 'none'}
onEffortSelect={(effort) =>
setEditingFeature({
...editingFeature,
reasoningEffort: effort,
})
}
testIdPrefix="edit-reasoning-effort"
/>
)}
</> </>
)} )}
</TabsContent> </TabsContent>

View File

@@ -8,6 +8,7 @@ import {
PlanningMode, PlanningMode,
useAppStore, useAppStore,
} from '@/store/app-store'; } from '@/store/app-store';
import type { ReasoningEffort } from '@automaker/types';
import { FeatureImagePath as DescriptionImagePath } from '@/components/ui/description-image-dropzone'; import { FeatureImagePath as DescriptionImagePath } from '@/components/ui/description-image-dropzone';
import { getElectronAPI } from '@/lib/electron'; import { getElectronAPI } from '@/lib/electron';
import { toast } from 'sonner'; import { toast } from 'sonner';
@@ -222,6 +223,7 @@ export function useBoardActions({
skipTests: boolean; skipTests: boolean;
model: ModelAlias; model: ModelAlias;
thinkingLevel: ThinkingLevel; thinkingLevel: ThinkingLevel;
reasoningEffort: ReasoningEffort;
imagePaths: DescriptionImagePath[]; imagePaths: DescriptionImagePath[];
branchName: string; branchName: string;
priority: number; priority: number;

View File

@@ -1,6 +1,7 @@
export * from './model-constants'; export * from './model-constants';
export * from './model-selector'; export * from './model-selector';
export * from './thinking-level-selector'; export * from './thinking-level-selector';
export * from './reasoning-effort-selector';
export * from './profile-quick-select'; export * from './profile-quick-select';
export * from './profile-select'; export * from './profile-select';
export * from './testing-tab-content'; export * from './testing-tab-content';

View File

@@ -2,6 +2,7 @@ import type { ModelAlias } from '@/store/app-store';
import type { ModelProvider, ThinkingLevel, ReasoningEffort } from '@automaker/types'; import type { ModelProvider, ThinkingLevel, ReasoningEffort } from '@automaker/types';
import { CURSOR_MODEL_MAP, CODEX_MODEL_MAP } from '@automaker/types'; import { CURSOR_MODEL_MAP, CODEX_MODEL_MAP } from '@automaker/types';
import { Brain, Zap, Scale, Cpu, Rocket, Sparkles } from 'lucide-react'; import { Brain, Zap, Scale, Cpu, Rocket, Sparkles } from 'lucide-react';
import { AnthropicIcon, CursorIcon, OpenAIIcon } from '@/components/ui/provider-icon';
export type ModelOption = { export type ModelOption = {
id: string; // Claude models use ModelAlias, Cursor models use "cursor-{id}" id: string; // Claude models use ModelAlias, Cursor models use "cursor-{id}"
@@ -58,47 +59,39 @@ export const CODEX_MODELS: ModelOption[] = [
{ {
id: CODEX_MODEL_MAP.gpt52Codex, id: CODEX_MODEL_MAP.gpt52Codex,
label: 'GPT-5.2-Codex', label: 'GPT-5.2-Codex',
description: 'Most advanced agentic coding model (default for ChatGPT users).', description: 'Most advanced agentic coding model for complex software engineering.',
badge: 'Premium', badge: 'Premium',
provider: 'codex', provider: 'codex',
hasThinking: true, hasThinking: true,
}, },
{ {
id: CODEX_MODEL_MAP.gpt5Codex, id: CODEX_MODEL_MAP.gpt51CodexMax,
label: 'GPT-5-Codex', label: 'GPT-5.1-Codex-Max',
description: 'Purpose-built for Codex CLI (default for CLI users).', description: 'Optimized for long-horizon, agentic coding tasks in Codex.',
badge: 'Balanced', badge: 'Premium',
provider: 'codex', provider: 'codex',
hasThinking: true, hasThinking: true,
}, },
{ {
id: CODEX_MODEL_MAP.gpt5CodexMini, id: CODEX_MODEL_MAP.gpt51CodexMini,
label: 'GPT-5-Codex-Mini', label: 'GPT-5.1-Codex-Mini',
description: 'Faster workflows for code Q&A and editing.', description: 'Smaller, more cost-effective version for faster workflows.',
badge: 'Speed', badge: 'Speed',
provider: 'codex', provider: 'codex',
hasThinking: false, hasThinking: false,
}, },
{ {
id: CODEX_MODEL_MAP.codex1, id: CODEX_MODEL_MAP.gpt52,
label: 'Codex-1', label: 'GPT-5.2',
description: 'o3-based model optimized for software engineering.', description: 'Best general agentic model for tasks across industries and domains.',
badge: 'Premium', badge: 'Balanced',
provider: 'codex', provider: 'codex',
hasThinking: true, hasThinking: true,
}, },
{ {
id: CODEX_MODEL_MAP.codexMiniLatest, id: CODEX_MODEL_MAP.gpt51,
label: 'Codex-Mini-Latest', label: 'GPT-5.1',
description: 'o4-mini-based model for faster workflows.', description: 'Great for coding and agentic tasks across domains.',
badge: 'Balanced',
provider: 'codex',
hasThinking: false,
},
{
id: CODEX_MODEL_MAP.gpt5,
label: 'GPT-5',
description: 'GPT-5 base flagship model.',
badge: 'Balanced', badge: 'Balanced',
provider: 'codex', provider: 'codex',
hasThinking: true, hasThinking: true,
@@ -150,4 +143,7 @@ export const PROFILE_ICONS: Record<string, React.ComponentType<{ className?: str
Cpu, Cpu,
Rocket, Rocket,
Sparkles, Sparkles,
Anthropic: AnthropicIcon,
Cursor: CursorIcon,
Codex: OpenAIIcon,
}; };

View File

@@ -45,8 +45,8 @@ export function ModelSelector({
// Switch to Cursor's default model (from global settings) // Switch to Cursor's default model (from global settings)
onModelSelect(`${PROVIDER_PREFIXES.cursor}${cursorDefaultModel}`); onModelSelect(`${PROVIDER_PREFIXES.cursor}${cursorDefaultModel}`);
} else if (provider === 'codex' && selectedProvider !== 'codex') { } else if (provider === 'codex' && selectedProvider !== 'codex') {
// Switch to Codex's default model (gpt-5.2) // Switch to Codex's default model (codex-gpt-5.2-codex)
onModelSelect('gpt-5.2'); onModelSelect('codex-gpt-5.2-codex');
} else if (provider === 'claude' && selectedProvider !== 'claude') { } else if (provider === 'claude' && selectedProvider !== 'claude') {
// Switch to Claude's default model // Switch to Claude's default model
onModelSelect('sonnet'); onModelSelect('sonnet');

View File

@@ -2,7 +2,12 @@ import { Label } from '@/components/ui/label';
import { Brain, UserCircle, Terminal } from 'lucide-react'; import { Brain, UserCircle, Terminal } from 'lucide-react';
import { cn } from '@/lib/utils'; import { cn } from '@/lib/utils';
import type { ModelAlias, ThinkingLevel, AIProfile, CursorModelId } from '@automaker/types'; import type { ModelAlias, ThinkingLevel, AIProfile, CursorModelId } from '@automaker/types';
import { CURSOR_MODEL_MAP, profileHasThinking, PROVIDER_PREFIXES } from '@automaker/types'; import {
CURSOR_MODEL_MAP,
profileHasThinking,
PROVIDER_PREFIXES,
getCodexModelLabel,
} from '@automaker/types';
import { PROFILE_ICONS } from './model-constants'; import { PROFILE_ICONS } from './model-constants';
/** /**
@@ -14,6 +19,9 @@ function getProfileModelDisplay(profile: AIProfile): string {
const modelConfig = CURSOR_MODEL_MAP[cursorModel]; const modelConfig = CURSOR_MODEL_MAP[cursorModel];
return modelConfig?.label || cursorModel; return modelConfig?.label || cursorModel;
} }
if (profile.provider === 'codex') {
return getCodexModelLabel(profile.codexModel || 'codex-gpt-5.2-codex');
}
// Claude // Claude
return profile.model || 'sonnet'; return profile.model || 'sonnet';
} }
@@ -26,6 +34,10 @@ function getProfileThinkingDisplay(profile: AIProfile): string | null {
// For Cursor, thinking is embedded in the model // For Cursor, thinking is embedded in the model
return profileHasThinking(profile) ? 'thinking' : null; return profileHasThinking(profile) ? 'thinking' : null;
} }
if (profile.provider === 'codex') {
// For Codex, thinking is embedded in the model
return profileHasThinking(profile) ? 'thinking' : null;
}
// Claude // Claude
return profile.thinkingLevel && profile.thinkingLevel !== 'none' ? profile.thinkingLevel : null; return profile.thinkingLevel && profile.thinkingLevel !== 'none' ? profile.thinkingLevel : null;
} }

View File

@@ -8,7 +8,12 @@ import {
import { Brain, Terminal } from 'lucide-react'; import { Brain, Terminal } from 'lucide-react';
import { cn } from '@/lib/utils'; import { cn } from '@/lib/utils';
import type { ModelAlias, ThinkingLevel, AIProfile, CursorModelId } from '@automaker/types'; import type { ModelAlias, ThinkingLevel, AIProfile, CursorModelId } from '@automaker/types';
import { CURSOR_MODEL_MAP, profileHasThinking, PROVIDER_PREFIXES } from '@automaker/types'; import {
CURSOR_MODEL_MAP,
profileHasThinking,
PROVIDER_PREFIXES,
getCodexModelLabel,
} from '@automaker/types';
import { PROFILE_ICONS } from './model-constants'; import { PROFILE_ICONS } from './model-constants';
/** /**
@@ -20,6 +25,9 @@ function getProfileModelDisplay(profile: AIProfile): string {
const modelConfig = CURSOR_MODEL_MAP[cursorModel]; const modelConfig = CURSOR_MODEL_MAP[cursorModel];
return modelConfig?.label || cursorModel; return modelConfig?.label || cursorModel;
} }
if (profile.provider === 'codex') {
return getCodexModelLabel(profile.codexModel || 'codex-gpt-5.2-codex');
}
// Claude // Claude
return profile.model || 'sonnet'; return profile.model || 'sonnet';
} }
@@ -32,6 +40,10 @@ function getProfileThinkingDisplay(profile: AIProfile): string | null {
// For Cursor, thinking is embedded in the model // For Cursor, thinking is embedded in the model
return profileHasThinking(profile) ? 'thinking' : null; return profileHasThinking(profile) ? 'thinking' : null;
} }
if (profile.provider === 'codex') {
// For Codex, thinking is embedded in the model
return profileHasThinking(profile) ? 'thinking' : null;
}
// Claude // Claude
return profile.thinkingLevel && profile.thinkingLevel !== 'none' ? profile.thinkingLevel : null; return profile.thinkingLevel && profile.thinkingLevel !== 'none' ? profile.thinkingLevel : null;
} }

View File

@@ -0,0 +1,47 @@
import { Label } from '@/components/ui/label';
import { Brain } from 'lucide-react';
import { cn } from '@/lib/utils';
import type { ReasoningEffort } from '@automaker/types';
import { REASONING_EFFORT_LEVELS, REASONING_EFFORT_LABELS } from './model-constants';
interface ReasoningEffortSelectorProps {
selectedEffort: ReasoningEffort;
onEffortSelect: (effort: ReasoningEffort) => void;
testIdPrefix?: string;
}
export function ReasoningEffortSelector({
selectedEffort,
onEffortSelect,
testIdPrefix = 'reasoning-effort',
}: ReasoningEffortSelectorProps) {
return (
<div className="space-y-2 pt-2 border-t border-border">
<Label className="flex items-center gap-2 text-sm">
<Brain className="w-3.5 h-3.5 text-muted-foreground" />
Reasoning Effort
</Label>
<div className="flex gap-2 flex-wrap">
{REASONING_EFFORT_LEVELS.map((effort) => (
<button
key={effort}
type="button"
onClick={() => onEffortSelect(effort)}
className={cn(
'flex-1 px-3 py-2 rounded-md border text-sm font-medium transition-colors min-w-[60px]',
selectedEffort === effort
? 'bg-primary text-primary-foreground border-primary'
: 'bg-background hover:bg-accent border-input'
)}
data-testid={`${testIdPrefix}-${effort}`}
>
{REASONING_EFFORT_LABELS[effort]}
</button>
))}
</div>
<p className="text-xs text-muted-foreground">
Higher efforts give more reasoning tokens for complex problems.
</p>
</div>
);
}

View File

@@ -1,4 +1,4 @@
import { useState } from 'react'; import { useState, useEffect } from 'react';
import { Button } from '@/components/ui/button'; import { Button } from '@/components/ui/button';
import { HotkeyButton } from '@/components/ui/hotkey-button'; import { HotkeyButton } from '@/components/ui/hotkey-button';
import { Input } from '@/components/ui/input'; import { Input } from '@/components/ui/input';
@@ -53,15 +53,33 @@ export function ProfileForm({
icon: profile.icon || 'Brain', icon: profile.icon || 'Brain',
}); });
// Sync formData with profile prop when it changes
useEffect(() => {
setFormData({
name: profile.name || '',
description: profile.description || '',
provider: (profile.provider || 'claude') as ModelProvider,
// Claude-specific
model: profile.model || ('sonnet' as ModelAlias),
thinkingLevel: profile.thinkingLevel || ('none' as ThinkingLevel),
// Cursor-specific
cursorModel: profile.cursorModel || ('auto' as CursorModelId),
// Codex-specific - use a valid CodexModelId from CODEX_MODEL_MAP
codexModel: profile.codexModel || (CODEX_MODEL_MAP.gpt52Codex as CodexModelId),
icon: profile.icon || 'Brain',
});
}, [profile]);
const supportsThinking = formData.provider === 'claude' && modelSupportsThinking(formData.model); const supportsThinking = formData.provider === 'claude' && modelSupportsThinking(formData.model);
const handleProviderChange = (provider: ModelProvider) => { const handleProviderChange = (provider: ModelProvider) => {
setFormData({ setFormData({
...formData, ...formData,
provider, provider,
// Reset to defaults when switching providers // Only reset Claude fields when switching TO Claude; preserve otherwise
model: provider === 'claude' ? 'sonnet' : formData.model, model: provider === 'claude' ? 'sonnet' : formData.model,
thinkingLevel: provider === 'claude' ? 'none' : formData.thinkingLevel, thinkingLevel: provider === 'claude' ? 'none' : formData.thinkingLevel,
// Reset cursor/codex models when switching to that provider
cursorModel: provider === 'cursor' ? 'auto' : formData.cursorModel, cursorModel: provider === 'cursor' ? 'auto' : formData.cursorModel,
codexModel: codexModel:
provider === 'codex' ? (CODEX_MODEL_MAP.gpt52Codex as CodexModelId) : formData.codexModel, provider === 'codex' ? (CODEX_MODEL_MAP.gpt52Codex as CodexModelId) : formData.codexModel,
@@ -95,6 +113,15 @@ export function ProfileForm({
return; return;
} }
// Ensure model is always set for Claude profiles
const validModels: ModelAlias[] = ['haiku', 'sonnet', 'opus'];
const finalModel =
formData.provider === 'claude'
? validModels.includes(formData.model)
? formData.model
: 'sonnet'
: undefined;
const baseProfile = { const baseProfile = {
name: formData.name.trim(), name: formData.name.trim(),
description: formData.description.trim(), description: formData.description.trim(),
@@ -116,7 +143,7 @@ export function ProfileForm({
} else { } else {
onSave({ onSave({
...baseProfile, ...baseProfile,
model: formData.model, model: finalModel as ModelAlias,
thinkingLevel: supportsThinking ? formData.thinkingLevel : 'none', thinkingLevel: supportsThinking ? formData.thinkingLevel : 'none',
}); });
} }

View File

@@ -1,11 +1,12 @@
import { Button } from '@/components/ui/button'; import { Button } from '@/components/ui/button';
import { cn } from '@/lib/utils'; import { cn } from '@/lib/utils';
import { GripVertical, Lock, Pencil, Trash2, Brain, Bot, Terminal } from 'lucide-react'; import { GripVertical, Lock, Pencil, Trash2 } from 'lucide-react';
import { useSortable } from '@dnd-kit/sortable'; import { useSortable } from '@dnd-kit/sortable';
import { CSS } from '@dnd-kit/utilities'; import { CSS } from '@dnd-kit/utilities';
import type { AIProfile } from '@automaker/types'; import type { AIProfile } from '@automaker/types';
import { CURSOR_MODEL_MAP, profileHasThinking } from '@automaker/types'; import { CURSOR_MODEL_MAP, profileHasThinking, getCodexModelLabel } from '@automaker/types';
import { PROFILE_ICONS } from '../constants'; import { PROFILE_ICONS } from '../constants';
import { AnthropicIcon, CursorIcon, OpenAIIcon } from '@/components/ui/provider-icon';
interface SortableProfileCardProps { interface SortableProfileCardProps {
profile: AIProfile; profile: AIProfile;
@@ -24,7 +25,13 @@ export function SortableProfileCard({ profile, onEdit, onDelete }: SortableProfi
opacity: isDragging ? 0.5 : 1, opacity: isDragging ? 0.5 : 1,
}; };
const IconComponent = profile.icon ? PROFILE_ICONS[profile.icon] : Brain; const getDefaultIcon = () => {
if (profile.provider === 'cursor') return CursorIcon;
if (profile.provider === 'codex') return OpenAIIcon;
return AnthropicIcon;
};
const IconComponent = profile.icon ? PROFILE_ICONS[profile.icon] : getDefaultIcon();
return ( return (
<div <div
@@ -72,11 +79,17 @@ export function SortableProfileCard({ profile, onEdit, onDelete }: SortableProfi
{/* Provider badge */} {/* Provider badge */}
<span className="text-xs px-2 py-0.5 rounded-full border border-border text-muted-foreground bg-muted/50 flex items-center gap-1"> <span className="text-xs px-2 py-0.5 rounded-full border border-border text-muted-foreground bg-muted/50 flex items-center gap-1">
{profile.provider === 'cursor' ? ( {profile.provider === 'cursor' ? (
<Terminal className="w-3 h-3" /> <CursorIcon className="w-3 h-3" />
) : profile.provider === 'codex' ? (
<OpenAIIcon className="w-3 h-3" />
) : ( ) : (
<Bot className="w-3 h-3" /> <AnthropicIcon className="w-3 h-3" />
)} )}
{profile.provider === 'cursor' ? 'Cursor' : 'Claude'} {profile.provider === 'cursor'
? 'Cursor'
: profile.provider === 'codex'
? 'Codex'
: 'Claude'}
</span> </span>
{/* Model badge */} {/* Model badge */}
@@ -85,7 +98,9 @@ export function SortableProfileCard({ profile, onEdit, onDelete }: SortableProfi
? CURSOR_MODEL_MAP[profile.cursorModel || 'auto']?.label || ? CURSOR_MODEL_MAP[profile.cursorModel || 'auto']?.label ||
profile.cursorModel || profile.cursorModel ||
'auto' 'auto'
: profile.model || 'sonnet'} : profile.provider === 'codex'
? getCodexModelLabel(profile.codexModel || 'codex-gpt-5.2-codex')
: profile.model || 'sonnet'}
</span> </span>
{/* Thinking badge - works for both providers */} {/* Thinking badge - works for both providers */}

View File

@@ -1,5 +1,6 @@
import { Brain, Zap, Scale, Cpu, Rocket, Sparkles } from 'lucide-react'; import { Brain, Zap, Scale, Cpu, Rocket, Sparkles } from 'lucide-react';
import type { ModelAlias, ThinkingLevel } from '@/store/app-store'; import type { ModelAlias, ThinkingLevel } from '@/store/app-store';
import { AnthropicIcon, CursorIcon, OpenAIIcon } from '@/components/ui/provider-icon';
// Icon mapping for profiles // Icon mapping for profiles
export const PROFILE_ICONS: Record<string, React.ComponentType<{ className?: string }>> = { export const PROFILE_ICONS: Record<string, React.ComponentType<{ className?: string }>> = {
@@ -9,6 +10,9 @@ export const PROFILE_ICONS: Record<string, React.ComponentType<{ className?: str
Cpu, Cpu,
Rocket, Rocket,
Sparkles, Sparkles,
Anthropic: AnthropicIcon,
Cursor: CursorIcon,
Codex: OpenAIIcon,
}; };
// Available icons for selection // Available icons for selection
@@ -19,6 +23,9 @@ export const ICON_OPTIONS = [
{ name: 'Cpu', icon: Cpu }, { name: 'Cpu', icon: Cpu },
{ name: 'Rocket', icon: Rocket }, { name: 'Rocket', icon: Rocket },
{ name: 'Sparkles', icon: Sparkles }, { name: 'Sparkles', icon: Sparkles },
{ name: 'Anthropic', icon: AnthropicIcon },
{ name: 'Cursor', icon: CursorIcon },
{ name: 'Codex', icon: OpenAIIcon },
]; ];
// Model options for the form // Model options for the form

View File

@@ -1,110 +1,37 @@
import { Label } from '@/components/ui/label'; import { Label } from '@/components/ui/label';
import { Checkbox } from '@/components/ui/checkbox'; import { Checkbox } from '@/components/ui/checkbox';
import { FileCode, ShieldCheck, Globe, ImageIcon } from 'lucide-react'; import { FileCode, Globe, ImageIcon } from 'lucide-react';
import { cn } from '@/lib/utils'; import { cn } from '@/lib/utils';
import {
Select,
SelectContent,
SelectItem,
SelectTrigger,
SelectValue,
} from '@/components/ui/select';
import type { CodexApprovalPolicy, CodexSandboxMode } from '@automaker/types';
import { OpenAIIcon } from '@/components/ui/provider-icon'; import { OpenAIIcon } from '@/components/ui/provider-icon';
interface CodexSettingsProps { interface CodexSettingsProps {
autoLoadCodexAgents: boolean; autoLoadCodexAgents: boolean;
codexSandboxMode: CodexSandboxMode;
codexApprovalPolicy: CodexApprovalPolicy;
codexEnableWebSearch: boolean; codexEnableWebSearch: boolean;
codexEnableImages: boolean; codexEnableImages: boolean;
onAutoLoadCodexAgentsChange: (enabled: boolean) => void; onAutoLoadCodexAgentsChange: (enabled: boolean) => void;
onCodexSandboxModeChange: (mode: CodexSandboxMode) => void;
onCodexApprovalPolicyChange: (policy: CodexApprovalPolicy) => void;
onCodexEnableWebSearchChange: (enabled: boolean) => void; onCodexEnableWebSearchChange: (enabled: boolean) => void;
onCodexEnableImagesChange: (enabled: boolean) => void; onCodexEnableImagesChange: (enabled: boolean) => void;
} }
const CARD_TITLE = 'Codex CLI Settings'; const CARD_TITLE = 'Codex CLI Settings';
const CARD_SUBTITLE = 'Configure Codex instructions, capabilities, and execution safety defaults.'; const CARD_SUBTITLE = 'Configure Codex instructions and capabilities.';
const AGENTS_TITLE = 'Auto-load AGENTS.md Instructions'; const AGENTS_TITLE = 'Auto-load AGENTS.md Instructions';
const AGENTS_DESCRIPTION = 'Automatically inject project instructions from'; const AGENTS_DESCRIPTION = 'Automatically inject project instructions from';
const AGENTS_PATH = '.codex/AGENTS.md'; const AGENTS_PATH = '.codex/AGENTS.md';
const AGENTS_SUFFIX = 'on each Codex run.'; const AGENTS_SUFFIX = 'on each Codex run.';
const WEB_SEARCH_TITLE = 'Enable Web Search'; const WEB_SEARCH_TITLE = 'Enable Web Search';
const WEB_SEARCH_DESCRIPTION = const WEB_SEARCH_DESCRIPTION = 'Allow Codex to search the web for current information.';
'Allow Codex to search the web for current information using --search flag.';
const IMAGES_TITLE = 'Enable Image Support'; const IMAGES_TITLE = 'Enable Image Support';
const IMAGES_DESCRIPTION = 'Allow Codex to process images attached to prompts using -i flag.'; const IMAGES_DESCRIPTION = 'Allow Codex to process images attached to prompts.';
const SANDBOX_TITLE = 'Sandbox Policy';
const APPROVAL_TITLE = 'Approval Policy';
const SANDBOX_SELECT_LABEL = 'Select sandbox policy';
const APPROVAL_SELECT_LABEL = 'Select approval policy';
const SANDBOX_OPTIONS: Array<{
value: CodexSandboxMode;
label: string;
description: string;
}> = [
{
value: 'read-only',
label: 'Read-only',
description: 'Only allow safe, non-mutating commands.',
},
{
value: 'workspace-write',
label: 'Workspace write',
description: 'Allow file edits inside the project workspace.',
},
{
value: 'danger-full-access',
label: 'Full access',
description: 'Allow unrestricted commands (use with care).',
},
];
const APPROVAL_OPTIONS: Array<{
value: CodexApprovalPolicy;
label: string;
description: string;
}> = [
{
value: 'untrusted',
label: 'Untrusted',
description: 'Ask for approval for most commands.',
},
{
value: 'on-failure',
label: 'On failure',
description: 'Ask only if a command fails in the sandbox.',
},
{
value: 'on-request',
label: 'On request',
description: 'Let the agent decide when to ask.',
},
{
value: 'never',
label: 'Never',
description: 'Never ask for approval (least restrictive).',
},
];
export function CodexSettings({ export function CodexSettings({
autoLoadCodexAgents, autoLoadCodexAgents,
codexSandboxMode,
codexApprovalPolicy,
codexEnableWebSearch, codexEnableWebSearch,
codexEnableImages, codexEnableImages,
onAutoLoadCodexAgentsChange, onAutoLoadCodexAgentsChange,
onCodexSandboxModeChange,
onCodexApprovalPolicyChange,
onCodexEnableWebSearchChange, onCodexEnableWebSearchChange,
onCodexEnableImagesChange, onCodexEnableImagesChange,
}: CodexSettingsProps) { }: CodexSettingsProps) {
const sandboxOption = SANDBOX_OPTIONS.find((option) => option.value === codexSandboxMode);
const approvalOption = APPROVAL_OPTIONS.find((option) => option.value === codexApprovalPolicy);
return ( return (
<div <div
className={cn( className={cn(
@@ -189,61 +116,6 @@ export function CodexSettings({
<p className="text-xs text-muted-foreground/80 leading-relaxed">{IMAGES_DESCRIPTION}</p> <p className="text-xs text-muted-foreground/80 leading-relaxed">{IMAGES_DESCRIPTION}</p>
</div> </div>
</div> </div>
<div className="group flex items-start space-x-3 p-3 rounded-xl hover:bg-accent/30 transition-colors duration-200 -mx-3">
<div className="w-10 h-10 mt-0.5 rounded-xl flex items-center justify-center shrink-0 bg-brand-500/10">
<ShieldCheck className="w-5 h-5 text-brand-500" />
</div>
<div className="flex-1 space-y-4">
<div className="flex items-center justify-between gap-4">
<div>
<Label className="text-foreground font-medium">{SANDBOX_TITLE}</Label>
<p className="text-xs text-muted-foreground/80 leading-relaxed">
{sandboxOption?.description}
</p>
</div>
<Select
value={codexSandboxMode}
onValueChange={(value) => onCodexSandboxModeChange(value as CodexSandboxMode)}
>
<SelectTrigger className="w-[180px] h-8" data-testid="codex-sandbox-select">
<SelectValue aria-label={SANDBOX_SELECT_LABEL} />
</SelectTrigger>
<SelectContent>
{SANDBOX_OPTIONS.map((option) => (
<SelectItem key={option.value} value={option.value}>
{option.label}
</SelectItem>
))}
</SelectContent>
</Select>
</div>
<div className="flex items-center justify-between gap-4">
<div>
<Label className="text-foreground font-medium">{APPROVAL_TITLE}</Label>
<p className="text-xs text-muted-foreground/80 leading-relaxed">
{approvalOption?.description}
</p>
</div>
<Select
value={codexApprovalPolicy}
onValueChange={(value) => onCodexApprovalPolicyChange(value as CodexApprovalPolicy)}
>
<SelectTrigger className="w-[180px] h-8" data-testid="codex-approval-select">
<SelectValue aria-label={APPROVAL_SELECT_LABEL} />
</SelectTrigger>
<SelectContent>
{APPROVAL_OPTIONS.map((option) => (
<SelectItem key={option.value} value={option.value}>
{option.label}
</SelectItem>
))}
</SelectContent>
</Select>
</div>
</div>
</div>
</div> </div>
</div> </div>
); );

View File

@@ -4,9 +4,11 @@ import { useAppStore } from '@/store/app-store';
import type { import type {
ModelAlias, ModelAlias,
CursorModelId, CursorModelId,
CodexModelId,
GroupedModel, GroupedModel,
PhaseModelEntry, PhaseModelEntry,
ThinkingLevel, ThinkingLevel,
ReasoningEffort,
} from '@automaker/types'; } from '@automaker/types';
import { import {
stripProviderPrefix, stripProviderPrefix,
@@ -15,6 +17,7 @@ import {
isGroupSelected, isGroupSelected,
getSelectedVariant, getSelectedVariant,
isCursorModel, isCursorModel,
codexModelHasThinking,
} from '@automaker/types'; } from '@automaker/types';
import { import {
CLAUDE_MODELS, CLAUDE_MODELS,
@@ -22,6 +25,8 @@ import {
CODEX_MODELS, CODEX_MODELS,
THINKING_LEVELS, THINKING_LEVELS,
THINKING_LEVEL_LABELS, THINKING_LEVEL_LABELS,
REASONING_EFFORT_LEVELS,
REASONING_EFFORT_LABELS,
} from '@/components/views/board-view/shared/model-constants'; } from '@/components/views/board-view/shared/model-constants';
import { Check, ChevronsUpDown, Star, ChevronRight } from 'lucide-react'; import { Check, ChevronsUpDown, Star, ChevronRight } from 'lucide-react';
import { AnthropicIcon, CursorIcon, OpenAIIcon } from '@/components/ui/provider-icon'; import { AnthropicIcon, CursorIcon, OpenAIIcon } from '@/components/ui/provider-icon';
@@ -69,14 +74,17 @@ export function PhaseModelSelector({
const [open, setOpen] = React.useState(false); const [open, setOpen] = React.useState(false);
const [expandedGroup, setExpandedGroup] = React.useState<string | null>(null); const [expandedGroup, setExpandedGroup] = React.useState<string | null>(null);
const [expandedClaudeModel, setExpandedClaudeModel] = React.useState<ModelAlias | null>(null); const [expandedClaudeModel, setExpandedClaudeModel] = React.useState<ModelAlias | null>(null);
const [expandedCodexModel, setExpandedCodexModel] = React.useState<CodexModelId | null>(null);
const commandListRef = React.useRef<HTMLDivElement>(null); const commandListRef = React.useRef<HTMLDivElement>(null);
const expandedTriggerRef = React.useRef<HTMLDivElement>(null); const expandedTriggerRef = React.useRef<HTMLDivElement>(null);
const expandedClaudeTriggerRef = React.useRef<HTMLDivElement>(null); const expandedClaudeTriggerRef = React.useRef<HTMLDivElement>(null);
const expandedCodexTriggerRef = React.useRef<HTMLDivElement>(null);
const { enabledCursorModels, favoriteModels, toggleFavoriteModel } = useAppStore(); const { enabledCursorModels, favoriteModels, toggleFavoriteModel } = useAppStore();
// Extract model and thinking level from value // Extract model and thinking/reasoning levels from value
const selectedModel = value.model; const selectedModel = value.model;
const selectedThinkingLevel = value.thinkingLevel || 'none'; const selectedThinkingLevel = value.thinkingLevel || 'none';
const selectedReasoningEffort = value.reasoningEffort || 'none';
// Close expanded group when trigger scrolls out of view // Close expanded group when trigger scrolls out of view
React.useEffect(() => { React.useEffect(() => {
@@ -124,6 +132,29 @@ export function PhaseModelSelector({
return () => observer.disconnect(); return () => observer.disconnect();
}, [expandedClaudeModel]); }, [expandedClaudeModel]);
// Close expanded Codex model popover when trigger scrolls out of view
React.useEffect(() => {
const triggerElement = expandedCodexTriggerRef.current;
const listElement = commandListRef.current;
if (!triggerElement || !listElement || !expandedCodexModel) return;
const observer = new IntersectionObserver(
(entries) => {
const entry = entries[0];
if (!entry.isIntersecting) {
setExpandedCodexModel(null);
}
},
{
root: listElement,
threshold: 0.1,
}
);
observer.observe(triggerElement);
return () => observer.disconnect();
}, [expandedCodexModel]);
// Filter Cursor models to only show enabled ones // Filter Cursor models to only show enabled ones
const availableCursorModels = CURSOR_MODELS.filter((model) => { const availableCursorModels = CURSOR_MODELS.filter((model) => {
const cursorId = stripProviderPrefix(model.id) as CursorModelId; const cursorId = stripProviderPrefix(model.id) as CursorModelId;
@@ -241,55 +272,183 @@ export function PhaseModelSelector({
return { favorites: favs, claude: cModels, cursor: curModels, codex: codModels }; return { favorites: favs, claude: cModels, cursor: curModels, codex: codModels };
}, [favoriteModels, availableCursorModels]); }, [favoriteModels, availableCursorModels]);
// Render Codex model item (no thinking level needed) // Render Codex model item with secondary popover for reasoning effort (only for models that support it)
const renderCodexModelItem = (model: (typeof CODEX_MODELS)[0]) => { const renderCodexModelItem = (model: (typeof CODEX_MODELS)[0]) => {
const isSelected = selectedModel === model.id; const isSelected = selectedModel === model.id;
const isFavorite = favoriteModels.includes(model.id); const isFavorite = favoriteModels.includes(model.id);
const hasReasoning = codexModelHasThinking(model.id as CodexModelId);
const isExpanded = expandedCodexModel === model.id;
const currentReasoning = isSelected ? selectedReasoningEffort : 'none';
// If model doesn't support reasoning, render as simple selector (like Cursor models)
if (!hasReasoning) {
return (
<CommandItem
key={model.id}
value={model.label}
onSelect={() => {
onChange({ model: model.id as CodexModelId });
setOpen(false);
}}
className="group flex items-center justify-between py-2"
>
<div className="flex items-center gap-3 overflow-hidden">
<OpenAIIcon
className={cn(
'h-4 w-4 shrink-0',
isSelected ? 'text-primary' : 'text-muted-foreground'
)}
/>
<div className="flex flex-col truncate">
<span className={cn('truncate font-medium', isSelected && 'text-primary')}>
{model.label}
</span>
<span className="truncate text-xs text-muted-foreground">{model.description}</span>
</div>
</div>
<div className="flex items-center gap-1 ml-2">
<Button
variant="ghost"
size="icon"
className={cn(
'h-6 w-6 hover:bg-transparent hover:text-yellow-500 focus:ring-0',
isFavorite
? 'text-yellow-500 opacity-100'
: 'opacity-0 group-hover:opacity-100 text-muted-foreground'
)}
onClick={(e) => {
e.stopPropagation();
toggleFavoriteModel(model.id);
}}
>
<Star className={cn('h-3.5 w-3.5', isFavorite && 'fill-current')} />
</Button>
{isSelected && <Check className="h-4 w-4 text-primary shrink-0" />}
</div>
</CommandItem>
);
}
// Model supports reasoning - show popover with reasoning effort options
return ( return (
<CommandItem <CommandItem
key={model.id} key={model.id}
value={model.label} value={model.label}
onSelect={() => { onSelect={() => setExpandedCodexModel(isExpanded ? null : (model.id as CodexModelId))}
onChange({ model: model.id }); className="p-0 data-[selected=true]:bg-transparent"
setOpen(false);
}}
className="group flex items-center justify-between py-2"
> >
<div className="flex items-center gap-3 overflow-hidden"> <Popover
<OpenAIIcon open={isExpanded}
className={cn( onOpenChange={(isOpen) => {
'h-4 w-4 shrink-0', if (!isOpen) {
isSelected ? 'text-primary' : 'text-muted-foreground' setExpandedCodexModel(null);
)} }
/> }}
<div className="flex flex-col truncate"> >
<span className={cn('truncate font-medium', isSelected && 'text-primary')}> <PopoverTrigger asChild>
{model.label} <div
</span> ref={isExpanded ? expandedCodexTriggerRef : undefined}
<span className="truncate text-xs text-muted-foreground">{model.description}</span> className={cn(
</div> 'w-full group flex items-center justify-between py-2 px-2 rounded-sm cursor-pointer',
</div> 'hover:bg-accent',
isExpanded && 'bg-accent'
)}
>
<div className="flex items-center gap-3 overflow-hidden">
<OpenAIIcon
className={cn(
'h-4 w-4 shrink-0',
isSelected ? 'text-primary' : 'text-muted-foreground'
)}
/>
<div className="flex flex-col truncate">
<span className={cn('truncate font-medium', isSelected && 'text-primary')}>
{model.label}
</span>
<span className="truncate text-xs text-muted-foreground">
{isSelected && currentReasoning !== 'none'
? `Reasoning: ${REASONING_EFFORT_LABELS[currentReasoning]}`
: model.description}
</span>
</div>
</div>
<div className="flex items-center gap-1 ml-2"> <div className="flex items-center gap-1 ml-2">
<Button <Button
variant="ghost" variant="ghost"
size="icon" size="icon"
className={cn( className={cn(
'h-6 w-6 hover:bg-transparent hover:text-yellow-500 focus:ring-0', 'h-6 w-6 hover:bg-transparent hover:text-yellow-500 focus:ring-0',
isFavorite isFavorite
? 'text-yellow-500 opacity-100' ? 'text-yellow-500 opacity-100'
: 'opacity-0 group-hover:opacity-100 text-muted-foreground' : 'opacity-0 group-hover:opacity-100 text-muted-foreground'
)} )}
onClick={(e) => { onClick={(e) => {
e.stopPropagation(); e.stopPropagation();
toggleFavoriteModel(model.id); toggleFavoriteModel(model.id);
}} }}
>
<Star className={cn('h-3.5 w-3.5', isFavorite && 'fill-current')} />
</Button>
{isSelected && <Check className="h-4 w-4 text-primary shrink-0" />}
<ChevronRight
className={cn(
'h-4 w-4 text-muted-foreground transition-transform',
isExpanded && 'rotate-90'
)}
/>
</div>
</div>
</PopoverTrigger>
<PopoverContent
side="right"
align="start"
className="w-[220px] p-1"
sideOffset={8}
collisionPadding={16}
onCloseAutoFocus={(e) => e.preventDefault()}
> >
<Star className={cn('h-3.5 w-3.5', isFavorite && 'fill-current')} /> <div className="space-y-1">
</Button> <div className="px-2 py-1.5 text-xs font-medium text-muted-foreground border-b border-border/50 mb-1">
{isSelected && <Check className="h-4 w-4 text-primary shrink-0" />} Reasoning Effort
</div> </div>
{REASONING_EFFORT_LEVELS.map((effort) => (
<button
key={effort}
onClick={() => {
onChange({
model: model.id as CodexModelId,
reasoningEffort: effort,
});
setExpandedCodexModel(null);
setOpen(false);
}}
className={cn(
'w-full flex items-center justify-between px-2 py-2 rounded-sm text-sm',
'hover:bg-accent cursor-pointer transition-colors',
isSelected && currentReasoning === effort && 'bg-accent text-accent-foreground'
)}
>
<div className="flex flex-col items-start">
<span className="font-medium">{REASONING_EFFORT_LABELS[effort]}</span>
<span className="text-xs text-muted-foreground">
{effort === 'none' && 'No reasoning capability'}
{effort === 'minimal' && 'Minimal reasoning'}
{effort === 'low' && 'Light reasoning'}
{effort === 'medium' && 'Moderate reasoning'}
{effort === 'high' && 'Deep reasoning'}
{effort === 'xhigh' && 'Maximum reasoning'}
</span>
</div>
{isSelected && currentReasoning === effort && (
<Check className="h-3.5 w-3.5 text-primary" />
)}
</button>
))}
</div>
</PopoverContent>
</Popover>
</CommandItem> </CommandItem>
); );
}; };

View File

@@ -29,35 +29,30 @@ interface CodexModelInfo {
} }
const CODEX_MODEL_INFO: Record<CodexModelId, CodexModelInfo> = { const CODEX_MODEL_INFO: Record<CodexModelId, CodexModelInfo> = {
'gpt-5.2-codex': { 'codex-gpt-5.2-codex': {
id: 'gpt-5.2-codex', id: 'codex-gpt-5.2-codex',
label: 'GPT-5.2-Codex', label: 'GPT-5.2-Codex',
description: 'Most advanced agentic coding model for complex software engineering', description: 'Most advanced agentic coding model for complex software engineering',
}, },
'gpt-5-codex': { 'codex-gpt-5.1-codex-max': {
id: 'gpt-5-codex', id: 'codex-gpt-5.1-codex-max',
label: 'GPT-5-Codex', label: 'GPT-5.1-Codex-Max',
description: 'Purpose-built for Codex CLI with versatile tool use', description: 'Optimized for long-horizon, agentic coding tasks in Codex',
}, },
'gpt-5-codex-mini': { 'codex-gpt-5.1-codex-mini': {
id: 'gpt-5-codex-mini', id: 'codex-gpt-5.1-codex-mini',
label: 'GPT-5-Codex-Mini', label: 'GPT-5.1-Codex-Mini',
description: 'Faster workflows optimized for low-latency code Q&A and editing', description: 'Smaller, more cost-effective version for faster workflows',
}, },
'codex-1': { 'codex-gpt-5.2': {
id: 'codex-1', id: 'codex-gpt-5.2',
label: 'Codex-1', label: 'GPT-5.2',
description: 'Version of o3 optimized for software engineering', description: 'Best general agentic model for tasks across industries and domains',
}, },
'codex-mini-latest': { 'codex-gpt-5.1': {
id: 'codex-mini-latest', id: 'codex-gpt-5.1',
label: 'Codex-Mini-Latest', label: 'GPT-5.1',
description: 'Version of o4-mini for Codex, optimized for faster workflows', description: 'Great for coding and agentic tasks across domains',
},
'gpt-5': {
id: 'gpt-5',
label: 'GPT-5',
description: 'GPT-5 base flagship model',
}, },
}; };
@@ -167,17 +162,21 @@ export function CodexModelConfiguration({
function getModelDisplayName(modelId: string): string { function getModelDisplayName(modelId: string): string {
const displayNames: Record<string, string> = { const displayNames: Record<string, string> = {
'gpt-5.2-codex': 'GPT-5.2-Codex', 'codex-gpt-5.2-codex': 'GPT-5.2-Codex',
'gpt-5-codex': 'GPT-5-Codex', 'codex-gpt-5.1-codex-max': 'GPT-5.1-Codex-Max',
'gpt-5-codex-mini': 'GPT-5-Codex-Mini', 'codex-gpt-5.1-codex-mini': 'GPT-5.1-Codex-Mini',
'codex-1': 'Codex-1', 'codex-gpt-5.2': 'GPT-5.2',
'codex-mini-latest': 'Codex-Mini-Latest', 'codex-gpt-5.1': 'GPT-5.1',
'gpt-5': 'GPT-5',
}; };
return displayNames[modelId] || modelId; return displayNames[modelId] || modelId;
} }
function supportsReasoningEffort(modelId: string): boolean { function supportsReasoningEffort(modelId: string): boolean {
const reasoningModels = ['gpt-5.2-codex', 'gpt-5-codex', 'gpt-5', 'codex-1']; const reasoningModels = [
'codex-gpt-5.2-codex',
'codex-gpt-5.1-codex-max',
'codex-gpt-5.2',
'codex-gpt-5.1',
];
return reasoningModels.includes(modelId); return reasoningModels.includes(modelId);
} }

View File

@@ -181,13 +181,9 @@ export function CodexSettingsTab() {
<CodexSettings <CodexSettings
autoLoadCodexAgents={codexAutoLoadAgents} autoLoadCodexAgents={codexAutoLoadAgents}
codexSandboxMode={codexSandboxMode}
codexApprovalPolicy={codexApprovalPolicy}
codexEnableWebSearch={codexEnableWebSearch} codexEnableWebSearch={codexEnableWebSearch}
codexEnableImages={codexEnableImages} codexEnableImages={codexEnableImages}
onAutoLoadCodexAgentsChange={setCodexAutoLoadAgents} onAutoLoadCodexAgentsChange={setCodexAutoLoadAgents}
onCodexSandboxModeChange={setCodexSandboxMode}
onCodexApprovalPolicyChange={setCodexApprovalPolicy}
onCodexEnableWebSearchChange={setCodexEnableWebSearch} onCodexEnableWebSearchChange={setCodexEnableWebSearch}
onCodexEnableImagesChange={setCodexEnableImages} onCodexEnableImagesChange={setCodexEnableImages}
/> />

View File

@@ -0,0 +1,84 @@
import { useEffect, useRef } from 'react';
import { useAppStore } from '@/store/app-store';
import { getHttpApiClient } from '@/lib/http-api-client';
/**
 * Hook that loads project settings from the server when the current project changes.
 *
 * This ensures that settings like board backgrounds are properly restored when
 * switching between projects or restarting the app. Fetched values are pushed
 * into the global app store via the per-project setter actions.
 *
 * Concurrency notes (order of operations matters here):
 * - `currentProjectRef` mirrors the latest selected project path and is updated
 *   at the top of the effect, BEFORE any early return, so that an in-flight
 *   request can detect that the user has since switched projects and drop its
 *   stale response.
 * - `loadingRef` records the last project path for which a load was started,
 *   deduplicating repeat loads for the same project across re-renders. It is
 *   intentionally never cleared, so a failed load is NOT retried for the same
 *   project — presumably acceptable best-effort behavior; TODO confirm.
 */
export function useProjectSettingsLoader() {
  // Currently selected project; its `path` is the key used for settings lookups.
  const currentProject = useAppStore((state) => state.currentProject);
  // Per-project setter actions from the app store (each takes (projectPath, value)).
  const setBoardBackground = useAppStore((state) => state.setBoardBackground);
  const setCardOpacity = useAppStore((state) => state.setCardOpacity);
  const setColumnOpacity = useAppStore((state) => state.setColumnOpacity);
  const setColumnBorderEnabled = useAppStore((state) => state.setColumnBorderEnabled);
  const setCardGlassmorphism = useAppStore((state) => state.setCardGlassmorphism);
  const setCardBorderEnabled = useAppStore((state) => state.setCardBorderEnabled);
  const setCardBorderOpacity = useAppStore((state) => state.setCardBorderOpacity);
  const setHideScrollbar = useAppStore((state) => state.setHideScrollbar);
  // Last project path a load was started for (dedupe guard; never reset).
  const loadingRef = useRef<string | null>(null);
  // Latest project path, readable from inside the async closure to detect staleness.
  const currentProjectRef = useRef<string | null>(null);
  useEffect(() => {
    // Update the "latest project" marker first so any in-flight request for a
    // previous project sees the change and discards its result.
    currentProjectRef.current = currentProject?.path ?? null;
    if (!currentProject?.path) {
      return;
    }
    // Prevent loading the same project multiple times
    if (loadingRef.current === currentProject.path) {
      return;
    }
    // Mark as loading BEFORE kicking off the async call so a re-render that
    // fires this effect again for the same path is deduplicated above.
    loadingRef.current = currentProject.path;
    // Capture the path at request time; compared against the ref on completion.
    const requestedProjectPath = currentProject.path;
    const loadProjectSettings = async () => {
      try {
        const httpClient = getHttpApiClient();
        const result = await httpClient.settings.getProject(requestedProjectPath);
        // Race condition protection: ignore stale results if project changed
        if (currentProjectRef.current !== requestedProjectPath) {
          return;
        }
        if (result.success && result.settings) {
          // `boardBackground` groups the board appearance settings; may be absent.
          const bg = result.settings.boardBackground;
          // Apply boardBackground if present
          if (bg?.imagePath) {
            setBoardBackground(requestedProjectPath, bg.imagePath);
          }
          // Settings map for cleaner iteration: response key -> store setter.
          const settingsMap = {
            cardOpacity: setCardOpacity,
            columnOpacity: setColumnOpacity,
            columnBorderEnabled: setColumnBorderEnabled,
            cardGlassmorphism: setCardGlassmorphism,
            cardBorderEnabled: setCardBorderEnabled,
            cardBorderOpacity: setCardBorderOpacity,
            hideScrollbar: setHideScrollbar,
          } as const;
          // Apply all settings that are defined; `undefined` means "not stored",
          // so the existing in-store value is left untouched.
          for (const [key, setter] of Object.entries(settingsMap)) {
            const value = bg?.[key as keyof typeof bg];
            if (value !== undefined) {
              // Cast needed because Object.entries widens the per-key value types.
              (setter as (path: string, val: typeof value) => void)(requestedProjectPath, value);
            }
          }
        }
      } catch (error) {
        console.error('Failed to load project settings:', error);
        // Don't show error toast - just log it
      }
    };
    // Fire-and-forget: errors are handled inside loadProjectSettings.
    loadProjectSettings();
    // Setters are stable store actions; only the project path drives reloads.
  }, [currentProject?.path]);
}

View File

@@ -38,12 +38,13 @@ export function formatModelName(model: string): string {
if (model.includes('sonnet')) return 'Sonnet 4.5'; if (model.includes('sonnet')) return 'Sonnet 4.5';
if (model.includes('haiku')) return 'Haiku 4.5'; if (model.includes('haiku')) return 'Haiku 4.5';
// Codex/GPT models // Codex/GPT models - specific formatting
if (model === 'gpt-5.2') return 'GPT-5.2'; if (model === 'codex-gpt-5.2-codex') return 'GPT-5.2 Codex';
if (model === 'gpt-5.1-codex-max') return 'GPT-5.1 Max'; if (model === 'codex-gpt-5.2') return 'GPT-5.2';
if (model === 'gpt-5.1-codex') return 'GPT-5.1 Codex'; if (model === 'codex-gpt-5.1-codex-max') return 'GPT-5.1 Max';
if (model === 'gpt-5.1-codex-mini') return 'GPT-5.1 Mini'; if (model === 'codex-gpt-5.1-codex-mini') return 'GPT-5.1 Mini';
if (model === 'gpt-5.1') return 'GPT-5.1'; if (model === 'codex-gpt-5.1') return 'GPT-5.1';
// Generic fallbacks for other GPT models
if (model.startsWith('gpt-')) return model.toUpperCase(); if (model.startsWith('gpt-')) return model.toUpperCase();
if (model.match(/^o\d/)) return model.toUpperCase(); // o1, o3, etc. if (model.match(/^o\d/)) return model.toUpperCase(); // o1, o3, etc.

View File

@@ -372,7 +372,13 @@ export const verifySession = async (): Promise<boolean> => {
'Content-Type': 'application/json', 'Content-Type': 'application/json',
}; };
// Add session token header if available // Electron mode: use API key header
const apiKey = getApiKey();
if (apiKey) {
headers['X-API-Key'] = apiKey;
}
// Add session token header if available (web mode)
const sessionToken = getSessionToken(); const sessionToken = getSessionToken();
if (sessionToken) { if (sessionToken) {
headers['X-Session-Token'] = sessionToken; headers['X-Session-Token'] = sessionToken;

View File

@@ -1,6 +1,7 @@
import { clsx, type ClassValue } from 'clsx'; import { clsx, type ClassValue } from 'clsx';
import { twMerge } from 'tailwind-merge'; import { twMerge } from 'tailwind-merge';
import type { ModelAlias, ModelProvider } from '@/store/app-store'; import type { ModelAlias, ModelProvider } from '@/store/app-store';
import { CODEX_MODEL_CONFIG_MAP, codexModelHasThinking } from '@automaker/types';
export function cn(...inputs: ClassValue[]) { export function cn(...inputs: ClassValue[]) {
return twMerge(clsx(inputs)); return twMerge(clsx(inputs));
@@ -8,8 +9,31 @@ export function cn(...inputs: ClassValue[]) {
/** /**
* Determine if the current model supports extended thinking controls * Determine if the current model supports extended thinking controls
* Note: This is for Claude's "thinking levels" only, not Codex's "reasoning effort"
*
* Rules:
* - Claude models: support thinking (sonnet-4.5-thinking, opus-4.5-thinking, etc.)
* - Cursor models: NO thinking controls (handled internally by Cursor CLI)
* - Codex models: NO thinking controls (they use reasoningEffort instead)
*/ */
export function modelSupportsThinking(_model?: ModelAlias | string): boolean { export function modelSupportsThinking(_model?: ModelAlias | string): boolean {
if (!_model) return true;
// Cursor models - don't show thinking controls
if (_model.startsWith('cursor-')) {
return false;
}
// Codex models - use reasoningEffort, not thinkingLevel
if (_model.startsWith('codex-')) {
return false;
}
// Bare gpt- models (legacy) - assume Codex, no thinking controls
if (_model.startsWith('gpt-')) {
return false;
}
// All Claude models support thinking // All Claude models support thinking
return true; return true;
} }
@@ -26,13 +50,12 @@ export function getProviderFromModel(model?: string): ModelProvider {
return 'cursor'; return 'cursor';
} }
// Check for Codex/OpenAI models (gpt- prefix or o-series) // Check for Codex/OpenAI models (codex- prefix, gpt- prefix, or o-series)
const CODEX_MODEL_PREFIXES = ['gpt-'];
const OPENAI_O_SERIES_PATTERN = /^o\d/;
if ( if (
CODEX_MODEL_PREFIXES.some((prefix) => model.startsWith(prefix)) || model.startsWith('codex-') ||
OPENAI_O_SERIES_PATTERN.test(model) || model.startsWith('codex:') ||
model.startsWith('codex:') model.startsWith('gpt-') ||
/^o\d/.test(model)
) { ) {
return 'codex'; return 'codex';
} }
@@ -50,14 +73,16 @@ export function getModelDisplayName(model: ModelAlias | string): string {
sonnet: 'Claude Sonnet', sonnet: 'Claude Sonnet',
opus: 'Claude Opus', opus: 'Claude Opus',
// Codex models // Codex models
'gpt-5.2': 'GPT-5.2', 'codex-gpt-5.2': 'GPT-5.2',
'gpt-5.1-codex-max': 'GPT-5.1 Codex Max', 'codex-gpt-5.1-codex-max': 'GPT-5.1 Codex Max',
'gpt-5.1-codex': 'GPT-5.1 Codex', 'codex-gpt-5.1-codex': 'GPT-5.1 Codex',
'gpt-5.1-codex-mini': 'GPT-5.1 Codex Mini', 'codex-gpt-5.1-codex-mini': 'GPT-5.1 Codex Mini',
'gpt-5.1': 'GPT-5.1', 'codex-gpt-5.1': 'GPT-5.1',
// Cursor models (common ones) // Cursor models (common ones)
'cursor-auto': 'Cursor Auto', 'cursor-auto': 'Cursor Auto',
'cursor-composer-1': 'Composer 1', 'cursor-composer-1': 'Composer 1',
'cursor-gpt-5.2': 'GPT-5.2',
'cursor-gpt-5.1': 'GPT-5.1',
}; };
return displayNames[model] || model; return displayNames[model] || model;
} }

View File

@@ -29,6 +29,7 @@ import { ThemeOption, themeOptions } from '@/config/theme-options';
import { SandboxRiskDialog } from '@/components/dialogs/sandbox-risk-dialog'; import { SandboxRiskDialog } from '@/components/dialogs/sandbox-risk-dialog';
import { SandboxRejectionScreen } from '@/components/dialogs/sandbox-rejection-screen'; import { SandboxRejectionScreen } from '@/components/dialogs/sandbox-rejection-screen';
import { LoadingState } from '@/components/ui/loading-state'; import { LoadingState } from '@/components/ui/loading-state';
import { useProjectSettingsLoader } from '@/hooks/use-project-settings-loader';
const logger = createLogger('RootLayout'); const logger = createLogger('RootLayout');
@@ -76,6 +77,9 @@ function RootLayoutContent() {
const isAuthenticated = useAuthStore((s) => s.isAuthenticated); const isAuthenticated = useAuthStore((s) => s.isAuthenticated);
const { openFileBrowser } = useFileBrowser(); const { openFileBrowser } = useFileBrowser();
// Load project settings when switching projects
useProjectSettingsLoader();
const isSetupRoute = location.pathname === '/setup'; const isSetupRoute = location.pathname === '/setup';
const isLoginRoute = location.pathname === '/login'; const isLoginRoute = location.pathname === '/login';
const isLoggedOutRoute = location.pathname === '/logged-out'; const isLoggedOutRoute = location.pathname === '/logged-out';

View File

@@ -1169,7 +1169,7 @@ const initialState: AppState = {
enabledCursorModels: getAllCursorModelIds(), // All Cursor models enabled by default enabledCursorModels: getAllCursorModelIds(), // All Cursor models enabled by default
cursorDefaultModel: 'auto', // Default to auto selection cursorDefaultModel: 'auto', // Default to auto selection
enabledCodexModels: getAllCodexModelIds(), // All Codex models enabled by default enabledCodexModels: getAllCodexModelIds(), // All Codex models enabled by default
codexDefaultModel: 'gpt-5.2-codex', // Default to GPT-5.2-Codex codexDefaultModel: 'codex-gpt-5.2-codex', // Default to GPT-5.2-Codex
codexAutoLoadAgents: false, // Default to disabled (user must opt-in) codexAutoLoadAgents: false, // Default to disabled (user must opt-in)
codexSandboxMode: 'workspace-write', // Default to workspace-write for safety codexSandboxMode: 'workspace-write', // Default to workspace-write for safety
codexApprovalPolicy: 'on-request', // Default to on-request for balanced safety codexApprovalPolicy: 'on-request', // Default to on-request for balanced safety

Binary file not shown.

After

Width:  |  Height:  |  Size: 1.8 MiB

View File

@@ -0,0 +1,399 @@
/**
* Board Background Persistence End-to-End Test
*
* Tests that board background settings are properly saved and loaded when switching projects.
* This verifies that:
* 1. Background settings are saved to .automaker-local/settings.json
* 2. Settings are loaded when switching back to a project
* 3. Background image, opacity, and other settings are correctly restored
* 4. Settings persist across app restarts (new page loads)
*
* This test prevents regression of the board background loading bug where
* settings were saved but never loaded when switching projects.
*/
import { test, expect } from '@playwright/test';
import * as fs from 'fs';
import * as path from 'path';
import {
createTempDirPath,
cleanupTempDir,
authenticateForTests,
handleLoginScreenIfPresent,
} from '../utils';
// Unique temp dir for this test run; created in beforeAll and removed in afterAll
// so parallel/retried runs cannot collide on the same fixture directories.
const TEST_TEMP_DIR = createTempDirPath('board-bg-test');
test.describe('Board Background Persistence', () => {
  test.beforeAll(async () => {
    // Create test temp directory (idempotent: skip if a previous run left it behind)
    if (!fs.existsSync(TEST_TEMP_DIR)) {
      fs.mkdirSync(TEST_TEMP_DIR, { recursive: true });
    }
  });

  test.afterAll(async () => {
    // Cleanup temp directory so fixtures do not accumulate between runs
    cleanupTempDir(TEST_TEMP_DIR);
  });

  // Scenario 1: project A has board-background settings on disk, project B does not.
  // Switching A -> B -> A must re-fetch A's settings via POST /api/settings/project.
  test('should load board background settings when switching projects', async ({ page }) => {
    const projectAName = `project-a-${Date.now()}`;
    const projectBName = `project-b-${Date.now()}`;
    const projectAPath = path.join(TEST_TEMP_DIR, projectAName);
    const projectBPath = path.join(TEST_TEMP_DIR, projectBName);
    // NOTE(review): id and name each call Date.now() separately, so they may differ
    // by a millisecond; only equality with themselves is relied upon below.
    const projectAId = `project-a-${Date.now()}`;
    const projectBId = `project-b-${Date.now()}`;

    // Create both project directories
    fs.mkdirSync(projectAPath, { recursive: true });
    fs.mkdirSync(projectBPath, { recursive: true });

    // Create basic files for both projects so they look like real workspaces
    for (const [name, projectPath] of [
      [projectAName, projectAPath],
      [projectBName, projectBPath],
    ]) {
      fs.writeFileSync(
        path.join(projectPath, 'package.json'),
        JSON.stringify({ name, version: '1.0.0' }, null, 2)
      );
      fs.writeFileSync(path.join(projectPath, 'README.md'), `# ${name}\n`);
    }

    // Create .automaker-local directory for project A with background settings
    const automakerDirA = path.join(projectAPath, '.automaker-local');
    fs.mkdirSync(automakerDirA, { recursive: true });
    fs.mkdirSync(path.join(automakerDirA, 'board'), { recursive: true });
    fs.mkdirSync(path.join(automakerDirA, 'features'), { recursive: true });
    fs.mkdirSync(path.join(automakerDirA, 'context'), { recursive: true });

    // Copy actual background image from test fixtures (a real JPEG, so the
    // renderer can actually load it rather than 404 on a fake path)
    const backgroundPath = path.join(automakerDirA, 'board', 'background.jpg');
    const testImagePath = path.join(__dirname, '..', 'img', 'background.jpg');
    fs.copyFileSync(testImagePath, backgroundPath);

    // Create settings.json with board background configuration; these exact
    // values are asserted again at the end of the test to prove nothing
    // clobbered the file while switching projects.
    const settingsPath = path.join(automakerDirA, 'settings.json');
    const backgroundSettings = {
      version: 1,
      boardBackground: {
        imagePath: backgroundPath,
        cardOpacity: 85,
        columnOpacity: 60,
        columnBorderEnabled: true,
        cardGlassmorphism: true,
        cardBorderEnabled: false,
        cardBorderOpacity: 50,
        hideScrollbar: true,
        imageVersion: Date.now(),
      },
    };
    fs.writeFileSync(settingsPath, JSON.stringify(backgroundSettings, null, 2));

    // Create minimal automaker-local directory for project B (no background)
    const automakerDirB = path.join(projectBPath, '.automaker-local');
    fs.mkdirSync(automakerDirB, { recursive: true });
    fs.mkdirSync(path.join(automakerDirB, 'features'), { recursive: true });
    fs.mkdirSync(path.join(automakerDirB, 'context'), { recursive: true });
    fs.writeFileSync(
      path.join(automakerDirB, 'settings.json'),
      JSON.stringify({ version: 1 }, null, 2)
    );

    // Set up app state with both projects in the list (not recent, but in projects list).
    // addInitScript runs before any app code, so the zustand-persisted localStorage
    // snapshot below is what the store hydrates from on first load.
    await page.addInitScript(
      ({ projects }: { projects: string[] }) => {
        // projects is a flat positional array: [idA, nameA, pathA, idB, nameB, pathB]
        const appState = {
          state: {
            projects: [
              {
                id: projects[0],
                name: projects[1],
                path: projects[2],
                lastOpened: new Date(Date.now() - 86400000).toISOString(), // 1 day ago
                theme: 'red',
              },
              {
                id: projects[3],
                name: projects[4],
                path: projects[5],
                lastOpened: new Date(Date.now() - 172800000).toISOString(), // 2 days ago
                theme: 'red',
              },
            ],
            currentProject: null,
            currentView: 'welcome',
            theme: 'red',
            sidebarOpen: true,
            apiKeys: { anthropic: '', google: '' },
            chatSessions: [],
            chatHistoryOpen: false,
            maxConcurrency: 3,
            boardBackgroundByProject: {},
          },
          version: 2,
        };
        localStorage.setItem('automaker-storage', JSON.stringify(appState));
        // Setup complete
        const setupState = {
          state: {
            setupComplete: true,
            workspaceDir: '/tmp',
          },
          version: 0,
        };
        localStorage.setItem('setup-storage', JSON.stringify(setupState));
      },
      { projects: [projectAId, projectAName, projectAPath, projectBId, projectBName, projectBPath] }
    );

    // Track API calls to /api/settings/project to verify settings are being loaded
    const settingsApiCalls: Array<{ url: string; method: string; body: string }> = [];
    page.on('request', (request) => {
      if (request.url().includes('/api/settings/project') && request.method() === 'POST') {
        settingsApiCalls.push({
          url: request.url(),
          method: request.method(),
          body: request.postData() || '',
        });
      }
    });

    // Navigate to the app
    await authenticateForTests(page);
    await page.goto('/');
    await page.waitForLoadState('load');
    await handleLoginScreenIfPresent(page);

    // Wait for welcome view
    await expect(page.locator('[data-testid="welcome-view"]')).toBeVisible({ timeout: 10000 });

    // Open project A (has background settings)
    const projectACard = page.locator(`[data-testid="recent-project-${projectAId}"]`);
    await expect(projectACard).toBeVisible();
    await projectACard.click();

    // Wait for board view
    await expect(page.locator('[data-testid="board-view"]')).toBeVisible({ timeout: 15000 });

    // Verify project A is current
    await expect(
      page.locator('[data-testid="project-selector"]').getByText(projectAName)
    ).toBeVisible({ timeout: 5000 });

    // CRITICAL: Wait for settings to be loaded (useProjectSettingsLoader hook)
    // This ensures the background settings are fetched from the server
    // NOTE(review): fixed timeouts are flake-prone; waiting on the
    // /api/settings/project response would be more robust — TODO confirm
    await page.waitForTimeout(2000);

    // Check if background settings were applied by checking the store
    // We can't directly access React state, so we'll verify via DOM/CSS
    const boardView = page.locator('[data-testid="board-view"]');
    await expect(boardView).toBeVisible();

    // Wait for initial project load to stabilize
    await page.waitForTimeout(500);

    // Switch to project B (no background)
    const projectSelector = page.locator('[data-testid="project-selector"]');
    await projectSelector.click();

    // Wait for dropdown to be visible
    await expect(page.locator('[data-testid="project-picker-dropdown"]')).toBeVisible({
      timeout: 5000,
    });

    const projectPickerB = page.locator(`[data-testid="project-option-${projectBId}"]`);
    await expect(projectPickerB).toBeVisible({ timeout: 5000 });
    await projectPickerB.click();

    // Wait for project B to load
    await expect(
      page.locator('[data-testid="project-selector"]').getByText(projectBName)
    ).toBeVisible({ timeout: 5000 });

    // Wait a bit for project B to fully load before switching
    await page.waitForTimeout(500);

    // Switch back to project A
    await projectSelector.click();

    // Wait for dropdown to be visible
    await expect(page.locator('[data-testid="project-picker-dropdown"]')).toBeVisible({
      timeout: 5000,
    });

    const projectPickerA = page.locator(`[data-testid="project-option-${projectAId}"]`);
    await expect(projectPickerA).toBeVisible({ timeout: 5000 });
    await projectPickerA.click();

    // Verify we're back on project A
    await expect(
      page.locator('[data-testid="project-selector"]').getByText(projectAName)
    ).toBeVisible({ timeout: 5000 });

    // CRITICAL: Wait for settings to be loaded again
    await page.waitForTimeout(2000);

    // Verify that the settings API was called for project A (at least twice - initial load and switch back)
    const projectASettingsCalls = settingsApiCalls.filter((call) =>
      call.body.includes(projectAPath)
    );

    // Debug: log all API calls if test fails
    if (projectASettingsCalls.length < 2) {
      console.log('Total settings API calls:', settingsApiCalls.length);
      console.log('API calls:', JSON.stringify(settingsApiCalls, null, 2));
      console.log('Looking for path:', projectAPath);
    }

    expect(projectASettingsCalls.length).toBeGreaterThanOrEqual(2);

    // Verify settings file still exists with correct data
    const loadedSettings = JSON.parse(fs.readFileSync(settingsPath, 'utf-8'));
    expect(loadedSettings.boardBackground).toBeDefined();
    expect(loadedSettings.boardBackground.imagePath).toBe(backgroundPath);
    expect(loadedSettings.boardBackground.cardOpacity).toBe(85);
    expect(loadedSettings.boardBackground.columnOpacity).toBe(60);
    expect(loadedSettings.boardBackground.hideScrollbar).toBe(true);

    // The test passing means:
    // 1. The useProjectSettingsLoader hook is working
    // 2. Settings are loaded when switching projects
    // 3. The API call to /api/settings/project is made correctly
  });

  // Scenario 2: app starts with currentProject already set (simulated restart);
  // settings must be loaded on initial mount, not only on project switch.
  test('should load background settings on app restart', async ({ page }) => {
    const projectName = `restart-test-${Date.now()}`;
    const projectPath = path.join(TEST_TEMP_DIR, projectName);
    const projectId = `project-${Date.now()}`;

    // Create project directory
    fs.mkdirSync(projectPath, { recursive: true });
    fs.writeFileSync(
      path.join(projectPath, 'package.json'),
      JSON.stringify({ name: projectName, version: '1.0.0' }, null, 2)
    );

    // Create .automaker-local with background settings
    const automakerDir = path.join(projectPath, '.automaker-local');
    fs.mkdirSync(automakerDir, { recursive: true });
    fs.mkdirSync(path.join(automakerDir, 'board'), { recursive: true });
    fs.mkdirSync(path.join(automakerDir, 'features'), { recursive: true });
    fs.mkdirSync(path.join(automakerDir, 'context'), { recursive: true });

    // Copy actual background image from test fixtures
    const backgroundPath = path.join(automakerDir, 'board', 'background.jpg');
    const testImagePath = path.join(__dirname, '..', 'img', 'background.jpg');
    fs.copyFileSync(testImagePath, backgroundPath);

    const settingsPath = path.join(automakerDir, 'settings.json');
    fs.writeFileSync(
      settingsPath,
      JSON.stringify(
        {
          version: 1,
          boardBackground: {
            imagePath: backgroundPath,
            cardOpacity: 90,
            columnOpacity: 70,
            imageVersion: Date.now(),
          },
        },
        null,
        2
      )
    );

    // Set up with project as current using direct localStorage; currentView is
    // 'board' so the app should land directly on the board, skipping welcome.
    await page.addInitScript(
      ({ project }: { project: string[] }) => {
        // project is a flat positional array: [id, name, path]
        const projectObj = {
          id: project[0],
          name: project[1],
          path: project[2],
          lastOpened: new Date().toISOString(),
          theme: 'red',
        };

        const appState = {
          state: {
            projects: [projectObj],
            currentProject: projectObj,
            currentView: 'board',
            theme: 'red',
            sidebarOpen: true,
            apiKeys: { anthropic: '', google: '' },
            chatSessions: [],
            chatHistoryOpen: false,
            maxConcurrency: 3,
            boardBackgroundByProject: {},
          },
          version: 2,
        };
        localStorage.setItem('automaker-storage', JSON.stringify(appState));

        // Setup complete
        const setupState = {
          state: {
            setupComplete: true,
            workspaceDir: '/tmp',
          },
          version: 0,
        };
        localStorage.setItem('setup-storage', JSON.stringify(setupState));
      },
      { project: [projectId, projectName, projectPath] }
    );

    // Track API calls to /api/settings/project to verify settings are being loaded
    const settingsApiCalls: Array<{ url: string; method: string; body: string }> = [];
    page.on('request', (request) => {
      if (request.url().includes('/api/settings/project') && request.method() === 'POST') {
        settingsApiCalls.push({
          url: request.url(),
          method: request.method(),
          body: request.postData() || '',
        });
      }
    });

    // Navigate and authenticate
    await authenticateForTests(page);
    await page.goto('/');
    await page.waitForLoadState('load');
    await handleLoginScreenIfPresent(page);

    // Should go straight to board view (not welcome) since we have currentProject
    await expect(page.locator('[data-testid="board-view"]')).toBeVisible({ timeout: 15000 });

    // Wait for settings to load
    // NOTE(review): same fixed-timeout caveat as the first test — TODO confirm
    await page.waitForTimeout(2000);

    // Verify that the settings API was called for this project
    const projectSettingsCalls = settingsApiCalls.filter((call) => call.body.includes(projectPath));

    // Debug: log all API calls if test fails
    if (projectSettingsCalls.length < 1) {
      console.log('Total settings API calls:', settingsApiCalls.length);
      console.log('API calls:', JSON.stringify(settingsApiCalls, null, 2));
      console.log('Looking for path:', projectPath);
    }

    expect(projectSettingsCalls.length).toBeGreaterThanOrEqual(1);

    // Verify settings file exists with correct data
    const loadedSettings = JSON.parse(fs.readFileSync(settingsPath, 'utf-8'));
    expect(loadedSettings.boardBackground).toBeDefined();
    expect(loadedSettings.boardBackground.imagePath).toBe(backgroundPath);
    expect(loadedSettings.boardBackground.cardOpacity).toBe(90);
    expect(loadedSettings.boardBackground.columnOpacity).toBe(70);

    // The test passing means:
    // 1. The useProjectSettingsLoader hook is working
    // 2. Settings are loaded when app starts with a currentProject
    // 3. The API call to /api/settings/project is made correctly
  });
});

View File

@@ -21,7 +21,7 @@ import {
} from '@automaker/types'; } from '@automaker/types';
// Pattern definitions for Codex/OpenAI models // Pattern definitions for Codex/OpenAI models
const CODEX_MODEL_PREFIXES = ['gpt-']; const CODEX_MODEL_PREFIXES = ['codex-', 'gpt-'];
const OPENAI_O_SERIES_PATTERN = /^o\d/; const OPENAI_O_SERIES_PATTERN = /^o\d/;
const OPENAI_O_SERIES_ALLOWED_MODELS = new Set<string>(); const OPENAI_O_SERIES_ALLOWED_MODELS = new Set<string>();
@@ -62,6 +62,12 @@ export function resolveModelString(
return modelKey; return modelKey;
} }
// Codex model with explicit prefix (e.g., "codex-gpt-5.1-codex-max") - pass through unchanged
if (modelKey.startsWith(PROVIDER_PREFIXES.codex)) {
console.log(`[ModelResolver] Using Codex model: ${modelKey}`);
return modelKey;
}
// Full Claude model string - pass through unchanged // Full Claude model string - pass through unchanged
if (modelKey.includes('claude-')) { if (modelKey.includes('claude-')) {
console.log(`[ModelResolver] Using full Claude model string: ${modelKey}`); console.log(`[ModelResolver] Using full Claude model string: ${modelKey}`);
@@ -75,8 +81,7 @@ export function resolveModelString(
return resolved; return resolved;
} }
// OpenAI/Codex models - check BEFORE bare Cursor models since they overlap // OpenAI/Codex models - check for codex- or gpt- prefix
// (Cursor supports gpt models, but bare "gpt-*" should route to Codex)
if ( if (
CODEX_MODEL_PREFIXES.some((prefix) => modelKey.startsWith(prefix)) || CODEX_MODEL_PREFIXES.some((prefix) => modelKey.startsWith(prefix)) ||
(OPENAI_O_SERIES_PATTERN.test(modelKey) && OPENAI_O_SERIES_ALLOWED_MODELS.has(modelKey)) (OPENAI_O_SERIES_PATTERN.test(modelKey) && OPENAI_O_SERIES_ALLOWED_MODELS.has(modelKey))

View File

@@ -2,14 +2,15 @@
* Codex CLI Model IDs * Codex CLI Model IDs
* Based on OpenAI Codex CLI official models * Based on OpenAI Codex CLI official models
* Reference: https://developers.openai.com/codex/models/ * Reference: https://developers.openai.com/codex/models/
*
* IMPORTANT: All Codex models use 'codex-' prefix to distinguish from Cursor CLI models
*/ */
export type CodexModelId = export type CodexModelId =
| 'gpt-5.2-codex' // Most advanced agentic coding model for complex software engineering | 'codex-gpt-5.2-codex'
| 'gpt-5-codex' // Purpose-built for Codex CLI with versatile tool use | 'codex-gpt-5.1-codex-max'
| 'gpt-5-codex-mini' // Faster workflows optimized for low-latency code Q&A and editing | 'codex-gpt-5.1-codex-mini'
| 'codex-1' // Version of o3 optimized for software engineering | 'codex-gpt-5.2'
| 'codex-mini-latest' // Version of o4-mini for Codex, optimized for faster workflows | 'codex-gpt-5.1';
| 'gpt-5'; // GPT-5 base flagship model
/** /**
* Codex model metadata * Codex model metadata
@@ -25,47 +26,41 @@ export interface CodexModelConfig {
/** /**
* Complete model map for Codex CLI * Complete model map for Codex CLI
* All keys use 'codex-' prefix to distinguish from Cursor CLI models
*/ */
export const CODEX_MODEL_CONFIG_MAP: Record<CodexModelId, CodexModelConfig> = { export const CODEX_MODEL_CONFIG_MAP: Record<CodexModelId, CodexModelConfig> = {
'gpt-5.2-codex': { 'codex-gpt-5.2-codex': {
id: 'gpt-5.2-codex', id: 'codex-gpt-5.2-codex',
label: 'GPT-5.2-Codex', label: 'GPT-5.2-Codex',
description: 'Most advanced agentic coding model for complex software engineering', description: 'Most advanced agentic coding model for complex software engineering',
hasThinking: true, hasThinking: true,
supportsVision: true, // GPT-5 supports vision supportsVision: true,
}, },
'gpt-5-codex': { 'codex-gpt-5.1-codex-max': {
id: 'gpt-5-codex', id: 'codex-gpt-5.1-codex-max',
label: 'GPT-5-Codex', label: 'GPT-5.1-Codex-Max',
description: 'Purpose-built for Codex CLI with versatile tool use', description: 'Optimized for long-horizon, agentic coding tasks in Codex',
hasThinking: true, hasThinking: true,
supportsVision: true, supportsVision: true,
}, },
'gpt-5-codex-mini': { 'codex-gpt-5.1-codex-mini': {
id: 'gpt-5-codex-mini', id: 'codex-gpt-5.1-codex-mini',
label: 'GPT-5-Codex-Mini', label: 'GPT-5.1-Codex-Mini',
description: 'Faster workflows optimized for low-latency code Q&A and editing', description: 'Smaller, more cost-effective version for faster workflows',
hasThinking: false, hasThinking: false,
supportsVision: true, supportsVision: true,
}, },
'codex-1': { 'codex-gpt-5.2': {
id: 'codex-1', id: 'codex-gpt-5.2',
label: 'Codex-1', label: 'GPT-5.2 (Codex)',
description: 'Version of o3 optimized for software engineering', description: 'Best general agentic model for tasks across industries and domains via Codex',
hasThinking: true, hasThinking: true,
supportsVision: true, supportsVision: true,
}, },
'codex-mini-latest': { 'codex-gpt-5.1': {
id: 'codex-mini-latest', id: 'codex-gpt-5.1',
label: 'Codex-Mini-Latest', label: 'GPT-5.1 (Codex)',
description: 'Version of o4-mini for Codex, optimized for faster workflows', description: 'Great for coding and agentic tasks across domains via Codex',
hasThinking: false,
supportsVision: true,
},
'gpt-5': {
id: 'gpt-5',
label: 'GPT-5',
description: 'GPT-5 base flagship model',
hasThinking: true, hasThinking: true,
supportsVision: true, supportsVision: true,
}, },

View File

@@ -1,6 +1,8 @@
/** /**
* Cursor CLI Model IDs * Cursor CLI Model IDs
* Reference: https://cursor.com/docs * Reference: https://cursor.com/docs
*
* IMPORTANT: GPT models use 'cursor-' prefix to distinguish from Codex CLI models
*/ */
export type CursorModelId = export type CursorModelId =
| 'auto' // Auto-select best model | 'auto' // Auto-select best model
@@ -12,14 +14,14 @@ export type CursorModelId =
| 'opus-4.1' // Claude Opus 4.1 | 'opus-4.1' // Claude Opus 4.1
| 'gemini-3-pro' // Gemini 3 Pro | 'gemini-3-pro' // Gemini 3 Pro
| 'gemini-3-flash' // Gemini 3 Flash | 'gemini-3-flash' // Gemini 3 Flash
| 'gpt-5.2' // GPT-5.2 | 'cursor-gpt-5.2' // GPT-5.2 via Cursor
| 'gpt-5.1' // GPT-5.1 | 'cursor-gpt-5.1' // GPT-5.1 via Cursor
| 'gpt-5.2-high' // GPT-5.2 High | 'cursor-gpt-5.2-high' // GPT-5.2 High via Cursor
| 'gpt-5.1-high' // GPT-5.1 High | 'cursor-gpt-5.1-high' // GPT-5.1 High via Cursor
| 'gpt-5.1-codex' // GPT-5.1 Codex | 'cursor-gpt-5.1-codex' // GPT-5.1 Codex via Cursor
| 'gpt-5.1-codex-high' // GPT-5.1 Codex High | 'cursor-gpt-5.1-codex-high' // GPT-5.1 Codex High via Cursor
| 'gpt-5.1-codex-max' // GPT-5.1 Codex Max | 'cursor-gpt-5.1-codex-max' // GPT-5.1 Codex Max via Cursor
| 'gpt-5.1-codex-max-high' // GPT-5.1 Codex Max High | 'cursor-gpt-5.1-codex-max-high' // GPT-5.1 Codex Max High via Cursor
| 'grok'; // Grok | 'grok'; // Grok
/** /**
@@ -101,57 +103,57 @@ export const CURSOR_MODEL_MAP: Record<CursorModelId, CursorModelConfig> = {
hasThinking: false, hasThinking: false,
supportsVision: false, supportsVision: false,
}, },
'gpt-5.2': { 'cursor-gpt-5.2': {
id: 'gpt-5.2', id: 'cursor-gpt-5.2',
label: 'GPT-5.2', label: 'GPT-5.2',
description: 'OpenAI GPT-5.2 via Cursor', description: 'OpenAI GPT-5.2 via Cursor',
hasThinking: false, hasThinking: false,
supportsVision: false, supportsVision: false,
}, },
'gpt-5.1': { 'cursor-gpt-5.1': {
id: 'gpt-5.1', id: 'cursor-gpt-5.1',
label: 'GPT-5.1', label: 'GPT-5.1',
description: 'OpenAI GPT-5.1 via Cursor', description: 'OpenAI GPT-5.1 via Cursor',
hasThinking: false, hasThinking: false,
supportsVision: false, supportsVision: false,
}, },
'gpt-5.2-high': { 'cursor-gpt-5.2-high': {
id: 'gpt-5.2-high', id: 'cursor-gpt-5.2-high',
label: 'GPT-5.2 High', label: 'GPT-5.2 High',
description: 'OpenAI GPT-5.2 with high compute', description: 'OpenAI GPT-5.2 with high compute',
hasThinking: false, hasThinking: false,
supportsVision: false, supportsVision: false,
}, },
'gpt-5.1-high': { 'cursor-gpt-5.1-high': {
id: 'gpt-5.1-high', id: 'cursor-gpt-5.1-high',
label: 'GPT-5.1 High', label: 'GPT-5.1 High',
description: 'OpenAI GPT-5.1 with high compute', description: 'OpenAI GPT-5.1 with high compute',
hasThinking: false, hasThinking: false,
supportsVision: false, supportsVision: false,
}, },
'gpt-5.1-codex': { 'cursor-gpt-5.1-codex': {
id: 'gpt-5.1-codex', id: 'cursor-gpt-5.1-codex',
label: 'GPT-5.1 Codex', label: 'GPT-5.1 Codex',
description: 'OpenAI GPT-5.1 Codex for code generation', description: 'OpenAI GPT-5.1 Codex for code generation',
hasThinking: false, hasThinking: false,
supportsVision: false, supportsVision: false,
}, },
'gpt-5.1-codex-high': { 'cursor-gpt-5.1-codex-high': {
id: 'gpt-5.1-codex-high', id: 'cursor-gpt-5.1-codex-high',
label: 'GPT-5.1 Codex High', label: 'GPT-5.1 Codex High',
description: 'OpenAI GPT-5.1 Codex with high compute', description: 'OpenAI GPT-5.1 Codex with high compute',
hasThinking: false, hasThinking: false,
supportsVision: false, supportsVision: false,
}, },
'gpt-5.1-codex-max': { 'cursor-gpt-5.1-codex-max': {
id: 'gpt-5.1-codex-max', id: 'cursor-gpt-5.1-codex-max',
label: 'GPT-5.1 Codex Max', label: 'GPT-5.1 Codex Max',
description: 'OpenAI GPT-5.1 Codex Max capacity', description: 'OpenAI GPT-5.1 Codex Max capacity',
hasThinking: false, hasThinking: false,
supportsVision: false, supportsVision: false,
}, },
'gpt-5.1-codex-max-high': { 'cursor-gpt-5.1-codex-max-high': {
id: 'gpt-5.1-codex-max-high', id: 'cursor-gpt-5.1-codex-max-high',
label: 'GPT-5.1 Codex Max High', label: 'GPT-5.1 Codex Max High',
description: 'OpenAI GPT-5.1 Codex Max with high compute', description: 'OpenAI GPT-5.1 Codex Max with high compute',
hasThinking: false, hasThinking: false,
@@ -224,14 +226,14 @@ export interface GroupedModel {
export const CURSOR_MODEL_GROUPS: GroupedModel[] = [ export const CURSOR_MODEL_GROUPS: GroupedModel[] = [
// GPT-5.2 group (compute levels) // GPT-5.2 group (compute levels)
{ {
baseId: 'gpt-5.2-group', baseId: 'cursor-gpt-5.2-group',
label: 'GPT-5.2', label: 'GPT-5.2',
description: 'OpenAI GPT-5.2 via Cursor', description: 'OpenAI GPT-5.2 via Cursor',
variantType: 'compute', variantType: 'compute',
variants: [ variants: [
{ id: 'gpt-5.2', label: 'Standard', description: 'Default compute level' }, { id: 'cursor-gpt-5.2', label: 'Standard', description: 'Default compute level' },
{ {
id: 'gpt-5.2-high', id: 'cursor-gpt-5.2-high',
label: 'High', label: 'High',
description: 'High compute level', description: 'High compute level',
badge: 'More tokens', badge: 'More tokens',
@@ -240,14 +242,14 @@ export const CURSOR_MODEL_GROUPS: GroupedModel[] = [
}, },
// GPT-5.1 group (compute levels) // GPT-5.1 group (compute levels)
{ {
baseId: 'gpt-5.1-group', baseId: 'cursor-gpt-5.1-group',
label: 'GPT-5.1', label: 'GPT-5.1',
description: 'OpenAI GPT-5.1 via Cursor', description: 'OpenAI GPT-5.1 via Cursor',
variantType: 'compute', variantType: 'compute',
variants: [ variants: [
{ id: 'gpt-5.1', label: 'Standard', description: 'Default compute level' }, { id: 'cursor-gpt-5.1', label: 'Standard', description: 'Default compute level' },
{ {
id: 'gpt-5.1-high', id: 'cursor-gpt-5.1-high',
label: 'High', label: 'High',
description: 'High compute level', description: 'High compute level',
badge: 'More tokens', badge: 'More tokens',
@@ -256,16 +258,26 @@ export const CURSOR_MODEL_GROUPS: GroupedModel[] = [
}, },
// GPT-5.1 Codex group (capacity + compute matrix) // GPT-5.1 Codex group (capacity + compute matrix)
{ {
baseId: 'gpt-5.1-codex-group', baseId: 'cursor-gpt-5.1-codex-group',
label: 'GPT-5.1 Codex', label: 'GPT-5.1 Codex',
description: 'OpenAI GPT-5.1 Codex for code generation', description: 'OpenAI GPT-5.1 Codex for code generation',
variantType: 'capacity', variantType: 'capacity',
variants: [ variants: [
{ id: 'gpt-5.1-codex', label: 'Standard', description: 'Default capacity' }, { id: 'cursor-gpt-5.1-codex', label: 'Standard', description: 'Default capacity' },
{ id: 'gpt-5.1-codex-high', label: 'High', description: 'High compute', badge: 'Compute' },
{ id: 'gpt-5.1-codex-max', label: 'Max', description: 'Maximum capacity', badge: 'Capacity' },
{ {
id: 'gpt-5.1-codex-max-high', id: 'cursor-gpt-5.1-codex-high',
label: 'High',
description: 'High compute',
badge: 'Compute',
},
{
id: 'cursor-gpt-5.1-codex-max',
label: 'Max',
description: 'Maximum capacity',
badge: 'Capacity',
},
{
id: 'cursor-gpt-5.1-codex-max-high',
label: 'Max High', label: 'Max High',
description: 'Max capacity + high compute', description: 'Max capacity + high compute',
badge: 'Premium', badge: 'Premium',

View File

@@ -3,6 +3,7 @@
*/ */
import type { PlanningMode, ThinkingLevel } from './settings.js'; import type { PlanningMode, ThinkingLevel } from './settings.js';
import type { ReasoningEffort } from './provider.js';
/** /**
* A single entry in the description history * A single entry in the description history
@@ -49,6 +50,7 @@ export interface Feature {
branchName?: string; // Name of the feature branch (undefined = use current worktree) branchName?: string; // Name of the feature branch (undefined = use current worktree)
skipTests?: boolean; skipTests?: boolean;
thinkingLevel?: ThinkingLevel; thinkingLevel?: ThinkingLevel;
reasoningEffort?: ReasoningEffort;
planningMode?: PlanningMode; planningMode?: PlanningMode;
requirePlanApproval?: boolean; requirePlanApproval?: boolean;
planSpec?: { planSpec?: {

View File

@@ -186,6 +186,7 @@ export {
addProviderPrefix, addProviderPrefix,
getBareModelId, getBareModelId,
normalizeModelString, normalizeModelString,
validateBareModelId,
} from './provider-utils.js'; } from './provider-utils.js';
// Pipeline types // Pipeline types

View File

@@ -74,47 +74,39 @@ export const CODEX_MODELS: (ModelOption & { hasReasoning?: boolean })[] = [
{ {
id: CODEX_MODEL_MAP.gpt52Codex, id: CODEX_MODEL_MAP.gpt52Codex,
label: 'GPT-5.2-Codex', label: 'GPT-5.2-Codex',
description: 'Most advanced agentic coding model (default for ChatGPT users).', description: 'Most advanced agentic coding model for complex software engineering.',
badge: 'Premium', badge: 'Premium',
provider: 'codex', provider: 'codex',
hasReasoning: true, hasReasoning: true,
}, },
{ {
id: CODEX_MODEL_MAP.gpt5Codex, id: CODEX_MODEL_MAP.gpt51CodexMax,
label: 'GPT-5-Codex', label: 'GPT-5.1-Codex-Max',
description: 'Purpose-built for Codex CLI (default for CLI users).', description: 'Optimized for long-horizon, agentic coding tasks in Codex.',
badge: 'Balanced', badge: 'Premium',
provider: 'codex', provider: 'codex',
hasReasoning: true, hasReasoning: true,
}, },
{ {
id: CODEX_MODEL_MAP.gpt5CodexMini, id: CODEX_MODEL_MAP.gpt51CodexMini,
label: 'GPT-5-Codex-Mini', label: 'GPT-5.1-Codex-Mini',
description: 'Faster workflows for code Q&A and editing.', description: 'Smaller, more cost-effective version for faster workflows.',
badge: 'Speed', badge: 'Speed',
provider: 'codex', provider: 'codex',
hasReasoning: false, hasReasoning: false,
}, },
{ {
id: CODEX_MODEL_MAP.codex1, id: CODEX_MODEL_MAP.gpt52,
label: 'Codex-1', label: 'GPT-5.2',
description: 'o3-based model optimized for software engineering.', description: 'Best general agentic model for tasks across industries and domains.',
badge: 'Premium', badge: 'Balanced',
provider: 'codex', provider: 'codex',
hasReasoning: true, hasReasoning: true,
}, },
{ {
id: CODEX_MODEL_MAP.codexMiniLatest, id: CODEX_MODEL_MAP.gpt51,
label: 'Codex-Mini-Latest', label: 'GPT-5.1',
description: 'o4-mini-based model for faster workflows.', description: 'Great for coding and agentic tasks across domains.',
badge: 'Balanced',
provider: 'codex',
hasReasoning: false,
},
{
id: CODEX_MODEL_MAP.gpt5,
label: 'GPT-5',
description: 'GPT-5 base flagship model.',
badge: 'Balanced', badge: 'Balanced',
provider: 'codex', provider: 'codex',
hasReasoning: true, hasReasoning: true,
@@ -203,11 +195,10 @@ export function getModelDisplayName(model: ModelAlias | string): string {
sonnet: 'Claude Sonnet', sonnet: 'Claude Sonnet',
opus: 'Claude Opus', opus: 'Claude Opus',
[CODEX_MODEL_MAP.gpt52Codex]: 'GPT-5.2-Codex', [CODEX_MODEL_MAP.gpt52Codex]: 'GPT-5.2-Codex',
[CODEX_MODEL_MAP.gpt5Codex]: 'GPT-5-Codex', [CODEX_MODEL_MAP.gpt51CodexMax]: 'GPT-5.1-Codex-Max',
[CODEX_MODEL_MAP.gpt5CodexMini]: 'GPT-5-Codex-Mini', [CODEX_MODEL_MAP.gpt51CodexMini]: 'GPT-5.1-Codex-Mini',
[CODEX_MODEL_MAP.codex1]: 'Codex-1', [CODEX_MODEL_MAP.gpt52]: 'GPT-5.2',
[CODEX_MODEL_MAP.codexMiniLatest]: 'Codex-Mini-Latest', [CODEX_MODEL_MAP.gpt51]: 'GPT-5.1',
[CODEX_MODEL_MAP.gpt5]: 'GPT-5',
}; };
return displayNames[model] || model; return displayNames[model] || model;
} }

View File

@@ -11,23 +11,23 @@ export const CLAUDE_MODEL_MAP: Record<string, string> = {
* Codex/OpenAI model identifiers * Codex/OpenAI model identifiers
* Based on OpenAI Codex CLI official models * Based on OpenAI Codex CLI official models
* See: https://developers.openai.com/codex/models/ * See: https://developers.openai.com/codex/models/
*
* IMPORTANT: All Codex models use 'codex-' prefix to distinguish from Cursor CLI models
*/ */
export const CODEX_MODEL_MAP = { export const CODEX_MODEL_MAP = {
// Codex-specific models // Recommended Codex-specific models
/** Most advanced agentic coding model for complex software engineering (default for ChatGPT users) */ /** Most advanced agentic coding model for complex software engineering (default for ChatGPT users) */
gpt52Codex: 'gpt-5.2-codex', gpt52Codex: 'codex-gpt-5.2-codex',
/** Purpose-built for Codex CLI with versatile tool use (default for CLI users) */ /** Optimized for long-horizon, agentic coding tasks in Codex */
gpt5Codex: 'gpt-5-codex', gpt51CodexMax: 'codex-gpt-5.1-codex-max',
/** Faster workflows optimized for low-latency code Q&A and editing */ /** Smaller, more cost-effective version for faster workflows */
gpt5CodexMini: 'gpt-5-codex-mini', gpt51CodexMini: 'codex-gpt-5.1-codex-mini',
/** Version of o3 optimized for software engineering */
codex1: 'codex-1',
/** Version of o4-mini for Codex, optimized for faster workflows */
codexMiniLatest: 'codex-mini-latest',
// Base GPT-5 model (also available in Codex) // General-purpose GPT models (also available in Codex)
/** GPT-5 base flagship model */ /** Best general agentic model for tasks across industries and domains */
gpt5: 'gpt-5', gpt52: 'codex-gpt-5.2',
/** Great for coding and agentic tasks across domains */
gpt51: 'codex-gpt-5.1',
} as const; } as const;
export const CODEX_MODEL_IDS = Object.values(CODEX_MODEL_MAP); export const CODEX_MODEL_IDS = Object.values(CODEX_MODEL_MAP);
@@ -38,9 +38,9 @@ export const CODEX_MODEL_IDS = Object.values(CODEX_MODEL_MAP);
*/ */
export const REASONING_CAPABLE_MODELS = new Set([ export const REASONING_CAPABLE_MODELS = new Set([
CODEX_MODEL_MAP.gpt52Codex, CODEX_MODEL_MAP.gpt52Codex,
CODEX_MODEL_MAP.gpt5Codex, CODEX_MODEL_MAP.gpt51CodexMax,
CODEX_MODEL_MAP.gpt5, CODEX_MODEL_MAP.gpt52,
CODEX_MODEL_MAP.codex1, // o3-based model CODEX_MODEL_MAP.gpt51,
]); ]);
/** /**

View File

@@ -8,20 +8,20 @@
import type { ModelProvider } from './settings.js'; import type { ModelProvider } from './settings.js';
import { CURSOR_MODEL_MAP, type CursorModelId } from './cursor-models.js'; import { CURSOR_MODEL_MAP, type CursorModelId } from './cursor-models.js';
import { CLAUDE_MODEL_MAP, CODEX_MODEL_MAP, type CodexModelId } from './model.js'; import { CLAUDE_MODEL_MAP, CODEX_MODEL_MAP } from './model.js';
import { CODEX_MODEL_CONFIG_MAP, type CodexModelId } from './codex-models.js';
/** Provider prefix constants */ /** Provider prefix constants */
export const PROVIDER_PREFIXES = { export const PROVIDER_PREFIXES = {
cursor: 'cursor-', cursor: 'cursor-',
codex: 'codex-', codex: 'codex-',
// Add new provider prefixes here
} as const; } as const;
/** /**
* Check if a model string represents a Cursor model * Check if a model string represents a Cursor model
* *
* @param model - Model string to check (e.g., "cursor-composer-1" or "composer-1") * @param model - Model string to check (e.g., "cursor-composer-1" or "composer-1")
* @returns true if the model is a Cursor model * @returns true if the model is a Cursor model (excluding Codex-specific models)
*/ */
export function isCursorModel(model: string | undefined | null): boolean { export function isCursorModel(model: string | undefined | null): boolean {
if (!model || typeof model !== 'string') return false; if (!model || typeof model !== 'string') return false;
@@ -31,8 +31,13 @@ export function isCursorModel(model: string | undefined | null): boolean {
return true; return true;
} }
// Check if it's a bare Cursor model ID // Check if it's a bare Cursor model ID (excluding Codex-specific models)
return model in CURSOR_MODEL_MAP; // Codex-specific models should always route to Codex provider, not Cursor
if (model in CURSOR_MODEL_MAP) {
return true;
}
return false;
} }
/** /**
@@ -67,7 +72,7 @@ export function isCodexModel(model: string | undefined | null): boolean {
return true; return true;
} }
// Check if it's a gpt- model // Check if it's a gpt- model (bare gpt models go to Codex, not Cursor)
if (model.startsWith('gpt-')) { if (model.startsWith('gpt-')) {
return true; return true;
} }
@@ -78,8 +83,7 @@ export function isCodexModel(model: string | undefined | null): boolean {
} }
// Check if it's in the CODEX_MODEL_MAP // Check if it's in the CODEX_MODEL_MAP
const modelValues = Object.values(CODEX_MODEL_MAP); return model in CODEX_MODEL_MAP;
return modelValues.includes(model as CodexModelId);
} }
/** /**
@@ -178,9 +182,8 @@ export function normalizeModelString(model: string | undefined | null): string {
} }
// For Codex, bare gpt-* and o-series models are valid canonical forms // For Codex, bare gpt-* and o-series models are valid canonical forms
// Only add prefix if it's in CODEX_MODEL_MAP but doesn't have gpt-/o prefix // Check if it's in the CODEX_MODEL_MAP
const codexModelValues = Object.values(CODEX_MODEL_MAP); if (model in CODEX_MODEL_MAP) {
if (codexModelValues.includes(model as CodexModelId)) {
// If it already starts with gpt- or o, it's canonical // If it already starts with gpt- or o, it's canonical
if (model.startsWith('gpt-') || /^o\d/.test(model)) { if (model.startsWith('gpt-') || /^o\d/.test(model)) {
return model; return model;
@@ -193,3 +196,37 @@ export function normalizeModelString(model: string | undefined | null): string {
return model; return model;
} }
/**
* Validate that a model ID does not contain a provider prefix
*
* Providers should receive bare model IDs (e.g., "gpt-5.1-codex-max", "composer-1")
* without provider prefixes (e.g., NOT "codex-gpt-5.1-codex-max", NOT "cursor-composer-1").
*
* This validation ensures the ProviderFactory properly stripped prefixes before
* passing models to providers.
*
* @param model - Model ID to validate
* @param providerName - Name of the provider for error messages
* @throws Error if model contains a provider prefix
*
* @example
* validateBareModelId("gpt-5.1-codex-max", "CodexProvider"); // ✅ OK
* validateBareModelId("codex-gpt-5.1-codex-max", "CodexProvider"); // ❌ Throws error
*/
export function validateBareModelId(model: string, providerName: string): void {
if (!model || typeof model !== 'string') {
throw new Error(`[${providerName}] Invalid model ID: expected string, got ${typeof model}`);
}
for (const [provider, prefix] of Object.entries(PROVIDER_PREFIXES)) {
if (model.startsWith(prefix)) {
throw new Error(
`[${providerName}] Model ID should not contain provider prefix '${prefix}'. ` +
`Got: '${model}'. ` +
`This is likely a bug in ProviderFactory - it should strip the '${provider}' prefix ` +
`before passing the model to the provider.`
);
}
}
}

View File

@@ -95,7 +95,10 @@ export interface AgentDefinition {
*/ */
export interface ExecuteOptions { export interface ExecuteOptions {
prompt: string | Array<{ type: string; text?: string; source?: object }>; prompt: string | Array<{ type: string; text?: string; source?: object }>;
/** Bare model ID without provider prefix (e.g., "gpt-5.1-codex-max", "composer-1") */
model: string; model: string;
/** Original model ID with provider prefix for logging (e.g., "codex-gpt-5.1-codex-max") */
originalModel?: string;
cwd: string; cwd: string;
systemPrompt?: string | SystemPromptPreset; systemPrompt?: string | SystemPromptPreset;
maxTurns?: number; maxTurns?: number;

View File

@@ -11,6 +11,7 @@ import type { CursorModelId } from './cursor-models.js';
import { CURSOR_MODEL_MAP, getAllCursorModelIds } from './cursor-models.js'; import { CURSOR_MODEL_MAP, getAllCursorModelIds } from './cursor-models.js';
import type { PromptCustomization } from './prompts.js'; import type { PromptCustomization } from './prompts.js';
import type { CodexSandboxMode, CodexApprovalPolicy } from './codex.js'; import type { CodexSandboxMode, CodexApprovalPolicy } from './codex.js';
import type { ReasoningEffort } from './provider.js';
// Re-export ModelAlias for convenience // Re-export ModelAlias for convenience
export type { ModelAlias }; export type { ModelAlias };
@@ -108,14 +109,18 @@ const DEFAULT_CODEX_ADDITIONAL_DIRS: string[] = [];
/** /**
* PhaseModelEntry - Configuration for a single phase model * PhaseModelEntry - Configuration for a single phase model
* *
* Encapsulates both the model selection and optional thinking level * Encapsulates the model selection and optional reasoning/thinking capabilities:
* for Claude models. Cursor models handle thinking internally. * - Claude models: Use thinkingLevel for extended thinking
* - Codex models: Use reasoningEffort for reasoning intensity
* - Cursor models: Handle thinking internally
*/ */
export interface PhaseModelEntry { export interface PhaseModelEntry {
/** The model to use (Claude alias or Cursor model ID) */ /** The model to use (Claude alias, Cursor model ID, or Codex model ID) */
model: ModelAlias | CursorModelId; model: ModelAlias | CursorModelId | CodexModelId;
/** Extended thinking level (only applies to Claude models, defaults to 'none') */ /** Extended thinking level (only applies to Claude models, defaults to 'none') */
thinkingLevel?: ThinkingLevel; thinkingLevel?: ThinkingLevel;
/** Reasoning effort level (only applies to Codex models, defaults to 'none') */
reasoningEffort?: ReasoningEffort;
} }
/** /**
@@ -276,7 +281,7 @@ export function profileHasThinking(profile: AIProfile): boolean {
if (profile.provider === 'codex') { if (profile.provider === 'codex') {
// Codex models handle thinking internally (o-series models) // Codex models handle thinking internally (o-series models)
const model = profile.codexModel || 'gpt-5.2'; const model = profile.codexModel || 'codex-gpt-5.2';
return model.startsWith('o'); return model.startsWith('o');
} }
@@ -292,7 +297,7 @@ export function getProfileModelString(profile: AIProfile): string {
} }
if (profile.provider === 'codex') { if (profile.provider === 'codex') {
return `codex:${profile.codexModel || 'gpt-5.2'}`; return `codex:${profile.codexModel || 'codex-gpt-5.2'}`;
} }
// Claude // Claude