Merge branch 'v0.9.0rc' into feat/subagents-skills
Resolved conflict in agent-service.ts by keeping both:
- agents parameter for custom subagents (from our branch)
- thinkingLevel and reasoningEffort parameters (from v0.9.0rc)

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
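For context on the resolution: below is a minimal sketch of the kind of options object agent-service.ts can now build, carrying both the agents map and the v0.9.0rc thinking/reasoning knobs. The type and field shapes are assumptions for illustration; only the three parameter names come from the commit message.

// Hypothetical sketch only: option names beyond agents, thinkingLevel, and
// reasoningEffort are assumed, not taken from agent-service.ts.
type ThinkingLevel = 'low' | 'medium' | 'high';
type ReasoningEffort = 'low' | 'medium' | 'high';

interface AgentRunOptions {
  prompt: string;
  model: string;                                // bare model id, prefix already stripped
  agents?: Record<string, { prompt: string }>;  // custom subagents (from our branch)
  thinkingLevel?: ThinkingLevel;                // from v0.9.0rc
  reasoningEffort?: ReasoningEffort;            // from v0.9.0rc
}

const run: AgentRunOptions = {
  prompt: 'Refactor the auth module',
  model: 'claude-sonnet-4',                     // illustrative model id
  agents: { reviewer: { prompt: 'Review diffs for regressions' } },
  thinkingLevel: 'medium',
  reasoningEffort: 'high',
};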
@@ -10,7 +10,7 @@ import { BaseProvider } from './base-provider.js';
import { classifyError, getUserFriendlyErrorMessage, createLogger } from '@automaker/utils';
const logger = createLogger('ClaudeProvider');
import { getThinkingTokenBudget } from '@automaker/types';
import { getThinkingTokenBudget, validateBareModelId } from '@automaker/types';
import type {
  ExecuteOptions,
  ProviderMessage,
@@ -53,6 +53,10 @@ export class ClaudeProvider extends BaseProvider {
   * Execute a query using Claude Agent SDK
   */
  async *executeQuery(options: ExecuteOptions): AsyncGenerator<ProviderMessage> {
    // Validate that model doesn't have a provider prefix
    // AgentService should strip prefixes before passing to providers
    validateBareModelId(options.model, 'ClaudeProvider');

    const {
      prompt,
      model,
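All three providers in this commit open executeQuery with the same guard. The real validateBareModelId lives in @automaker/types and is not shown in this diff; the following is a minimal sketch, assuming it simply rejects model ids that still carry a provider prefix ('cursor-' is the only prefix named in this diff).

// Minimal sketch, not the real @automaker/types implementation.
function validateBareModelId(model: string | undefined, caller: string): void {
  if (!model) return;
  // 'cursor-' is the only prefix visible in this diff; treat it as the example.
  if (model.startsWith('cursor-')) {
    throw new Error(
      `${caller} received prefixed model id "${model}"; ` +
        'AgentService should strip provider prefixes before calling providers.'
    );
  }
}

validateBareModelId('claude-sonnet-4', 'ClaudeProvider');  // ok
// validateBareModelId('cursor-auto', 'CursorProvider');   // would throw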
@@ -7,16 +7,17 @@
import { CODEX_MODEL_MAP } from '@automaker/types';
import type { ModelDefinition } from './types.js';

const CONTEXT_WINDOW_200K = 200000;
const CONTEXT_WINDOW_256K = 256000;
const CONTEXT_WINDOW_128K = 128000;
const MAX_OUTPUT_32K = 32000;
const MAX_OUTPUT_16K = 16000;

/**
 * All available Codex models with their specifications
 * Based on https://developers.openai.com/codex/models/
 */
export const CODEX_MODELS: ModelDefinition[] = [
  // ========== Codex-Specific Models ==========
  // ========== Recommended Codex Models ==========
  {
    id: CODEX_MODEL_MAP.gpt52Codex,
    name: 'GPT-5.2-Codex',

@@ -24,7 +25,7 @@ export const CODEX_MODELS: ModelDefinition[] = [
    provider: 'openai',
    description:
      'Most advanced agentic coding model for complex software engineering (default for ChatGPT users).',
    contextWindow: CONTEXT_WINDOW_200K,
    contextWindow: CONTEXT_WINDOW_256K,
    maxOutputTokens: MAX_OUTPUT_32K,
    supportsVision: true,
    supportsTools: true,

@@ -33,38 +34,12 @@ export const CODEX_MODELS: ModelDefinition[] = [
    hasReasoning: true,
  },
  {
    id: CODEX_MODEL_MAP.gpt5Codex,
    name: 'GPT-5-Codex',
    modelString: CODEX_MODEL_MAP.gpt5Codex,
    id: CODEX_MODEL_MAP.gpt51CodexMax,
    name: 'GPT-5.1-Codex-Max',
    modelString: CODEX_MODEL_MAP.gpt51CodexMax,
    provider: 'openai',
    description: 'Purpose-built for Codex CLI with versatile tool use (default for CLI users).',
    contextWindow: CONTEXT_WINDOW_200K,
    maxOutputTokens: MAX_OUTPUT_32K,
    supportsVision: true,
    supportsTools: true,
    tier: 'standard' as const,
    hasReasoning: true,
  },
  {
    id: CODEX_MODEL_MAP.gpt5CodexMini,
    name: 'GPT-5-Codex-Mini',
    modelString: CODEX_MODEL_MAP.gpt5CodexMini,
    provider: 'openai',
    description: 'Faster workflows optimized for low-latency code Q&A and editing.',
    contextWindow: CONTEXT_WINDOW_128K,
    maxOutputTokens: MAX_OUTPUT_16K,
    supportsVision: false,
    supportsTools: true,
    tier: 'basic' as const,
    hasReasoning: false,
  },
  {
    id: CODEX_MODEL_MAP.codex1,
    name: 'Codex-1',
    modelString: CODEX_MODEL_MAP.codex1,
    provider: 'openai',
    description: 'Version of o3 optimized for software engineering with advanced reasoning.',
    contextWindow: CONTEXT_WINDOW_200K,
    description: 'Optimized for long-horizon, agentic coding tasks in Codex.',
    contextWindow: CONTEXT_WINDOW_256K,
    maxOutputTokens: MAX_OUTPUT_32K,
    supportsVision: true,
    supportsTools: true,

@@ -72,27 +47,40 @@ export const CODEX_MODELS: ModelDefinition[] = [
    hasReasoning: true,
  },
  {
    id: CODEX_MODEL_MAP.codexMiniLatest,
    name: 'Codex-Mini-Latest',
    modelString: CODEX_MODEL_MAP.codexMiniLatest,
    id: CODEX_MODEL_MAP.gpt51CodexMini,
    name: 'GPT-5.1-Codex-Mini',
    modelString: CODEX_MODEL_MAP.gpt51CodexMini,
    provider: 'openai',
    description: 'Version of o4-mini designed for Codex with faster workflows.',
    description: 'Smaller, more cost-effective version for faster workflows.',
    contextWindow: CONTEXT_WINDOW_128K,
    maxOutputTokens: MAX_OUTPUT_16K,
    supportsVision: true,
    supportsTools: true,
    tier: 'standard' as const,
    tier: 'basic' as const,
    hasReasoning: false,
  },

  // ========== Base GPT-5 Model ==========
  // ========== General-Purpose GPT Models ==========
  {
    id: CODEX_MODEL_MAP.gpt5,
    name: 'GPT-5',
    modelString: CODEX_MODEL_MAP.gpt5,
    id: CODEX_MODEL_MAP.gpt52,
    name: 'GPT-5.2',
    modelString: CODEX_MODEL_MAP.gpt52,
    provider: 'openai',
    description: 'GPT-5 base flagship model with strong general-purpose capabilities.',
    contextWindow: CONTEXT_WINDOW_200K,
    description: 'Best general agentic model for tasks across industries and domains.',
    contextWindow: CONTEXT_WINDOW_256K,
    maxOutputTokens: MAX_OUTPUT_32K,
    supportsVision: true,
    supportsTools: true,
    tier: 'standard' as const,
    hasReasoning: true,
  },
  {
    id: CODEX_MODEL_MAP.gpt51,
    name: 'GPT-5.1',
    modelString: CODEX_MODEL_MAP.gpt51,
    provider: 'openai',
    description: 'Great for coding and agentic tasks across domains.',
    contextWindow: CONTEXT_WINDOW_256K,
    maxOutputTokens: MAX_OUTPUT_32K,
    supportsVision: true,
    supportsTools: true,
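Because each catalog entry carries capability flags, downstream code can use it to decide, for example, whether a reasoning-effort setting is even applicable. A small sketch of that kind of lookup follows; the helper name and the literal id string are hypothetical, while CODEX_MODELS and ModelDefinition come from the diff above (the './codex-models.js' path is assumed).

import { CODEX_MODELS } from './codex-models.js';  // path assumed
import type { ModelDefinition } from './types.js';

// Hypothetical helper: find a model definition by id or model string.
function findCodexModel(modelId: string): ModelDefinition | undefined {
  return CODEX_MODELS.find((m) => m.id === modelId || m.modelString === modelId);
}

const model = findCodexModel('gpt-5.2-codex');  // id string is illustrative
if (model && model.hasReasoning) {
  console.log(`${model.name} can accept a reasoningEffort setting`);
}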
@@ -31,6 +31,7 @@ import type {
import {
  CODEX_MODEL_MAP,
  supportsReasoningEffort,
  validateBareModelId,
  type CodexApprovalPolicy,
  type CodexSandboxMode,
  type CodexAuthStatus,

@@ -61,6 +62,7 @@ const CODEX_ADD_DIR_FLAG = '--add-dir';
const CODEX_SKIP_GIT_REPO_CHECK_FLAG = '--skip-git-repo-check';
const CODEX_RESUME_FLAG = 'resume';
const CODEX_REASONING_EFFORT_KEY = 'reasoning_effort';
const CODEX_YOLO_FLAG = '--dangerously-bypass-approvals-and-sandbox';
const OPENAI_API_KEY_ENV = 'OPENAI_API_KEY';
const CODEX_EXECUTION_MODE_CLI = 'cli';
const CODEX_EXECUTION_MODE_SDK = 'sdk';
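Together, supportsReasoningEffort and CODEX_REASONING_EFFORT_KEY suggest that the reasoning-effort setting is forwarded to the Codex CLI as a config override, gated on the selected model. Below is a hedged sketch of that gating; the buildConfigOverrides helper and the '-c key=value' form are assumptions, only the two constant/function names come from this diff.

// Hypothetical sketch of turning reasoning effort into a CLI config override.
const CODEX_CONFIG_FLAG = '-c';  // assumed flag name, not confirmed by the diff
const CODEX_REASONING_EFFORT_KEY = 'reasoning_effort';

declare function supportsReasoningEffort(model: string): boolean;

function buildConfigOverrides(model: string, reasoningEffort?: string): string[] {
  // Only emit the override when the setting is present and the model supports it.
  if (!reasoningEffort || !supportsReasoningEffort(model)) return [];
  return [CODEX_CONFIG_FLAG, `${CODEX_REASONING_EFFORT_KEY}=${reasoningEffort}`];
}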
@@ -662,6 +664,10 @@ export class CodexProvider extends BaseProvider {
  }

  async *executeQuery(options: ExecuteOptions): AsyncGenerator<ProviderMessage> {
    // Validate that model doesn't have a provider prefix
    // AgentService should strip prefixes before passing to providers
    validateBareModelId(options.model, 'CodexProvider');

    try {
      const mcpServers = options.mcpServers ?? {};
      const hasMcpServers = Object.keys(mcpServers).length > 0;

@@ -759,18 +765,15 @@ export class CodexProvider extends BaseProvider {
      }
    }

    // Model is already bare (no prefix) - validated by executeQuery
    const args = [
      CODEX_EXEC_SUBCOMMAND,
      CODEX_YOLO_FLAG,
      CODEX_SKIP_GIT_REPO_CHECK_FLAG,
      ...preExecArgs,
      CODEX_MODEL_FLAG,
      options.model,
      CODEX_JSON_FLAG,
      CODEX_SANDBOX_FLAG,
      resolvedSandboxMode,
      ...(outputSchemaPath ? [CODEX_OUTPUT_SCHEMA_FLAG, outputSchemaPath] : []),
      ...(imagePaths.length > 0 ? [CODEX_IMAGE_FLAG, imagePaths.join(',')] : []),
      ...configOverrides,
      '-', // Read prompt from stdin to avoid shell escaping issues
    ];
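The trailing '-' argument makes the CLI read the prompt from stdin rather than from argv, which sidesteps shell quoting issues. A minimal sketch of how a provider might feed the prompt in; Node's child_process API is real, while the surrounding variable names are assumptions.

import { spawn } from 'node:child_process';

// Sketch: spawn the Codex CLI with the args built above and pipe the prompt via stdin.
function runCodex(cliPath: string, args: string[], prompt: string): void {
  const child = spawn(cliPath, args, { stdio: ['pipe', 'pipe', 'pipe'] });

  // Writing the prompt to stdin avoids quoting/escaping it on the command line.
  child.stdin?.write(prompt);
  child.stdin?.end();

  child.stdout?.on('data', (chunk: Buffer) => {
    // Each chunk is expected to be JSON output because of the JSON flag in args.
    process.stdout.write(chunk);
  });
}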
@@ -28,7 +28,7 @@ import type {
  ModelDefinition,
  ContentBlock,
} from './types.js';
import { stripProviderPrefix } from '@automaker/types';
import { validateBareModelId } from '@automaker/types';
import { validateApiKey } from '../lib/auth-utils.js';
import { getEffectivePermissions } from '../services/cursor-config-service.js';
import {

@@ -317,8 +317,8 @@ export class CursorProvider extends CliProvider {
  }

  buildCliArgs(options: ExecuteOptions): string[] {
    // Extract model (strip 'cursor-' prefix if present)
    const model = stripProviderPrefix(options.model || 'auto');
    // Model is already bare (no prefix) - validated by executeQuery
    const model = options.model || 'auto';

    // Build CLI arguments for cursor-agent
    // NOTE: Prompt is NOT included here - it's passed via stdin to avoid

@@ -649,6 +649,10 @@ export class CursorProvider extends CliProvider {
  async *executeQuery(options: ExecuteOptions): AsyncGenerator<ProviderMessage> {
    this.ensureCliDetected();

    // Validate that model doesn't have a provider prefix
    // AgentService should strip prefixes before passing to providers
    validateBareModelId(options.model, 'CursorProvider');

    if (!this.cliPath) {
      throw this.createError(
        CursorErrorCode.NOT_INSTALLED,
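The net effect across providers: stripProviderPrefix now runs once at the service boundary, and each provider only asserts that it received a bare id. A hedged sketch of that division of labour; the dispatch function and the '--model' flag are illustrative, not copied from agent-service.ts or the cursor-agent CLI.

import { stripProviderPrefix, validateBareModelId } from '@automaker/types';

// Illustrative only: the real AgentService wiring is not part of this diff.
function dispatchToCursor(requestedModel: string): string[] {
  // 1. Strip the UI-facing prefix exactly once, at the service boundary.
  const bareModel = stripProviderPrefix(requestedModel);  // e.g. 'cursor-auto' -> 'auto'

  // 2. Providers no longer strip; they validate and use the bare id as-is.
  validateBareModelId(bareModel, 'CursorProvider');
  return ['--model', bareModel];  // flag name assumed for illustration
}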