diff --git a/apps/server/src/lib/model-resolver.ts b/apps/server/src/lib/model-resolver.ts
index f524dbdf..d4e12756 100644
--- a/apps/server/src/lib/model-resolver.ts
+++ b/apps/server/src/lib/model-resolver.ts
@@ -42,7 +42,8 @@ export function resolveModelString(
   }
 
   // OpenAI/Codex models - pass through unchanged
-  if (modelKey.startsWith("gpt-") || modelKey.startsWith("o")) {
+  // Only check for gpt-* models (Codex CLI doesn't support o1/o3)
+  if (modelKey.startsWith("gpt-")) {
     console.log(`[ModelResolver] Using OpenAI/Codex model: ${modelKey}`);
     return modelKey;
   }
diff --git a/apps/server/src/providers/provider-factory.ts b/apps/server/src/providers/provider-factory.ts
index e39eb65f..824a20f4 100644
--- a/apps/server/src/providers/provider-factory.ts
+++ b/apps/server/src/providers/provider-factory.ts
@@ -21,8 +21,9 @@ export class ProviderFactory {
   static getProviderForModel(modelId: string): BaseProvider {
     const lowerModel = modelId.toLowerCase();
 
-    // OpenAI/Codex models (gpt-*, o1, o3, etc.)
-    if (lowerModel.startsWith("gpt-") || lowerModel.startsWith("o")) {
+    // OpenAI/Codex models (gpt-*)
+    // Note: o1/o3 models are not supported by Codex CLI
+    if (lowerModel.startsWith("gpt-")) {
       return new CodexProvider();
     }