refactor: restrict model checks to gpt-* for OpenAI/Codex models

- Updated the model-resolution logic to check only for gpt-* models, removing references to the unsupported o1/o3 models in both the model-resolver and provider-factory files.
- Clarified the comments about which models the Codex CLI supports.
Author: Kacper
Date: 2025-12-13 13:12:04 +01:00
parent f71533ab17
commit 0473b35db3
2 changed files with 5 additions and 3 deletions


@@ -42,7 +42,8 @@ export function resolveModelString(
   }
-  // OpenAI/Codex models - pass through unchanged
-  if (modelKey.startsWith("gpt-") || modelKey.startsWith("o")) {
+  // Only check for gpt-* models (Codex CLI doesn't support o1/o3)
+  if (modelKey.startsWith("gpt-")) {
     console.log(`[ModelResolver] Using OpenAI/Codex model: ${modelKey}`);
     return modelKey;
   }
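
For context, a minimal runnable sketch of the narrowed check follows. Only the prefix test and the log line come from the hunk above; the function signature and the fallback behavior are assumptions made for illustration.

export function resolveModelString(modelKey: string): string {
  // Only gpt-* models pass through unchanged after this commit.
  if (modelKey.startsWith("gpt-")) {
    console.log(`[ModelResolver] Using OpenAI/Codex model: ${modelKey}`);
    return modelKey;
  }
  // o1/o3 keys no longer match this branch; the real function's
  // default handling (not shown in the diff) would take over here.
  return modelKey; // placeholder fallback, an assumption
}

resolveModelString("gpt-4o");     // logs and returns "gpt-4o" unchanged
resolveModelString("o1-preview"); // skips the OpenAI/Codex branch

Note that the old predicate modelKey.startsWith("o") matched any key beginning with "o", not just o1/o3, so narrowing it to "gpt-" also removes accidental matches.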


@@ -21,8 +21,9 @@ export class ProviderFactory {
   static getProviderForModel(modelId: string): BaseProvider {
     const lowerModel = modelId.toLowerCase();
-    // OpenAI/Codex models (gpt-*, o1, o3, etc.)
-    if (lowerModel.startsWith("gpt-") || lowerModel.startsWith("o")) {
+    // OpenAI/Codex models (gpt-*)
+    // Note: o1/o3 models are not supported by Codex CLI
+    if (lowerModel.startsWith("gpt-")) {
       return new CodexProvider();
     }
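
A self-contained sketch of the factory after the change follows; the BaseProvider and CodexProvider bodies and the DefaultProvider fallback are assumptions, since the hunk shows only the prefix check.

abstract class BaseProvider {
  abstract readonly name: string;
}

class CodexProvider extends BaseProvider {
  readonly name = "codex";
}

// Hypothetical stand-in for whatever the real factory falls back to.
class DefaultProvider extends BaseProvider {
  readonly name = "default";
}

class ProviderFactory {
  static getProviderForModel(modelId: string): BaseProvider {
    const lowerModel = modelId.toLowerCase();
    // OpenAI/Codex models (gpt-*)
    // Note: o1/o3 models are not supported by Codex CLI
    if (lowerModel.startsWith("gpt-")) {
      return new CodexProvider();
    }
    return new DefaultProvider(); // assumed fallback
  }
}

console.log(ProviderFactory.getProviderForModel("GPT-4o").name);     // "codex"
console.log(ProviderFactory.getProviderForModel("o1-preview").name); // "default"

Because of the toLowerCase() call, the gpt- check is case-insensitive, so keys like "GPT-4o" still route to CodexProvider after the change.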