refactor: restrict model checks to gpt-* for OpenAI/Codex models

- Updated model resolution logic to only check for gpt-* models, removing references to unsupported o1/o3 models in both model-resolver and provider-factory files.
- Enhanced comments for clarity regarding model support in Codex CLI.
This commit is contained in: (branch names not captured in this view)
Author: Kacper
Date: 2025-12-13 13:12:04 +01:00
Parent commit: f71533ab17
Commit: 0473b35db3
2 changed files with 5 additions and 3 deletions

View File

@@ -21,8 +21,9 @@ export class ProviderFactory {
static getProviderForModel(modelId: string): BaseProvider {
const lowerModel = modelId.toLowerCase();
// OpenAI/Codex models (gpt-*, o1, o3, etc.)
if (lowerModel.startsWith("gpt-") || lowerModel.startsWith("o")) {
// OpenAI/Codex models (gpt-*)
// Note: o1/o3 models are not supported by Codex CLI
if (lowerModel.startsWith("gpt-")) {
return new CodexProvider();
}