Mirror of https://github.com/AutoMaker-Org/automaker.git (synced 2026-02-02 20:43:36 +00:00)
feat: add Codex CLI detection and model management APIs
- Implemented IPC handlers for checking Codex CLI installation status, retrieving available models, and checking provider statuses.
- Enhanced the SettingsView to include OpenAI API key management and connection testing.
- Updated the feature executor to support multiple model providers (Claude and Codex), allowing for improved flexibility in feature execution.
- Introduced utility functions to determine model types and support for thinking controls.

This update enhances the application's capabilities by integrating Codex CLI support and improving model management, providing users with a more robust experience.

🤖 Generated with [Claude Code](https://claude.com/claude-code)
@@ -569,3 +569,82 @@ ipcMain.handle("claude:check-cli", async () => {
    return { success: false, error: error.message };
  }
});

// ============================================================================
// Codex CLI Detection IPC Handlers
// ============================================================================

/**
 * Check Codex CLI installation status
 */
ipcMain.handle("codex:check-cli", async () => {
  try {
    const codexCliDetector = require("./services/codex-cli-detector");
    const info = codexCliDetector.getInstallationInfo();
    return { success: true, ...info };
  } catch (error) {
    console.error("[IPC] codex:check-cli error:", error);
    return { success: false, error: error.message };
  }
});

/**
 * Get all available models from all providers
 */
ipcMain.handle("model:get-available", async () => {
  try {
    const { ModelProviderFactory } = require("./services/model-provider");
    const models = ModelProviderFactory.getAllModels();
    return { success: true, models };
  } catch (error) {
    console.error("[IPC] model:get-available error:", error);
    return { success: false, error: error.message };
  }
});

/**
 * Check all provider installation status
 */
ipcMain.handle("model:check-providers", async () => {
  try {
    const { ModelProviderFactory } = require("./services/model-provider");
    const status = await ModelProviderFactory.checkAllProviders();
    return { success: true, providers: status };
  } catch (error) {
    console.error("[IPC] model:check-providers error:", error);
    return { success: false, error: error.message };
  }
});

/**
 * Test OpenAI API connection
 */
ipcMain.handle("openai:test-connection", async (_, { apiKey }) => {
  try {
    // Simple test using fetch to OpenAI API
    const response = await fetch("https://api.openai.com/v1/models", {
      method: "GET",
      headers: {
        "Authorization": `Bearer ${apiKey || process.env.OPENAI_API_KEY}`,
        "Content-Type": "application/json"
      }
    });

    if (response.ok) {
      const data = await response.json();
      return {
        success: true,
        message: `Connected successfully. Found ${data.data?.length || 0} models.`
      };
    } else {
      const error = await response.json();
      return {
        success: false,
        error: error.error?.message || "Failed to connect to OpenAI API"
      };
    }
  } catch (error) {
    console.error("[IPC] openai:test-connection error:", error);
    return { success: false, error: error.message };
  }
});
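For reference, a successful result from the `codex:check-cli` handler above looks roughly like the object below. The shape follows `getInstallationInfo()` in the new detector service plus the `{ success }` wrapper added by the handler; the concrete values (version, path) are illustrative only.

```js
// Illustrative result shape, not a captured output.
const exampleCheckCliResult = {
  success: true,
  status: "installed",          // or "api_key_only" / "not_installed"
  method: "npm",                // "cli" | "npm" | "brew" | "api-key-only" | "none"
  version: "codex 0.x.y",       // hypothetical version string from `codex --version`
  path: "/usr/local/bin/codex",
  recommendation: "Using Codex CLI via npm - ready for GPT-5.1 Codex models",
};
```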
@@ -141,6 +141,22 @@ contextBridge.exposeInMainWorld("electronAPI", {

  // Claude CLI Detection API
  checkClaudeCli: () => ipcRenderer.invoke("claude:check-cli"),

  // Codex CLI Detection API
  checkCodexCli: () => ipcRenderer.invoke("codex:check-cli"),

  // Model Management APIs
  model: {
    // Get all available models from all providers
    getAvailable: () => ipcRenderer.invoke("model:get-available"),

    // Check all provider installation status
    checkProviders: () => ipcRenderer.invoke("model:check-providers"),
  },

  // OpenAI API
  testOpenAIConnection: (apiKey) =>
    ipcRenderer.invoke("openai:test-connection", { apiKey }),
});

// Also expose a flag to detect if we're in Electron
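As a usage sketch (not part of the diff), a renderer component such as the SettingsView could consume the bridge exposed above like this; the `openAiKeyInput` variable is hypothetical and error handling is minimal.

```js
// Renderer-side sketch using the preload bridge above.
async function loadProviderSettings(openAiKeyInput) {
  const codex = await window.electronAPI.checkCodexCli();
  const claude = await window.electronAPI.checkClaudeCli();
  const { models } = await window.electronAPI.model.getAvailable();
  const { providers } = await window.electronAPI.model.checkProviders();
  console.log({ codex, claude, models, providers });

  // Test an OpenAI API key entered in the settings form.
  const result = await window.electronAPI.testOpenAIConnection(openAiKeyInput);
  if (!result.success) console.warn(result.error);
}
```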
app/electron/services/codex-cli-detector.js (new file, 232 lines)
@@ -0,0 +1,232 @@
const { execSync } = require('child_process');
const fs = require('fs');
const path = require('path');
const os = require('os');

/**
 * Codex CLI Detector - Checks if OpenAI Codex CLI is installed
 *
 * Codex CLI is OpenAI's agent CLI tool that allows users to use
 * GPT-5.1 Codex models (gpt-5.1-codex-max, gpt-5.1-codex, etc.)
 * for code generation and agentic tasks.
 */
class CodexCliDetector {
  /**
   * Check if Codex CLI is installed and accessible
   * @returns {Object} { installed: boolean, path: string|null, version: string|null, method: 'cli'|'npm'|'brew'|'none' }
   */
  static detectCodexInstallation() {
    try {
      // Method 1: Check if 'codex' command is in PATH
      try {
        const codexPath = execSync('which codex 2>/dev/null', { encoding: 'utf-8' }).trim();
        if (codexPath) {
          const version = this.getCodexVersion(codexPath);
          return {
            installed: true,
            path: codexPath,
            version: version,
            method: 'cli'
          };
        }
      } catch (error) {
        // CLI not in PATH, continue checking other methods
      }

      // Method 2: Check for npm global installation
      try {
        const npmListOutput = execSync('npm list -g @openai/codex --depth=0 2>/dev/null', { encoding: 'utf-8' });
        if (npmListOutput && npmListOutput.includes('@openai/codex')) {
          // Get the path from npm bin
          const npmBinPath = execSync('npm bin -g', { encoding: 'utf-8' }).trim();
          const codexPath = path.join(npmBinPath, 'codex');
          const version = this.getCodexVersion(codexPath);
          return {
            installed: true,
            path: codexPath,
            version: version,
            method: 'npm'
          };
        }
      } catch (error) {
        // npm global not found
      }

      // Method 3: Check for Homebrew installation on macOS
      if (process.platform === 'darwin') {
        try {
          const brewList = execSync('brew list --formula 2>/dev/null', { encoding: 'utf-8' });
          if (brewList.includes('codex')) {
            const brewPrefixOutput = execSync('brew --prefix codex 2>/dev/null', { encoding: 'utf-8' }).trim();
            const codexPath = path.join(brewPrefixOutput, 'bin', 'codex');
            const version = this.getCodexVersion(codexPath);
            return {
              installed: true,
              path: codexPath,
              version: version,
              method: 'brew'
            };
          }
        } catch (error) {
          // Homebrew not found or codex not installed via brew
        }
      }

      // Method 4: Check Windows path
      if (process.platform === 'win32') {
        try {
          const codexPath = execSync('where codex 2>nul', { encoding: 'utf-8' }).trim().split('\n')[0];
          if (codexPath) {
            const version = this.getCodexVersion(codexPath);
            return {
              installed: true,
              path: codexPath,
              version: version,
              method: 'cli'
            };
          }
        } catch (error) {
          // Not found on Windows
        }
      }

      // Method 5: Check common installation paths
      const commonPaths = [
        path.join(os.homedir(), '.local', 'bin', 'codex'),
        path.join(os.homedir(), '.npm-global', 'bin', 'codex'),
        '/usr/local/bin/codex',
        '/opt/homebrew/bin/codex',
      ];

      for (const checkPath of commonPaths) {
        if (fs.existsSync(checkPath)) {
          const version = this.getCodexVersion(checkPath);
          return {
            installed: true,
            path: checkPath,
            version: version,
            method: 'cli'
          };
        }
      }

      // Method 6: Check if OPENAI_API_KEY is set (can use Codex API directly)
      if (process.env.OPENAI_API_KEY) {
        return {
          installed: false,
          path: null,
          version: null,
          method: 'api-key-only',
          hasApiKey: true
        };
      }

      return {
        installed: false,
        path: null,
        version: null,
        method: 'none'
      };
    } catch (error) {
      console.error('[CodexCliDetector] Error detecting Codex installation:', error);
      return {
        installed: false,
        path: null,
        version: null,
        method: 'none',
        error: error.message
      };
    }
  }

  /**
   * Get Codex CLI version from executable path
   * @param {string} codexPath Path to codex executable
   * @returns {string|null} Version string or null
   */
  static getCodexVersion(codexPath) {
    try {
      const version = execSync(`"${codexPath}" --version 2>/dev/null`, { encoding: 'utf-8' }).trim();
      return version || null;
    } catch (error) {
      return null;
    }
  }

  /**
   * Get installation info and recommendations
   * @returns {Object} Installation status and recommendations
   */
  static getInstallationInfo() {
    const detection = this.detectCodexInstallation();

    if (detection.installed) {
      return {
        status: 'installed',
        method: detection.method,
        version: detection.version,
        path: detection.path,
        recommendation: detection.method === 'cli'
          ? 'Using Codex CLI - ready for GPT-5.1 Codex models'
          : `Using Codex CLI via ${detection.method} - ready for GPT-5.1 Codex models`
      };
    }

    // Not installed but has API key
    if (detection.method === 'api-key-only') {
      return {
        status: 'api_key_only',
        method: 'api-key-only',
        recommendation: 'OPENAI_API_KEY detected but Codex CLI not installed. Install Codex CLI for full agentic capabilities.',
        installCommands: this.getInstallCommands()
      };
    }

    return {
      status: 'not_installed',
      recommendation: 'Install OpenAI Codex CLI to use GPT-5.1 Codex models for agentic tasks',
      installCommands: this.getInstallCommands()
    };
  }

  /**
   * Get installation commands for different platforms
   * @returns {Object} Installation commands by platform
   */
  static getInstallCommands() {
    return {
      npm: 'npm install -g @openai/codex@latest',
      macos: 'brew install codex',
      linux: 'npm install -g @openai/codex@latest',
      windows: 'npm install -g @openai/codex@latest'
    };
  }

  /**
   * Check if Codex CLI supports a specific model
   * @param {string} model Model name to check
   * @returns {boolean} Whether the model is supported
   */
  static isModelSupported(model) {
    const supportedModels = [
      'gpt-5.1-codex-max',
      'gpt-5.1-codex',
      'gpt-5.1-codex-mini',
      'gpt-5.1',
      'o3',
      'o3-mini',
      'o4-mini'
    ];
    return supportedModels.includes(model);
  }

  /**
   * Get default model for Codex CLI
   * @returns {string} Default model name
   */
  static getDefaultModel() {
    return 'gpt-5.1-codex-max';
  }
}

module.exports = CodexCliDetector;
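A minimal sketch of how another main-process module can use the detector directly; the fields follow the code above, and the actual values depend on the local machine.

```js
const CodexCliDetector = require("./codex-cli-detector");

const info = CodexCliDetector.getInstallationInfo();
if (info.status !== "installed") {
  // installCommands is only present when Codex CLI was not found.
  console.log("Install Codex CLI with:", info.installCommands.npm);
}

console.log(CodexCliDetector.isModelSupported("gpt-5.1-codex-max")); // true
console.log(CodexCliDetector.getDefaultModel());                     // "gpt-5.1-codex-max"
```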
app/electron/services/codex-executor.js (new file, 585 lines)
@@ -0,0 +1,585 @@
|
||||
/**
|
||||
* Codex CLI Execution Wrapper
|
||||
*
|
||||
* This module handles spawning and managing Codex CLI processes
|
||||
* for executing OpenAI model queries.
|
||||
*/
|
||||
|
||||
const { spawn } = require('child_process');
|
||||
const { EventEmitter } = require('events');
|
||||
const readline = require('readline');
|
||||
const CodexCliDetector = require('./codex-cli-detector');
|
||||
|
||||
/**
|
||||
* Message types from Codex CLI JSON output
|
||||
*/
|
||||
const CODEX_EVENT_TYPES = {
|
||||
THREAD_STARTED: 'thread.started',
|
||||
ITEM_STARTED: 'item.started',
|
||||
ITEM_COMPLETED: 'item.completed',
|
||||
THREAD_COMPLETED: 'thread.completed',
|
||||
ERROR: 'error'
|
||||
};
|
||||
|
||||
/**
|
||||
* Codex Executor - Manages Codex CLI process execution
|
||||
*/
|
||||
class CodexExecutor extends EventEmitter {
|
||||
constructor() {
|
||||
super();
|
||||
this.currentProcess = null;
|
||||
this.codexPath = null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Find and cache the Codex CLI path
|
||||
* @returns {string|null} Path to codex executable
|
||||
*/
|
||||
findCodexPath() {
|
||||
if (this.codexPath) {
|
||||
return this.codexPath;
|
||||
}
|
||||
|
||||
const installation = CodexCliDetector.detectCodexInstallation();
|
||||
if (installation.installed && installation.path) {
|
||||
this.codexPath = installation.path;
|
||||
return this.codexPath;
|
||||
}
|
||||
|
||||
return null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Execute a Codex CLI query
|
||||
* @param {Object} options Execution options
|
||||
* @param {string} options.prompt The prompt to execute
|
||||
* @param {string} options.model Model to use (default: gpt-5.1-codex-max)
|
||||
* @param {string} options.cwd Working directory
|
||||
* @param {string} options.systemPrompt System prompt (optional, will be prepended to prompt)
|
||||
* @param {number} options.maxTurns Not used - Codex CLI doesn't support this parameter
|
||||
* @param {string[]} options.allowedTools Not used - Codex CLI doesn't support this parameter
|
||||
* @param {Object} options.env Environment variables
|
||||
* @returns {AsyncGenerator} Generator yielding messages
|
||||
*/
|
||||
async *execute(options) {
|
||||
const {
|
||||
prompt,
|
||||
model = 'gpt-5.1-codex-max',
|
||||
cwd = process.cwd(),
|
||||
systemPrompt,
|
||||
maxTurns, // Not used by Codex CLI
|
||||
allowedTools, // Not used by Codex CLI
|
||||
env = {}
|
||||
} = options;
|
||||
|
||||
const codexPath = this.findCodexPath();
|
||||
if (!codexPath) {
|
||||
yield {
|
||||
type: 'error',
|
||||
error: 'Codex CLI not found. Please install it with: npm install -g @openai/codex@latest'
|
||||
};
|
||||
return;
|
||||
}
|
||||
|
||||
// Combine system prompt with main prompt if provided
|
||||
// Codex CLI doesn't support --system-prompt argument, so we prepend it to the prompt
|
||||
let combinedPrompt = prompt;
|
||||
console.log('[CodexExecutor] Original prompt length:', prompt?.length || 0);
|
||||
if (systemPrompt) {
|
||||
combinedPrompt = `${systemPrompt}\n\n---\n\n${prompt}`;
|
||||
console.log('[CodexExecutor] System prompt prepended to main prompt');
|
||||
console.log('[CodexExecutor] System prompt length:', systemPrompt.length);
|
||||
console.log('[CodexExecutor] Combined prompt length:', combinedPrompt.length);
|
||||
}
|
||||
|
||||
// Build command arguments
|
||||
// Note: maxTurns and allowedTools are not supported by Codex CLI
|
||||
console.log('[CodexExecutor] Building command arguments...');
|
||||
const args = this.buildArgs({
|
||||
prompt: combinedPrompt,
|
||||
model
|
||||
});
|
||||
|
||||
console.log('[CodexExecutor] Executing command:', codexPath);
|
||||
console.log('[CodexExecutor] Number of args:', args.length);
|
||||
console.log('[CodexExecutor] Args (without prompt):', args.slice(0, -1).join(' '));
|
||||
console.log('[CodexExecutor] Prompt length in args:', args[args.length - 1]?.length || 0);
|
||||
console.log('[CodexExecutor] Prompt preview (first 200 chars):', args[args.length - 1]?.substring(0, 200));
|
||||
console.log('[CodexExecutor] Working directory:', cwd);
|
||||
|
||||
// Spawn the process
|
||||
const processEnv = {
|
||||
...process.env,
|
||||
...env,
|
||||
// Ensure OPENAI_API_KEY is available
|
||||
OPENAI_API_KEY: env.OPENAI_API_KEY || process.env.OPENAI_API_KEY
|
||||
};
|
||||
|
||||
// Log API key status (without exposing the key)
|
||||
if (processEnv.OPENAI_API_KEY) {
|
||||
console.log('[CodexExecutor] OPENAI_API_KEY is set (length:', processEnv.OPENAI_API_KEY.length, ')');
|
||||
} else {
|
||||
console.warn('[CodexExecutor] WARNING: OPENAI_API_KEY is not set!');
|
||||
}
|
||||
|
||||
console.log('[CodexExecutor] Spawning process...');
|
||||
const proc = spawn(codexPath, args, {
|
||||
cwd,
|
||||
env: processEnv,
|
||||
stdio: ['pipe', 'pipe', 'pipe']
|
||||
});
|
||||
|
||||
this.currentProcess = proc;
|
||||
console.log('[CodexExecutor] Process spawned with PID:', proc.pid);
|
||||
|
||||
// Track process events
|
||||
proc.on('error', (error) => {
|
||||
console.error('[CodexExecutor] Process error:', error);
|
||||
});
|
||||
|
||||
proc.on('spawn', () => {
|
||||
console.log('[CodexExecutor] Process spawned successfully');
|
||||
});
|
||||
|
||||
// Collect stderr output as it comes in
|
||||
let stderr = '';
|
||||
let hasOutput = false;
|
||||
let stdoutChunks = [];
|
||||
let stderrChunks = [];
|
||||
|
||||
proc.stderr.on('data', (data) => {
|
||||
const errorText = data.toString();
|
||||
stderr += errorText;
|
||||
stderrChunks.push(errorText);
|
||||
hasOutput = true;
|
||||
console.error('[CodexExecutor] stderr chunk received (', data.length, 'bytes):', errorText.substring(0, 200));
|
||||
});
|
||||
|
||||
proc.stderr.on('end', () => {
|
||||
console.log('[CodexExecutor] stderr stream ended. Total chunks:', stderrChunks.length, 'Total length:', stderr.length);
|
||||
});
|
||||
|
||||
proc.stdout.on('data', (data) => {
|
||||
const text = data.toString();
|
||||
stdoutChunks.push(text);
|
||||
hasOutput = true;
|
||||
console.log('[CodexExecutor] stdout chunk received (', data.length, 'bytes):', text.substring(0, 200));
|
||||
});
|
||||
|
||||
proc.stdout.on('end', () => {
|
||||
console.log('[CodexExecutor] stdout stream ended. Total chunks:', stdoutChunks.length);
|
||||
});
|
||||
|
||||
// Create readline interface for parsing JSONL output
|
||||
console.log('[CodexExecutor] Creating readline interface...');
|
||||
const rl = readline.createInterface({
|
||||
input: proc.stdout,
|
||||
crlfDelay: Infinity
|
||||
});
|
||||
|
||||
// Track accumulated content for converting to Claude format
|
||||
let accumulatedText = '';
|
||||
let toolUses = [];
|
||||
let lastOutputTime = Date.now();
|
||||
const OUTPUT_TIMEOUT = 30000; // 30 seconds timeout for no output
|
||||
let lineCount = 0;
|
||||
let jsonParseErrors = 0;
|
||||
|
||||
// Set up timeout check
|
||||
const checkTimeout = setInterval(() => {
|
||||
const timeSinceLastOutput = Date.now() - lastOutputTime;
|
||||
if (timeSinceLastOutput > OUTPUT_TIMEOUT && !hasOutput) {
|
||||
console.warn('[CodexExecutor] No output received for', timeSinceLastOutput, 'ms. Process still alive:', !proc.killed);
|
||||
}
|
||||
}, 5000);
|
||||
|
||||
console.log('[CodexExecutor] Starting to read lines from stdout...');
|
||||
|
||||
// Process stdout line by line (JSONL format)
|
||||
try {
|
||||
for await (const line of rl) {
|
||||
hasOutput = true;
|
||||
lastOutputTime = Date.now();
|
||||
lineCount++;
|
||||
|
||||
console.log('[CodexExecutor] Line', lineCount, 'received (length:', line.length, '):', line.substring(0, 100));
|
||||
|
||||
if (!line.trim()) {
|
||||
console.log('[CodexExecutor] Skipping empty line');
|
||||
continue;
|
||||
}
|
||||
|
||||
try {
|
||||
const event = JSON.parse(line);
|
||||
console.log('[CodexExecutor] Successfully parsed JSON event. Type:', event.type, 'Keys:', Object.keys(event));
|
||||
|
||||
const convertedMsg = this.convertToClaudeFormat(event);
|
||||
console.log('[CodexExecutor] Converted message:', convertedMsg ? { type: convertedMsg.type } : 'null');
|
||||
|
||||
if (convertedMsg) {
|
||||
// Accumulate text content
|
||||
if (convertedMsg.type === 'assistant' && convertedMsg.message?.content) {
|
||||
for (const block of convertedMsg.message.content) {
|
||||
if (block.type === 'text') {
|
||||
accumulatedText += block.text;
|
||||
console.log('[CodexExecutor] Accumulated text block (total length:', accumulatedText.length, ')');
|
||||
} else if (block.type === 'tool_use') {
|
||||
toolUses.push(block);
|
||||
console.log('[CodexExecutor] Tool use detected:', block.name);
|
||||
}
|
||||
}
|
||||
}
|
||||
console.log('[CodexExecutor] Yielding message of type:', convertedMsg.type);
|
||||
yield convertedMsg;
|
||||
} else {
|
||||
console.log('[CodexExecutor] Converted message is null, skipping');
|
||||
}
|
||||
} catch (parseError) {
|
||||
jsonParseErrors++;
|
||||
// Non-JSON output, yield as text
|
||||
console.log('[CodexExecutor] JSON parse error (', jsonParseErrors, 'total):', parseError.message);
|
||||
console.log('[CodexExecutor] Non-JSON line content:', line.substring(0, 200));
|
||||
yield {
|
||||
type: 'assistant',
|
||||
message: {
|
||||
content: [{ type: 'text', text: line + '\n' }]
|
||||
}
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
console.log('[CodexExecutor] Finished reading all lines. Total lines:', lineCount, 'JSON errors:', jsonParseErrors);
|
||||
} catch (readError) {
|
||||
console.error('[CodexExecutor] Error reading from readline:', readError);
|
||||
throw readError;
|
||||
} finally {
|
||||
clearInterval(checkTimeout);
|
||||
console.log('[CodexExecutor] Cleaned up timeout checker');
|
||||
}
|
||||
|
||||
// Handle process completion
|
||||
console.log('[CodexExecutor] Waiting for process to close...');
|
||||
const exitCode = await new Promise((resolve) => {
|
||||
proc.on('close', (code, signal) => {
|
||||
console.log('[CodexExecutor] Process closed with code:', code, 'signal:', signal);
|
||||
resolve(code);
|
||||
});
|
||||
});
|
||||
|
||||
this.currentProcess = null;
|
||||
console.log('[CodexExecutor] Process completed. Exit code:', exitCode, 'Has output:', hasOutput, 'Stderr length:', stderr.length);
|
||||
|
||||
// Wait a bit for any remaining stderr data to be collected
|
||||
console.log('[CodexExecutor] Waiting 200ms for any remaining stderr data...');
|
||||
await new Promise(resolve => setTimeout(resolve, 200));
|
||||
console.log('[CodexExecutor] Final stderr length:', stderr.length, 'Final stdout chunks:', stdoutChunks.length);
|
||||
|
||||
if (exitCode !== 0) {
|
||||
const errorMessage = stderr.trim()
|
||||
? `Codex CLI exited with code ${exitCode}.\n\nError output:\n${stderr}`
|
||||
: `Codex CLI exited with code ${exitCode}. No error output captured.`;
|
||||
|
||||
console.error('[CodexExecutor] Process failed with exit code', exitCode);
|
||||
console.error('[CodexExecutor] Error message:', errorMessage);
|
||||
console.error('[CodexExecutor] Stderr chunks:', stderrChunks.length, 'Stdout chunks:', stdoutChunks.length);
|
||||
|
||||
yield {
|
||||
type: 'error',
|
||||
error: errorMessage
|
||||
};
|
||||
} else if (!hasOutput && !stderr) {
|
||||
// Process exited successfully but produced no output - might be API key issue
|
||||
const warningMessage = 'Codex CLI completed but produced no output. This might indicate:\n' +
|
||||
'- Missing or invalid OPENAI_API_KEY\n' +
|
||||
'- Codex CLI configuration issue\n' +
|
||||
'- The process completed without generating any response\n\n' +
|
||||
`Debug info: Exit code ${exitCode}, stdout chunks: ${stdoutChunks.length}, stderr chunks: ${stderrChunks.length}, lines read: ${lineCount}`;
|
||||
|
||||
console.warn('[CodexExecutor] No output detected:', warningMessage);
|
||||
console.warn('[CodexExecutor] Stdout chunks:', stdoutChunks);
|
||||
console.warn('[CodexExecutor] Stderr chunks:', stderrChunks);
|
||||
|
||||
yield {
|
||||
type: 'error',
|
||||
error: warningMessage
|
||||
};
|
||||
} else {
|
||||
console.log('[CodexExecutor] Process completed successfully. Exit code:', exitCode, 'Lines processed:', lineCount);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Build command arguments for Codex CLI
|
||||
* Only includes supported arguments based on Codex CLI help:
|
||||
* - --model: Model to use
|
||||
* - --json: JSON output format
|
||||
* - --full-auto: Non-interactive automatic execution
|
||||
*
|
||||
* Note: Codex CLI does NOT support:
|
||||
* - --system-prompt (system prompt is prepended to main prompt)
|
||||
* - --max-turns (not available in CLI)
|
||||
* - --tools (not available in CLI)
|
||||
*
|
||||
* @param {Object} options Options
|
||||
* @returns {string[]} Command arguments
|
||||
*/
|
||||
buildArgs(options) {
|
||||
const { prompt, model } = options;
|
||||
|
||||
console.log('[CodexExecutor] buildArgs called with model:', model, 'prompt length:', prompt?.length || 0);
|
||||
|
||||
const args = ['exec'];
|
||||
|
||||
// Add model (required for most use cases)
|
||||
if (model) {
|
||||
args.push('--model', model);
|
||||
console.log('[CodexExecutor] Added model argument:', model);
|
||||
}
|
||||
|
||||
// Add JSON output flag for structured parsing
|
||||
args.push('--json');
|
||||
console.log('[CodexExecutor] Added --json flag');
|
||||
|
||||
// Add full-auto mode (non-interactive)
|
||||
// This enables automatic execution with workspace-write sandbox
|
||||
args.push('--full-auto');
|
||||
console.log('[CodexExecutor] Added --full-auto flag');
|
||||
|
||||
// Add the prompt at the end
|
||||
args.push(prompt);
|
||||
console.log('[CodexExecutor] Added prompt (length:', prompt?.length || 0, ')');
|
||||
|
||||
console.log('[CodexExecutor] Final args count:', args.length);
|
||||
return args;
|
||||
}
|
||||
|
||||
/**
|
||||
* Map Claude tool names to Codex tool names
|
||||
* @param {string[]} tools Array of tool names
|
||||
* @returns {string[]} Mapped tool names
|
||||
*/
|
||||
mapToolsToCodex(tools) {
|
||||
const toolMap = {
|
||||
'Read': 'read',
|
||||
'Write': 'write',
|
||||
'Edit': 'edit',
|
||||
'Bash': 'bash',
|
||||
'Glob': 'glob',
|
||||
'Grep': 'grep',
|
||||
'WebSearch': 'web-search',
|
||||
'WebFetch': 'web-fetch'
|
||||
};
|
||||
|
||||
return tools
|
||||
.map(tool => toolMap[tool] || tool.toLowerCase())
|
||||
.filter(tool => tool); // Remove undefined
|
||||
}
|
||||
|
||||
/**
|
||||
* Convert Codex JSONL event to Claude SDK message format
|
||||
* @param {Object} event Codex event object
|
||||
* @returns {Object|null} Claude-format message or null
|
||||
*/
|
||||
convertToClaudeFormat(event) {
|
||||
console.log('[CodexExecutor] Converting event:', JSON.stringify(event).substring(0, 200));
|
||||
const { type, data, item, thread_id } = event;
|
||||
|
||||
switch (type) {
|
||||
case CODEX_EVENT_TYPES.THREAD_STARTED:
|
||||
case 'thread.started':
|
||||
// Session initialization
|
||||
return {
|
||||
type: 'session_start',
|
||||
sessionId: thread_id || data?.thread_id || event.thread_id
|
||||
};
|
||||
|
||||
case CODEX_EVENT_TYPES.ITEM_COMPLETED:
|
||||
case 'item.completed':
|
||||
// Codex uses 'item' field, not 'data'
|
||||
return this.convertItemCompleted(item || data);
|
||||
|
||||
case CODEX_EVENT_TYPES.ITEM_STARTED:
|
||||
case 'item.started':
|
||||
// Convert item.started events - these indicate tool/command usage
|
||||
const startedItem = item || data;
|
||||
if (startedItem?.type === 'command_execution' && startedItem?.command) {
|
||||
return {
|
||||
type: 'assistant',
|
||||
message: {
|
||||
content: [{
|
||||
type: 'tool_use',
|
||||
name: 'bash',
|
||||
input: { command: startedItem.command }
|
||||
}]
|
||||
}
|
||||
};
|
||||
}
|
||||
// For other item.started types, return null (we'll show the completed version)
|
||||
return null;
|
||||
|
||||
case CODEX_EVENT_TYPES.THREAD_COMPLETED:
|
||||
case 'thread.completed':
|
||||
return {
|
||||
type: 'complete',
|
||||
sessionId: thread_id || data?.thread_id || event.thread_id
|
||||
};
|
||||
|
||||
case CODEX_EVENT_TYPES.ERROR:
|
||||
case 'error':
|
||||
return {
|
||||
type: 'error',
|
||||
error: data?.message || item?.message || event.message || 'Unknown error from Codex CLI'
|
||||
};
|
||||
|
||||
case 'turn.started':
|
||||
// Turn started - just a marker, no need to convert
|
||||
return null;
|
||||
|
||||
default:
|
||||
// Pass through other events
|
||||
console.log('[CodexExecutor] Unhandled event type:', type);
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Convert item.completed event to Claude format
|
||||
* @param {Object} item Event item data
|
||||
* @returns {Object|null} Claude-format message
|
||||
*/
|
||||
convertItemCompleted(item) {
|
||||
if (!item) {
|
||||
console.log('[CodexExecutor] convertItemCompleted: item is null/undefined');
|
||||
return null;
|
||||
}
|
||||
|
||||
const itemType = item.type || item.item_type;
|
||||
console.log('[CodexExecutor] convertItemCompleted: itemType =', itemType, 'item keys:', Object.keys(item));
|
||||
|
||||
switch (itemType) {
|
||||
case 'reasoning':
|
||||
// Thinking/reasoning output - Codex uses 'text' field
|
||||
const reasoningText = item.text || item.content || '';
|
||||
console.log('[CodexExecutor] Converting reasoning, text length:', reasoningText.length);
|
||||
return {
|
||||
type: 'assistant',
|
||||
message: {
|
||||
content: [{
|
||||
type: 'thinking',
|
||||
thinking: reasoningText
|
||||
}]
|
||||
}
|
||||
};
|
||||
|
||||
case 'agent_message':
|
||||
case 'message':
|
||||
// Assistant text message
|
||||
const messageText = item.content || item.text || '';
|
||||
console.log('[CodexExecutor] Converting message, text length:', messageText.length);
|
||||
return {
|
||||
type: 'assistant',
|
||||
message: {
|
||||
content: [{
|
||||
type: 'text',
|
||||
text: messageText
|
||||
}]
|
||||
}
|
||||
};
|
||||
|
||||
case 'command_execution':
|
||||
// Command execution - show both the command and its output
|
||||
const command = item.command || '';
|
||||
const output = item.aggregated_output || item.output || '';
|
||||
console.log('[CodexExecutor] Converting command_execution, command:', command.substring(0, 50), 'output length:', output.length);
|
||||
|
||||
// Return as text message showing the command and output
|
||||
return {
|
||||
type: 'assistant',
|
||||
message: {
|
||||
content: [{
|
||||
type: 'text',
|
||||
text: `\`\`\`bash\n${command}\n\`\`\`\n\n${output}`
|
||||
}]
|
||||
}
|
||||
};
|
||||
|
||||
case 'tool_use':
|
||||
// Tool use
|
||||
return {
|
||||
type: 'assistant',
|
||||
message: {
|
||||
content: [{
|
||||
type: 'tool_use',
|
||||
name: item.tool || item.command || 'unknown',
|
||||
input: item.input || item.args || {}
|
||||
}]
|
||||
}
|
||||
};
|
||||
|
||||
case 'tool_result':
|
||||
// Tool result
|
||||
return {
|
||||
type: 'tool_result',
|
||||
tool_use_id: item.tool_use_id,
|
||||
content: item.output || item.result
|
||||
};
|
||||
|
||||
case 'todo_list':
|
||||
// Todo list - convert to text format
|
||||
const todos = item.items || [];
|
||||
const todoText = todos.map((t, i) => `${i + 1}. ${t.text || t}`).join('\n');
|
||||
console.log('[CodexExecutor] Converting todo_list, items:', todos.length);
|
||||
return {
|
||||
type: 'assistant',
|
||||
message: {
|
||||
content: [{
|
||||
type: 'text',
|
||||
text: `**Todo List:**\n${todoText}`
|
||||
}]
|
||||
}
|
||||
};
|
||||
|
||||
default:
|
||||
// Generic text output
|
||||
const text = item.text || item.content || item.aggregated_output;
|
||||
if (text) {
|
||||
console.log('[CodexExecutor] Converting default item type, text length:', text.length);
|
||||
return {
|
||||
type: 'assistant',
|
||||
message: {
|
||||
content: [{
|
||||
type: 'text',
|
||||
text: String(text)
|
||||
}]
|
||||
}
|
||||
};
|
||||
}
|
||||
console.log('[CodexExecutor] convertItemCompleted: No text content found, returning null');
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Abort current execution
|
||||
*/
|
||||
abort() {
|
||||
if (this.currentProcess) {
|
||||
console.log('[CodexExecutor] Aborting current process');
|
||||
this.currentProcess.kill('SIGTERM');
|
||||
this.currentProcess = null;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if execution is in progress
|
||||
* @returns {boolean} Whether execution is in progress
|
||||
*/
|
||||
isRunning() {
|
||||
return this.currentProcess !== null;
|
||||
}
|
||||
}
|
||||
|
||||
// Singleton instance
|
||||
const codexExecutor = new CodexExecutor();
|
||||
|
||||
module.exports = codexExecutor;
|
||||
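To illustrate the streaming contract of `execute()` above: a caller iterates the async generator and switches on the Claude-format message types it yields (`error`, `assistant`, `session_start`, `complete`, `tool_result`). This is a sketch assuming the singleton is required from another main-process service.

```js
const codexExecutor = require("./codex-executor");

async function runPrompt() {
  const stream = codexExecutor.execute({
    prompt: "List the files in this project and summarize package.json",
    model: "gpt-5.1-codex-max",
    cwd: process.cwd(),
  });

  for await (const msg of stream) {
    if (msg.type === "error") throw new Error(msg.error);
    if (msg.type === "assistant") {
      for (const block of msg.message.content) {
        if (block.type === "text") process.stdout.write(block.text);
      }
    }
  }
}
```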
@@ -3,10 +3,12 @@ const promptBuilder = require("./prompt-builder");
const contextManager = require("./context-manager");
const featureLoader = require("./feature-loader");
const mcpServerFactory = require("./mcp-server-factory");
const { ModelRegistry } = require("./model-registry");
const { ModelProviderFactory } = require("./model-provider");

// Model name mappings
// Model name mappings for Claude (legacy - kept for backwards compatibility)
const MODEL_MAP = {
  haiku: "claude-haiku-4-20250514",
  haiku: "claude-haiku-4-5",
  sonnet: "claude-sonnet-4-20250514",
  opus: "claude-opus-4-5-20251101",
};

@@ -23,20 +25,47 @@ const THINKING_BUDGET_MAP = {

/**
 * Feature Executor - Handles feature implementation using Claude Agent SDK
 * Now supports multiple model providers (Claude, Codex/OpenAI)
 */
class FeatureExecutor {
  /**
   * Get the model string based on feature's model setting
   * Supports both Claude and Codex/OpenAI models
   */
  getModelString(feature) {
    const modelKey = feature.model || "opus"; // Default to opus
    return MODEL_MAP[modelKey] || MODEL_MAP.opus;

    // Use the registry for model lookup
    const modelString = ModelRegistry.getModelString(modelKey);
    return modelString || MODEL_MAP[modelKey] || MODEL_MAP.opus;
  }

  /**
   * Determine if the feature uses a Codex/OpenAI model
   */
  isCodexModel(feature) {
    const modelKey = feature.model || "opus";
    return ModelRegistry.isCodexModel(modelKey);
  }

  /**
   * Get the appropriate provider for the feature's model
   */
  getProvider(feature) {
    const modelKey = feature.model || "opus";
    return ModelProviderFactory.getProviderForModel(modelKey);
  }

  /**
   * Get thinking configuration based on feature's thinkingLevel
   */
  getThinkingConfig(feature) {
    const modelId = feature.model || "opus";
    // Skip thinking config for models that don't support it (e.g., Codex CLI)
    if (!ModelRegistry.modelSupportsThinking(modelId)) {
      return null;
    }

    const level = feature.thinkingLevel || "none";
    const budgetTokens = THINKING_BUDGET_MAP[level];

@@ -109,6 +138,11 @@ class FeatureExecutor {
  async implementFeature(feature, projectPath, sendToRenderer, execution) {
    console.log(`[FeatureExecutor] Implementing: ${feature.description}`);

    // Declare variables outside try block so they're available in catch
    let modelString;
    let providerName;
    let isCodex;

    try {
      // ========================================
      // PHASE 1: PLANNING

@@ -161,7 +195,14 @@ class FeatureExecutor {
        });
      }

      console.log(`[FeatureExecutor] Using model: ${modelString}, thinking: ${feature.thinkingLevel || 'none'}`);
      providerName = this.isCodexModel(feature) ? 'Codex/OpenAI' : 'Claude';
      console.log(`[FeatureExecutor] Using provider: ${providerName}, model: ${modelString}, thinking: ${feature.thinkingLevel || 'none'}`);

      // Note: Claude Agent SDK handles authentication automatically - it can use:
      // 1. CLAUDE_CODE_OAUTH_TOKEN env var (for SDK mode)
      // 2. Claude CLI's own authentication (if CLI is installed)
      // 3. ANTHROPIC_API_KEY (fallback)
      // We don't need to validate here - let the SDK/CLI handle auth errors

      // Configure options for the SDK query
      const options = {

@@ -224,8 +265,31 @@ class FeatureExecutor {
      });
      console.log(`[FeatureExecutor] Phase: ACTION for ${feature.description}`);

      // Send query
      const currentQuery = query({ prompt, options });
      // Send query - use appropriate provider based on model
      let currentQuery;
      isCodex = this.isCodexModel(feature);

      if (isCodex) {
        // Use Codex provider for OpenAI models
        console.log(`[FeatureExecutor] Using Codex provider for model: ${modelString}`);
        const provider = this.getProvider(feature);
        currentQuery = provider.executeQuery({
          prompt,
          model: modelString,
          cwd: projectPath,
          systemPrompt: promptBuilder.getCodingPrompt(),
          maxTurns: 20, // Codex CLI typically uses fewer turns
          allowedTools: options.allowedTools,
          abortController: abortController,
          env: {
            OPENAI_API_KEY: process.env.OPENAI_API_KEY
          }
        });
      } else {
        // Use Claude SDK (original implementation)
        currentQuery = query({ prompt, options });
      }

      execution.query = currentQuery;

      // Stream responses

@@ -235,6 +299,18 @@ class FeatureExecutor {
        // Check if this specific feature was aborted
        if (!execution.isActive()) break;

        // Handle error messages
        if (msg.type === "error") {
          const errorMsg = `\n❌ Error: ${msg.error}\n`;
          await contextManager.writeToContextFile(projectPath, feature.id, errorMsg);
          sendToRenderer({
            type: "auto_mode_error",
            featureId: feature.id,
            error: msg.error,
          });
          throw new Error(msg.error);
        }

        if (msg.type === "assistant" && msg.message?.content) {
          for (const block of msg.message.content) {
            if (block.type === "text") {

@@ -249,6 +325,15 @@ class FeatureExecutor {
                featureId: feature.id,
                content: block.text,
              });
            } else if (block.type === "thinking") {
              // Handle thinking output from Codex O-series models
              const thinkingMsg = `\n💭 Thinking: ${block.thinking?.substring(0, 200)}...\n`;
              await contextManager.writeToContextFile(projectPath, feature.id, thinkingMsg);
              sendToRenderer({
                type: "auto_mode_progress",
                featureId: feature.id,
                content: thinkingMsg,
              });
            } else if (block.type === "tool_use") {
              // First tool use indicates we're actively implementing
              if (!hasStartedToolUse) {

@@ -341,6 +426,45 @@ class FeatureExecutor {
      }

      console.error("[FeatureExecutor] Error implementing feature:", error);

      // Safely get model info for error logging (may not be set if error occurred early)
      const modelInfo = modelString ? {
        message: error.message,
        stack: error.stack,
        name: error.name,
        code: error.code,
        model: modelString,
        provider: providerName || 'unknown',
        isCodex: isCodex !== undefined ? isCodex : 'unknown'
      } : {
        message: error.message,
        stack: error.stack,
        name: error.name,
        code: error.code,
        model: 'not initialized',
        provider: 'unknown',
        isCodex: 'unknown'
      };

      console.error("[FeatureExecutor] Error details:", modelInfo);

      // Check if this is a Claude CLI process error
      if (error.message && error.message.includes("process exited with code")) {
        const modelDisplay = modelString ? `Model: ${modelString}` : 'Model: not initialized';
        const errorMsg = `Claude Code CLI failed with exit code 1. This might be due to:\n` +
          `- Invalid or unsupported model (${modelDisplay})\n` +
          `- Missing or invalid CLAUDE_CODE_OAUTH_TOKEN\n` +
          `- Claude CLI configuration issue\n` +
          `- Model not available in your Claude account\n\n` +
          `Original error: ${error.message}`;

        await contextManager.writeToContextFile(projectPath, feature.id, `\n❌ ${errorMsg}\n`);
        sendToRenderer({
          type: "auto_mode_error",
          featureId: feature.id,
          error: errorMsg,
        });
      }

      // Clean up
      if (execution) {

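The routing added above boils down to a lookup on the feature's model key. A condensed sketch of the decision, using the same registry and factory with hypothetical feature objects; the return values shown are what the surrounding diff implies, not captured output.

```js
const { ModelRegistry } = require("./model-registry");
const { ModelProviderFactory } = require("./model-provider");

// Hypothetical feature objects as stored by the app.
const claudeFeature = { model: "opus", thinkingLevel: "high" };
const codexFeature = { model: "gpt-5.1-codex", thinkingLevel: "high" };

ModelRegistry.isCodexModel(claudeFeature.model); // false -> Claude Agent SDK path
ModelRegistry.isCodexModel(codexFeature.model);  // true  -> CodexProvider.executeQuery()

// Thinking config is skipped for models that do not support it.
ModelRegistry.modelSupportsThinking(codexFeature.model); // false

ModelProviderFactory.getProviderForModel(codexFeature.model).getName(); // "codex"
```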
app/electron/services/model-provider.js (new file, 414 lines)
@@ -0,0 +1,414 @@
/**
 * Model Provider Abstraction Layer
 *
 * This module provides an abstract interface for model providers (Claude, Codex, etc.)
 * allowing the application to use different AI models through a unified API.
 */

/**
 * Base class for model providers
 * Concrete implementations should extend this class
 */
class ModelProvider {
  constructor(config = {}) {
    this.config = config;
    this.name = 'base';
  }

  /**
   * Get provider name
   * @returns {string} Provider name
   */
  getName() {
    return this.name;
  }

  /**
   * Execute a query with the model provider
   * @param {Object} options Query options
   * @param {string} options.prompt The prompt to send
   * @param {string} options.model The model to use
   * @param {string} options.systemPrompt System prompt
   * @param {string} options.cwd Working directory
   * @param {number} options.maxTurns Maximum turns
   * @param {string[]} options.allowedTools Allowed tools
   * @param {Object} options.mcpServers MCP servers configuration
   * @param {AbortController} options.abortController Abort controller
   * @param {Object} options.thinking Thinking configuration
   * @returns {AsyncGenerator} Async generator yielding messages
   */
  async *executeQuery(options) {
    throw new Error('executeQuery must be implemented by subclass');
  }

  /**
   * Detect if this provider's CLI/SDK is installed
   * @returns {Promise<Object>} Installation status
   */
  async detectInstallation() {
    throw new Error('detectInstallation must be implemented by subclass');
  }

  /**
   * Get list of available models for this provider
   * @returns {Array<Object>} Array of model definitions
   */
  getAvailableModels() {
    throw new Error('getAvailableModels must be implemented by subclass');
  }

  /**
   * Validate provider configuration
   * @returns {Object} Validation result { valid: boolean, errors: string[] }
   */
  validateConfig() {
    throw new Error('validateConfig must be implemented by subclass');
  }

  /**
   * Get the full model string for a model key
   * @param {string} modelKey Short model key (e.g., 'opus', 'gpt-5.1-codex')
   * @returns {string} Full model string
   */
  getModelString(modelKey) {
    throw new Error('getModelString must be implemented by subclass');
  }

  /**
   * Check if provider supports a specific feature
   * @param {string} feature Feature name (e.g., 'thinking', 'tools', 'streaming')
   * @returns {boolean} Whether the feature is supported
   */
  supportsFeature(feature) {
    return false;
  }
}

/**
 * Claude Provider - Uses Anthropic Claude Agent SDK
 */
class ClaudeProvider extends ModelProvider {
  constructor(config = {}) {
    super(config);
    this.name = 'claude';
    this.sdk = null;
  }

  /**
   * Lazily load the Claude SDK
   */
  loadSdk() {
    if (!this.sdk) {
      this.sdk = require('@anthropic-ai/claude-agent-sdk');
    }
    return this.sdk;
  }

  async *executeQuery(options) {
    const { query } = this.loadSdk();

    const sdkOptions = {
      model: options.model,
      systemPrompt: options.systemPrompt,
      maxTurns: options.maxTurns || 1000,
      cwd: options.cwd,
      mcpServers: options.mcpServers,
      allowedTools: options.allowedTools,
      permissionMode: options.permissionMode || 'acceptEdits',
      sandbox: options.sandbox,
      abortController: options.abortController,
    };

    // Add thinking configuration if enabled
    if (options.thinking) {
      sdkOptions.thinking = options.thinking;
    }

    const currentQuery = query({ prompt: options.prompt, options: sdkOptions });

    for await (const msg of currentQuery) {
      yield msg;
    }
  }

  async detectInstallation() {
    const claudeCliDetector = require('./claude-cli-detector');
    return claudeCliDetector.getInstallationInfo();
  }

  getAvailableModels() {
    return [
      {
        id: 'haiku',
        name: 'Claude Haiku',
        modelString: 'claude-haiku-4-5',
        provider: 'claude',
        description: 'Fast and efficient for simple tasks',
        tier: 'basic'
      },
      {
        id: 'sonnet',
        name: 'Claude Sonnet',
        modelString: 'claude-sonnet-4-20250514',
        provider: 'claude',
        description: 'Balanced performance and capabilities',
        tier: 'standard'
      },
      {
        id: 'opus',
        name: 'Claude Opus 4.5',
        modelString: 'claude-opus-4-5-20251101',
        provider: 'claude',
        description: 'Most capable model for complex tasks',
        tier: 'premium'
      }
    ];
  }

  validateConfig() {
    const errors = [];

    // Check for OAuth token or API key
    if (!process.env.CLAUDE_CODE_OAUTH_TOKEN && !process.env.ANTHROPIC_API_KEY) {
      errors.push('No Claude authentication found. Set CLAUDE_CODE_OAUTH_TOKEN or ANTHROPIC_API_KEY.');
    }

    return {
      valid: errors.length === 0,
      errors
    };
  }

  getModelString(modelKey) {
    const modelMap = {
      haiku: 'claude-haiku-4-5',
      sonnet: 'claude-sonnet-4-20250514',
      opus: 'claude-opus-4-5-20251101'
    };
    return modelMap[modelKey] || modelMap.opus;
  }

  supportsFeature(feature) {
    const supportedFeatures = ['thinking', 'tools', 'streaming', 'mcp'];
    return supportedFeatures.includes(feature);
  }
}

/**
 * Codex Provider - Uses OpenAI Codex CLI
 */
class CodexProvider extends ModelProvider {
  constructor(config = {}) {
    super(config);
    this.name = 'codex';
  }

  async *executeQuery(options) {
    const codexExecutor = require('./codex-executor');

    const executeOptions = {
      prompt: options.prompt,
      model: options.model,
      cwd: options.cwd,
      systemPrompt: options.systemPrompt,
      maxTurns: options.maxTurns || 20,
      allowedTools: options.allowedTools,
      env: {
        ...process.env,
        OPENAI_API_KEY: process.env.OPENAI_API_KEY
      }
    };

    // Execute and yield results
    const generator = codexExecutor.execute(executeOptions);
    for await (const msg of generator) {
      yield msg;
    }
  }

  async detectInstallation() {
    const codexCliDetector = require('./codex-cli-detector');
    return codexCliDetector.getInstallationInfo();
  }

  getAvailableModels() {
    return [
      {
        id: 'gpt-5.1-codex-max',
        name: 'GPT-5.1 Codex Max',
        modelString: 'gpt-5.1-codex-max',
        provider: 'codex',
        description: 'Latest flagship - deep and fast reasoning for coding',
        tier: 'premium',
        default: true
      },
      {
        id: 'gpt-5.1-codex',
        name: 'GPT-5.1 Codex',
        modelString: 'gpt-5.1-codex',
        provider: 'codex',
        description: 'Optimized for code generation',
        tier: 'standard'
      },
      {
        id: 'gpt-5.1-codex-mini',
        name: 'GPT-5.1 Codex Mini',
        modelString: 'gpt-5.1-codex-mini',
        provider: 'codex',
        description: 'Faster and cheaper option',
        tier: 'basic'
      },
      {
        id: 'gpt-5.1',
        name: 'GPT-5.1',
        modelString: 'gpt-5.1',
        provider: 'codex',
        description: 'Broad world knowledge with strong reasoning',
        tier: 'standard'
      },
      {
        id: 'o3',
        name: 'O3',
        modelString: 'o3',
        provider: 'codex',
        description: 'Advanced reasoning model',
        tier: 'premium'
      },
      {
        id: 'o3-mini',
        name: 'O3 Mini',
        modelString: 'o3-mini',
        provider: 'codex',
        description: 'Efficient reasoning model',
        tier: 'standard'
      }
    ];
  }

  validateConfig() {
    const errors = [];
    const codexCliDetector = require('./codex-cli-detector');
    const installation = codexCliDetector.detectCodexInstallation();

    if (!installation.installed && !process.env.OPENAI_API_KEY) {
      errors.push('Codex CLI not installed and no OPENAI_API_KEY found.');
    }

    return {
      valid: errors.length === 0,
      errors
    };
  }

  getModelString(modelKey) {
    // Codex models use the key directly as the model string
    const modelMap = {
      'gpt-5.1-codex-max': 'gpt-5.1-codex-max',
      'gpt-5.1-codex': 'gpt-5.1-codex',
      'gpt-5.1-codex-mini': 'gpt-5.1-codex-mini',
      'gpt-5.1': 'gpt-5.1',
      'o3': 'o3',
      'o3-mini': 'o3-mini',
      'o4-mini': 'o4-mini',
      'gpt-4o': 'gpt-4o',
      'gpt-4o-mini': 'gpt-4o-mini'
    };
    return modelMap[modelKey] || 'gpt-5.1-codex-max';
  }

  supportsFeature(feature) {
    const supportedFeatures = ['tools', 'streaming'];
    return supportedFeatures.includes(feature);
  }
}

/**
 * Model Provider Factory
 * Creates the appropriate provider based on model or provider name
 */
class ModelProviderFactory {
  static providers = {
    claude: ClaudeProvider,
    codex: CodexProvider
  };

  /**
   * Get provider for a specific model
   * @param {string} modelId Model ID (e.g., 'opus', 'gpt-5.1-codex')
   * @returns {ModelProvider} Provider instance
   */
  static getProviderForModel(modelId) {
    // Check if it's a Claude model
    const claudeModels = ['haiku', 'sonnet', 'opus'];
    if (claudeModels.includes(modelId)) {
      return new ClaudeProvider();
    }

    // Check if it's a Codex/OpenAI model
    const codexModels = [
      'gpt-5.1-codex-max', 'gpt-5.1-codex', 'gpt-5.1-codex-mini', 'gpt-5.1',
      'o3', 'o3-mini', 'o4-mini', 'gpt-4o', 'gpt-4o-mini'
    ];
    if (codexModels.includes(modelId)) {
      return new CodexProvider();
    }

    // Default to Claude
    return new ClaudeProvider();
  }

  /**
   * Get provider by name
   * @param {string} providerName Provider name ('claude' or 'codex')
   * @returns {ModelProvider} Provider instance
   */
  static getProvider(providerName) {
    const ProviderClass = this.providers[providerName];
    if (!ProviderClass) {
      throw new Error(`Unknown provider: ${providerName}`);
    }
    return new ProviderClass();
  }

  /**
   * Get all available providers
   * @returns {string[]} List of provider names
   */
  static getAvailableProviders() {
    return Object.keys(this.providers);
  }

  /**
   * Get all available models across all providers
   * @returns {Array<Object>} All available models
   */
  static getAllModels() {
    const allModels = [];
    for (const providerName of this.getAvailableProviders()) {
      const provider = this.getProvider(providerName);
      const models = provider.getAvailableModels();
      allModels.push(...models);
    }
    return allModels;
  }

  /**
   * Check installation status for all providers
   * @returns {Promise<Object>} Installation status for each provider
   */
  static async checkAllProviders() {
    const status = {};
    for (const providerName of this.getAvailableProviders()) {
      const provider = this.getProvider(providerName);
      status[providerName] = await provider.detectInstallation();
    }
    return status;
  }
}

module.exports = {
  ModelProvider,
  ClaudeProvider,
  CodexProvider,
  ModelProviderFactory
};
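A short sketch of the factory in use, mirroring how the new IPC handlers call it (`checkAllProviders` is async; field names follow the provider definitions above).

```js
const { ModelProviderFactory } = require("./model-provider");

async function describeProviders() {
  // All models from both providers, as returned to the renderer by "model:get-available".
  const models = ModelProviderFactory.getAllModels();
  console.log(models.map((m) => `${m.provider}/${m.id} (${m.tier})`));

  // Installation status keyed by provider name, as used by "model:check-providers".
  const status = await ModelProviderFactory.checkAllProviders();
  console.log(status.claude, status.codex);
}
```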
app/electron/services/model-registry.js (new file, 369 lines)
@@ -0,0 +1,369 @@
|
||||
/**
|
||||
* Model Registry - Centralized model definitions and metadata
|
||||
*
|
||||
* This module provides a central registry of all available models
|
||||
* across different providers (Claude, Codex/OpenAI).
|
||||
*/
|
||||
|
||||
/**
|
||||
* Model Categories
|
||||
*/
|
||||
const MODEL_CATEGORIES = {
|
||||
CLAUDE: 'claude',
|
||||
OPENAI: 'openai',
|
||||
CODEX: 'codex'
|
||||
};
|
||||
|
||||
/**
|
||||
* Model Tiers (capability levels)
|
||||
*/
|
||||
const MODEL_TIERS = {
|
||||
BASIC: 'basic', // Fast, cheap, simple tasks
|
||||
STANDARD: 'standard', // Balanced performance
|
||||
PREMIUM: 'premium' // Most capable, complex tasks
|
||||
};
|
||||
|
||||
const CODEX_MODEL_IDS = [
|
||||
'gpt-5.1-codex-max',
|
||||
'gpt-5.1-codex',
|
||||
'gpt-5.1-codex-mini',
|
||||
'gpt-5.1',
|
||||
'o3',
|
||||
'o3-mini',
|
||||
'o4-mini',
|
||||
'gpt-4o',
|
||||
'gpt-4o-mini'
|
||||
];
|
||||
|
||||
/**
 * All available models with full metadata
 */
const MODELS = {
  // Claude Models
  haiku: {
    id: 'haiku',
    name: 'Claude Haiku',
    modelString: 'claude-haiku-4-5',
    provider: 'claude',
    category: MODEL_CATEGORIES.CLAUDE,
    tier: MODEL_TIERS.BASIC,
    description: 'Fast and efficient for simple tasks',
    capabilities: ['code', 'text', 'tools'],
    maxTokens: 8192,
    contextWindow: 200000,
    supportsThinking: true,
    requiresAuth: 'CLAUDE_CODE_OAUTH_TOKEN'
  },
  sonnet: {
    id: 'sonnet',
    name: 'Claude Sonnet',
    modelString: 'claude-sonnet-4-20250514',
    provider: 'claude',
    category: MODEL_CATEGORIES.CLAUDE,
    tier: MODEL_TIERS.STANDARD,
    description: 'Balanced performance and capabilities',
    capabilities: ['code', 'text', 'tools', 'analysis'],
    maxTokens: 8192,
    contextWindow: 200000,
    supportsThinking: true,
    requiresAuth: 'CLAUDE_CODE_OAUTH_TOKEN'
  },
  opus: {
    id: 'opus',
    name: 'Claude Opus 4.5',
    modelString: 'claude-opus-4-5-20251101',
    provider: 'claude',
    category: MODEL_CATEGORIES.CLAUDE,
    tier: MODEL_TIERS.PREMIUM,
    description: 'Most capable model for complex tasks',
    capabilities: ['code', 'text', 'tools', 'analysis', 'reasoning'],
    maxTokens: 8192,
    contextWindow: 200000,
    supportsThinking: true,
    requiresAuth: 'CLAUDE_CODE_OAUTH_TOKEN',
    default: true
  },

  // OpenAI GPT-5.1 Codex Models
  'gpt-5.1-codex-max': {
    id: 'gpt-5.1-codex-max',
    name: 'GPT-5.1 Codex Max',
    modelString: 'gpt-5.1-codex-max',
    provider: 'codex',
    category: MODEL_CATEGORIES.OPENAI,
    tier: MODEL_TIERS.PREMIUM,
    description: 'Latest flagship - deep and fast reasoning for coding',
    capabilities: ['code', 'text', 'tools', 'reasoning'],
    maxTokens: 32768,
    contextWindow: 128000,
    supportsThinking: false,
    requiresAuth: 'OPENAI_API_KEY',
    codexDefault: true
  },
  'gpt-5.1-codex': {
    id: 'gpt-5.1-codex',
    name: 'GPT-5.1 Codex',
    modelString: 'gpt-5.1-codex',
    provider: 'codex',
    category: MODEL_CATEGORIES.OPENAI,
    tier: MODEL_TIERS.STANDARD,
    description: 'Optimized for code generation',
    capabilities: ['code', 'text', 'tools'],
    maxTokens: 32768,
    contextWindow: 128000,
    supportsThinking: false,
    requiresAuth: 'OPENAI_API_KEY'
  },
  'gpt-5.1-codex-mini': {
    id: 'gpt-5.1-codex-mini',
    name: 'GPT-5.1 Codex Mini',
    modelString: 'gpt-5.1-codex-mini',
    provider: 'codex',
    category: MODEL_CATEGORIES.OPENAI,
    tier: MODEL_TIERS.BASIC,
    description: 'Faster and cheaper option',
    capabilities: ['code', 'text'],
    maxTokens: 16384,
    contextWindow: 128000,
    supportsThinking: false,
    requiresAuth: 'OPENAI_API_KEY'
  },
  'gpt-5.1': {
    id: 'gpt-5.1',
    name: 'GPT-5.1',
    modelString: 'gpt-5.1',
    provider: 'codex',
    category: MODEL_CATEGORIES.OPENAI,
    tier: MODEL_TIERS.STANDARD,
    description: 'Broad world knowledge with strong reasoning',
    capabilities: ['code', 'text', 'reasoning'],
    maxTokens: 32768,
    contextWindow: 128000,
    supportsThinking: false,
    requiresAuth: 'OPENAI_API_KEY'
  },

  // OpenAI O-Series Models
  o3: {
    id: 'o3',
    name: 'O3',
    modelString: 'o3',
    provider: 'codex',
    category: MODEL_CATEGORIES.OPENAI,
    tier: MODEL_TIERS.PREMIUM,
    description: 'Advanced reasoning model',
    capabilities: ['code', 'text', 'tools', 'reasoning'],
    maxTokens: 100000,
    contextWindow: 200000,
    supportsThinking: false,
    requiresAuth: 'OPENAI_API_KEY'
  },
  'o3-mini': {
    id: 'o3-mini',
    name: 'O3 Mini',
    modelString: 'o3-mini',
    provider: 'codex',
    category: MODEL_CATEGORIES.OPENAI,
    tier: MODEL_TIERS.STANDARD,
    description: 'Efficient reasoning model',
    capabilities: ['code', 'text', 'reasoning'],
    maxTokens: 65536,
    contextWindow: 128000,
    supportsThinking: false,
    requiresAuth: 'OPENAI_API_KEY'
  },
  'o4-mini': {
    id: 'o4-mini',
    name: 'O4 Mini',
    modelString: 'o4-mini',
    provider: 'codex',
    category: MODEL_CATEGORIES.OPENAI,
    tier: MODEL_TIERS.BASIC,
    description: 'Fast reasoning with lower cost',
    capabilities: ['code', 'text', 'reasoning'],
    maxTokens: 65536,
    contextWindow: 128000,
    supportsThinking: false,
    requiresAuth: 'OPENAI_API_KEY'
  }
};

/**
 * Model Registry class for querying and managing models
 */
class ModelRegistry {
  /**
   * Get all registered models
   * @returns {Object} All models
   */
  static getAllModels() {
    return MODELS;
  }

  /**
   * Get model by ID
   * @param {string} modelId Model ID
   * @returns {Object|null} Model definition or null
   */
  static getModel(modelId) {
    return MODELS[modelId] || null;
  }

  /**
   * Get models by provider
   * @param {string} provider Provider name ('claude' or 'codex')
   * @returns {Object[]} Array of models for the provider
   */
  static getModelsByProvider(provider) {
    return Object.values(MODELS).filter(m => m.provider === provider);
  }

  /**
   * Get models by category
   * @param {string} category Category name
   * @returns {Object[]} Array of models in the category
   */
  static getModelsByCategory(category) {
    return Object.values(MODELS).filter(m => m.category === category);
  }

  /**
   * Get models by tier
   * @param {string} tier Tier name
   * @returns {Object[]} Array of models in the tier
   */
  static getModelsByTier(tier) {
    return Object.values(MODELS).filter(m => m.tier === tier);
  }

  /**
   * Get default model for a provider
   * @param {string} provider Provider name
   * @returns {Object|null} Default model or null
   */
  static getDefaultModel(provider = 'claude') {
    const models = this.getModelsByProvider(provider);
    if (provider === 'claude') {
      return models.find(m => m.default) || models[0];
    }
    if (provider === 'codex') {
      return models.find(m => m.codexDefault) || models[0];
    }
    return models[0];
  }

  /**
   * Get model string (full model name) for a model ID
   * @param {string} modelId Model ID
   * @returns {string} Full model string
   */
  static getModelString(modelId) {
    const model = this.getModel(modelId);
    return model ? model.modelString : modelId;
  }

  /**
   * Determine provider for a model ID
   * @param {string} modelId Model ID
   * @returns {string} Provider name ('claude' or 'codex')
   */
  static getProviderForModel(modelId) {
    const model = this.getModel(modelId);
    if (model) {
      return model.provider;
    }

    // Fallback detection for models not explicitly registered (keeps legacy Codex IDs working)
    if (CODEX_MODEL_IDS.includes(modelId)) {
      return 'codex';
    }

    return 'claude';
  }

  /**
   * Check if a model is a Claude model
   * @param {string} modelId Model ID
   * @returns {boolean} Whether it's a Claude model
   */
  static isClaudeModel(modelId) {
    return this.getProviderForModel(modelId) === 'claude';
  }

  /**
   * Check if a model is a Codex/OpenAI model
   * @param {string} modelId Model ID
   * @returns {boolean} Whether it's a Codex model
   */
  static isCodexModel(modelId) {
    return this.getProviderForModel(modelId) === 'codex';
  }

  /**
   * Get models grouped by provider for UI display
   * @returns {Object} Models grouped by provider
   */
  static getModelsGroupedByProvider() {
    return {
      claude: this.getModelsByProvider('claude'),
      codex: this.getModelsByProvider('codex')
    };
  }

  /**
   * Get all model IDs as an array
   * @returns {string[]} Array of model IDs
   */
  static getAllModelIds() {
    return Object.keys(MODELS);
  }

  /**
   * Check if model supports a specific capability
   * @param {string} modelId Model ID
   * @param {string} capability Capability name
   * @returns {boolean} Whether the model supports the capability
   */
  static modelSupportsCapability(modelId, capability) {
    const model = this.getModel(modelId);
    return model ? model.capabilities.includes(capability) : false;
  }

  /**
   * Check if model supports extended thinking
   * @param {string} modelId Model ID
   * @returns {boolean} Whether the model supports thinking
   */
  static modelSupportsThinking(modelId) {
    const model = this.getModel(modelId);
    return model ? model.supportsThinking : false;
  }

  /**
   * Get required authentication for a model
   * @param {string} modelId Model ID
   * @returns {string|null} Required auth env variable name
   */
  static getRequiredAuth(modelId) {
    const model = this.getModel(modelId);
    return model ? model.requiresAuth : null;
  }

  /**
   * Check if authentication is available for a model
   * @param {string} modelId Model ID
   * @returns {boolean} Whether auth is available
   */
  static hasAuthForModel(modelId) {
    const authVar = this.getRequiredAuth(modelId);
    if (!authVar) return false;
    return !!process.env[authVar];
  }
}

module.exports = {
  MODEL_CATEGORIES,
  MODEL_TIERS,
  MODELS,
  ModelRegistry
};
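A minimal usage sketch, for orientation only: the require path and the selected model ID below are assumptions that do not appear in this diff; only the ModelRegistry calls themselves come from the class above.

// Sketch only -- the require path is an assumption about where this module lives.
const { ModelRegistry } = require('./services/model-registry');

const selected = 'gpt-5.1-codex-max';                         // hypothetical user selection
const provider = ModelRegistry.getProviderForModel(selected); // -> 'codex'

// Run the selected model if its auth env var (OPENAI_API_KEY here) is set;
// otherwise fall back to the default Claude model (opus, per `default: true`).
const model = ModelRegistry.hasAuthForModel(selected)
  ? ModelRegistry.getModel(selected)
  : ModelRegistry.getDefaultModel('claude');

console.log(`Using ${model.name} (${model.modelString}) via ${model.provider}`);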