diff --git a/.cursor/plans/codex_cli_openai_model_support_9987f5e4.plan.md b/.cursor/plans/codex_cli_openai_model_support_9987f5e4.plan.md new file mode 100644 index 00000000..ebec34f8 --- /dev/null +++ b/.cursor/plans/codex_cli_openai_model_support_9987f5e4.plan.md @@ -0,0 +1,453 @@ +--- +name: Codex CLI OpenAI Model Support +overview: Extend the model support system to integrate OpenAI Codex CLI, enabling users to use OpenAI models (GPT-4o, o3, etc.) alongside existing Claude models. This includes CLI detection, model provider abstraction, execution wrapper, and UI updates. +todos: + - id: model-provider-abstraction + content: Create model provider abstraction layer with base interface and Claude/Codex implementations + status: pending + - id: codex-cli-detector + content: Implement Codex CLI detector service to check installation status and version + status: pending + - id: codex-executor + content: Create Codex CLI execution wrapper that spawns subprocess and parses JSON output + status: pending + - id: codex-config-manager + content: Implement Codex TOML configuration manager for model provider setup + status: pending + - id: model-registry + content: Create centralized model registry with provider mappings and metadata + status: pending + - id: update-feature-executor + content: Refactor feature-executor.js to use model provider abstraction instead of direct SDK calls + status: pending + - id: update-agent-service + content: Update agent-service.js to support configurable model selection via provider abstraction + status: pending + - id: message-converter + content: Create message format converter to translate Codex JSONL output to Claude SDK format + status: pending + - id: update-ui-types + content: Extend TypeScript types in app-store.ts to include OpenAI models and provider metadata + status: pending + - id: update-board-view + content: Expand model selection dropdown in board-view.tsx to include OpenAI models with provider grouping + status: pending + - 
id: update-settings-view + content: Add OpenAI API key input, Codex CLI status check, and test connection button to settings-view.tsx + status: pending + - id: openai-test-api + content: Create OpenAI API test endpoint at app/src/app/api/openai/test/route.ts + status: pending + - id: ipc-handlers + content: Add IPC handlers in main.js for model management (checkCodexCli, getAvailableModels, testOpenAI) + status: pending + - id: preload-api + content: Update preload.js and electron.d.ts to expose new IPC methods to renderer process + status: pending + - id: env-manager + content: Create environment variable manager for centralized API key and config handling + status: pending + - id: error-handling + content: Implement provider fallback logic and user-friendly error messages for missing CLI/API keys + status: pending +--- + +# Codex CLI OpenAI Model Support Implementation Plan + +## Overview + +Extend Automaker's model support to integrate OpenAI Codex CLI, allowing users to use the latest GPT-5.1 Codex models (`gpt-5.1-codex-max`, `gpt-5.1-codex`, `gpt-5.1-codex-mini`, `gpt-5.1`) alongside existing Claude models. Codex CLI defaults to `gpt-5.1-codex-max` and uses ChatGPT Enterprise authentication (no API key required). The implementation will follow the existing Claude CLI pattern but add abstraction for multiple model providers. + +## Current Architecture Analysis + +### Model Usage Points + +1. **Feature Executor** (`app/electron/services/feature-executor.js`): + + - Uses `MODEL_MAP` with hardcoded Claude models (haiku, sonnet, opus) + - Calls `@anthropic-ai/claude-agent-sdk` `query()` function + - Model selection via `getModelString(feature)` method + +2. **Agent Service** (`app/electron/agent-service.js`): + + - Hardcoded model: `"claude-opus-4-5-20251101"` + - Uses Claude Agent SDK directly + +3. **API Route** (`app/src/app/api/chat/route.ts`): + + - Hardcoded model: `"claude-opus-4-5-20251101"` + - Uses Claude Agent SDK + +4. 
**Project Analyzer** (`app/electron/services/project-analyzer.js`): + + - Hardcoded model: `"claude-sonnet-4-20250514"` + +5. **UI Components**: + + - `board-view.tsx`: Model dropdown (haiku/sonnet/opus) + - `app-store.ts`: `AgentModel` type limited to Claude models + +### Authentication + +- Claude: Uses `CLAUDE_CODE_OAUTH_TOKEN` environment variable +- Codex: Uses `OPENAI_API_KEY` environment variable (per Codex docs) + +## Implementation Strategy + +### Phase 1: Model Provider Abstraction Layer + +#### 1.1 Create Model Provider Interface + +**File**: `app/electron/services/model-provider.js` + +- Abstract base class/interface for model providers +- Methods: `executeQuery()`, `detectInstallation()`, `getAvailableModels()`, `validateConfig()` +- Implementations: + - `ClaudeProvider` (wraps existing SDK usage) + - `CodexProvider` (new, wraps Codex CLI execution) + +#### 1.2 Create Codex CLI Detector + +**File**: `app/electron/services/codex-cli-detector.js` + +- Similar to `claude-cli-detector.js` +- Check for `codex` command in PATH +- Check for npm global installation: `npm list -g @openai/codex` +- Check for Homebrew installation on macOS +- Return: `{ installed: boolean, path: string, version: string, method: 'cli'|'npm'|'brew'|'none' }` + +#### 1.3 Create Codex Provider Implementation + +**File**: `app/electron/services/codex-provider.js` + +- Extends model provider interface +- Executes Codex CLI via `child_process.spawn()` or `execSync()` +- Handles JSON output parsing (`codex exec --json`) +- Manages TOML configuration file creation/updates +- Supports latest GPT-5.1 Codex models: + - `gpt-5.1-codex-max` (default, latest flagship for deep and fast reasoning) + - `gpt-5.1-codex` (optimized for codex) + - `gpt-5.1-codex-mini` (cheaper, faster, less capable) + - `gpt-5.1` (broad world knowledge with strong general reasoning) +- Uses ChatGPT Enterprise authentication (no API key required for these models) +- Note: Legacy models (GPT-4o, o3, o1, etc.) 
are not supported - Codex CLI focuses on GPT-5.1 Codex family only + +### Phase 2: Model Configuration System + +#### 2.1 Extended Model Registry + +**File**: `app/electron/services/model-registry.js` + +- Centralized model configuration +- Model definitions with provider mapping: + ```javascript + { + id: "claude-opus", + name: "Claude Opus 4.5", + provider: "claude", + modelString: "claude-opus-4-5-20251101", + ... + }, + { + id: "gpt-4o", + name: "GPT-4o", + provider: "codex", + modelString: "gpt-4o", + requiresApiKey: "OPENAI_API_KEY", + ... + } + ``` + +- Model categories: `claude`, `openai`, `azure`, `custom` + +#### 2.2 Codex Configuration Manager + +**File**: `app/electron/services/codex-config-manager.js` + +- Manages Codex TOML config file (typically `~/.config/codex/config.toml` or project-specific) +- Creates/updates model provider configurations: + ```toml + [model_providers.openai-chat-completions] + name = "OpenAI using Chat Completions" + base_url = "https://api.openai.com/v1" + env_key = "OPENAI_API_KEY" + wire_api = "chat" + + [profiles.gpt4o] + model = "gpt-4o" + model_provider = "openai-chat-completions" + ``` + +- Profile management for different use cases +- Validates configuration before execution + +### Phase 3: Execution Integration + +#### 3.1 Update Feature Executor + +**File**: `app/electron/services/feature-executor.js` + +- Replace direct SDK calls with model provider abstraction +- Update `getModelString()` to return model ID instead of string +- Add `getModelProvider(modelId)` method +- Modify `implementFeature()` to: + - Get provider for selected model + - Use provider's `executeQuery()` method + - Handle different response formats (SDK vs CLI JSON) + +#### 3.2 Update Agent Service + +**File**: `app/electron/agent-service.js` + +- Replace hardcoded model with configurable model selection +- Use model provider abstraction +- Support model selection per session + +#### 3.3 Update Project Analyzer + +**File**: 
`app/electron/services/project-analyzer.js` + +- Use model provider abstraction +- Make model configurable (currently hardcoded to sonnet) + +#### 3.4 Update API Route + +**File**: `app/src/app/api/chat/route.ts` + +- Support model selection from request +- Use model provider abstraction (if running in Electron context) +- Fallback to Claude SDK for web-only usage + +### Phase 4: Codex CLI Execution Wrapper + +#### 4.1 Codex Executor + +**File**: `app/electron/services/codex-executor.js` + +- Wraps `codex exec` command execution +- Handles subprocess spawning with proper environment variables +- Parses JSON output (JSONL format from `--json` flag) +- Converts Codex output format to match Claude SDK message format +- Handles streaming responses +- Error handling and timeout management + +#### 4.2 Message Format Conversion + +**File**: `app/electron/services/message-converter.js` + +- Converts Codex JSONL output to Claude SDK message format +- Maps Codex events: + - `thread.started` → session initialization + - `item.completed` (reasoning) → thinking output + - `item.completed` (command_execution) → tool use + - `item.completed` (agent_message) → assistant message +- Maintains compatibility with existing UI components + +### Phase 5: UI Updates + +#### 5.1 Update Type Definitions + +**File**: `app/src/store/app-store.ts` + +- Extend `AgentModel` type to include OpenAI models: + ```typescript + export type AgentModel = + | "opus" | "sonnet" | "haiku" // Claude + | "gpt-4o" | "gpt-4o-mini" | "gpt-3.5-turbo" | "o3" | "o1"; // OpenAI + ``` + +- Add `modelProvider` field to `Feature` interface +- Add provider metadata to model selection + +#### 5.2 Update Board View + +**File**: `app/src/components/views/board-view.tsx` + +- Expand model dropdown to include OpenAI models +- Group models by provider (Claude / OpenAI) +- Show provider badges/icons +- Display model availability based on CLI detection +- Add tooltips showing model capabilities + +#### 5.3 Update Settings View 
+ +**File**: `app/src/components/views/settings-view.tsx` + +- Add OpenAI API key input field (similar to Anthropic key) +- Add Codex CLI status check (similar to Claude CLI check) +- Show installation instructions if Codex CLI not detected +- Add test connection button for OpenAI API +- Display detected Codex CLI version/path + +#### 5.4 Create API Test Route + +**File**: `app/src/app/api/openai/test/route.ts` + +- Similar to `app/src/app/api/claude/test/route.ts` +- Test OpenAI API connection +- Validate API key format +- Return connection status + +### Phase 6: Configuration & Environment + +#### 6.1 Environment Variable Management + +**File**: `app/electron/services/env-manager.js` + +- Centralized environment variable handling +- Loads from `.env` file and system environment +- Validates required variables per provider +- Provides fallback mechanisms + +#### 6.2 IPC Handlers for Model Management + +**File**: `app/electron/main.js` + +- Add IPC handlers: + - `model:checkCodexCli` - Check Codex CLI installation + - `model:getAvailableModels` - List available models per provider + - `model:testOpenAI` - Test OpenAI API connection + - `model:updateCodexConfig` - Update Codex TOML config + +#### 6.3 Preload API Updates + +**File**: `app/electron/preload.js` + +- Expose new IPC methods to renderer +- Add TypeScript definitions in `app/src/types/electron.d.ts` + +### Phase 7: Error Handling & Fallbacks + +#### 7.1 Provider Fallback Logic + +- If Codex CLI not available, fallback to Claude +- If OpenAI API key missing, show clear error messages +- Graceful degradation when provider unavailable + +#### 7.2 Error Messages + +- User-friendly error messages for missing CLI +- Installation instructions per platform +- API key validation errors +- Model availability warnings + +## File Structure Summary + +### New Files + +``` +app/electron/services/ + ├── model-provider.js # Abstract provider interface + ├── claude-provider.js # Claude SDK wrapper + ├── codex-provider.js # 
Codex CLI wrapper + ├── codex-cli-detector.js # Codex CLI detection + ├── codex-executor.js # Codex CLI execution wrapper + ├── codex-config-manager.js # TOML config management + ├── model-registry.js # Centralized model definitions + ├── message-converter.js # Format conversion utilities + └── env-manager.js # Environment variable management + +app/src/app/api/openai/ + └── test/route.ts # OpenAI API test endpoint +``` + +### Modified Files + +``` +app/electron/services/ + ├── feature-executor.js # Use model provider abstraction + ├── agent-service.js # Support multiple providers + └── project-analyzer.js # Configurable model selection + +app/electron/ + ├── main.js # Add IPC handlers + └── preload.js # Expose new APIs + +app/src/ + ├── store/app-store.ts # Extended model types + ├── components/views/ + │ ├── board-view.tsx # Expanded model selection UI + │ └── settings-view.tsx # OpenAI API key & Codex CLI status + └── types/electron.d.ts # Updated IPC type definitions +``` + +## Implementation Details + +### Codex CLI Execution Pattern + +```javascript +// Example execution flow +const codexExecutor = require('./codex-executor'); +const result = await codexExecutor.execute({ + prompt: "Implement feature X", + model: "gpt-4o", + cwd: projectPath, + systemPrompt: "...", + maxTurns: 20, + allowedTools: ["Read", "Write", "Edit", "Bash"], + env: { OPENAI_API_KEY: process.env.OPENAI_API_KEY } +}); +``` + +### Model Provider Interface + +```javascript +class ModelProvider { + async executeQuery(options) { + // Returns async generator of messages + } + + async detectInstallation() { + // Returns installation status + } + + getAvailableModels() { + // Returns list of supported models + } + + validateConfig() { + // Validates provider configuration + } +} +``` + +### Configuration File Location + +- User config: `~/.config/codex/config.toml` (or platform equivalent) +- Project config: `.codex/config.toml` (optional, project-specific) +- Fallback: In-memory config passed 
via CLI args + +## Testing Considerations + +1. **CLI Detection**: Test on macOS, Linux, Windows +2. **Model Execution**: Test with different OpenAI models +3. **Error Handling**: Test missing CLI, invalid API keys, network errors +4. **Format Conversion**: Verify message format compatibility +5. **Concurrent Execution**: Test multiple features with different providers +6. **Fallback Logic**: Test provider fallback scenarios + +## Documentation Updates + +1. Update README with Codex CLI installation instructions: + + - `npm install -g @openai/codex@latest` or `brew install codex` + - ChatGPT Enterprise authentication (no API key needed) + - API-based authentication for older models + +2. Add model selection guide: + + - GPT-5.1 Codex Max (default, best for coding) + - o3/o4-mini with reasoning efforts + - GPT-5.1/GPT-5 with verbosity control + +3. Document reasoning effort and verbosity settings +4. Add troubleshooting section for common issues +5. Document model list discovery via MCP interface + +## Migration Path + +1. Implement provider abstraction alongside existing code +2. Add Codex support without breaking existing Claude functionality +3. Gradually migrate services to use abstraction layer +4. Maintain backward compatibility during transition +5. 
// (plan, final migration step) 5. Remove hardcoded models after full migration.

// ============================================================================
// Codex CLI Detection IPC Handlers (app/electron/main.js)
// ============================================================================

/**
 * Check Codex CLI installation status.
 * Resolves to { success: true, ...installationInfo } from the detector, or
 * { success: false, error } when detection itself throws.
 */
ipcMain.handle("codex:check-cli", async () => {
  try {
    const codexCliDetector = require("./services/codex-cli-detector");
    const info = codexCliDetector.getInstallationInfo();
    return { success: true, ...info };
  } catch (error) {
    console.error("[IPC] codex:check-cli error:", error);
    return { success: false, error: error.message };
  }
});

/**
 * List all models known to every registered provider.
 * Resolves to { success: true, models } or { success: false, error }.
 */
ipcMain.handle("model:get-available", async () => {
  try {
    const { ModelProviderFactory } = require("./services/model-provider");
    const models = ModelProviderFactory.getAllModels();
    return { success: true, models };
  } catch (error) {
    console.error("[IPC] model:get-available error:", error);
    return { success: false, error: error.message };
  }
});

/**
 * Report installation status for every provider (Claude, Codex, ...).
 * Resolves to { success: true, providers } or { success: false, error }.
 */
ipcMain.handle("model:check-providers", async () => {
  try {
    const { ModelProviderFactory } = require("./services/model-provider");
    const status = await ModelProviderFactory.checkAllProviders();
    return { success: true, providers: status };
  } catch (error) {
    console.error("[IPC] model:check-providers error:", error);
    return { success: false, error: error.message };
  }
});

/**
 * Test an OpenAI API key by listing the account's models.
 *
 * Hardened compared to the first draft:
 * - fails fast with a clear message when neither the argument nor
 *   OPENAI_API_KEY supplies a key (instead of a confusing 401);
 * - bounds the request with a 15s timeout instead of hanging forever;
 * - tolerates non-JSON error bodies (proxies / HTML 5xx pages) that would
 *   previously make the `response.json()` call throw and mask the status.
 */
ipcMain.handle("openai:test-connection", async (_, { apiKey } = {}) => {
  try {
    const key = (apiKey || process.env.OPENAI_API_KEY || "").trim();
    if (!key) {
      return {
        success: false,
        error: "No OpenAI API key provided and OPENAI_API_KEY is not set."
      };
    }

    const response = await fetch("https://api.openai.com/v1/models", {
      method: "GET",
      headers: { Authorization: `Bearer ${key}` },
      signal: AbortSignal.timeout(15000)
    });

    if (response.ok) {
      const data = await response.json();
      return {
        success: true,
        message: `Connected successfully. Found ${data.data?.length || 0} models.`
      };
    }

    // Error path: the body is usually JSON, but must not be assumed to be.
    let message = "Failed to connect to OpenAI API";
    try {
      const body = await response.json();
      message = body.error?.message || `${message} (HTTP ${response.status})`;
    } catch {
      message = `${message} (HTTP ${response.status})`;
    }
    return { success: false, error: message };
  } catch (error) {
    console.error("[IPC] openai:test-connection error:", error);
    return { success: false, error: error.message };
  }
});

// ============================================================================
// app/electron/preload.js — properties added to the existing
// contextBridge.exposeInMainWorld("electronAPI", { ... }) call. The enclosing
// call sits outside this hunk, so the additions are preserved here verbatim
// for reference rather than rewritten out of context:
//
//   // Codex CLI Detection API
//   checkCodexCli: () => ipcRenderer.invoke("codex:check-cli"),
//
//   // Model Management APIs
//   model: {
//     getAvailable: () => ipcRenderer.invoke("model:get-available"),
//     checkProviders: () => ipcRenderer.invoke("model:check-providers"),
//   },
//
//   // OpenAI API
//   testOpenAIConnection: (apiKey) =>
//     ipcRenderer.invoke("openai:test-connection", { apiKey }),
// ============================================================================

// --- app/electron/services/codex-cli-detector.js: module prologue ---
const { execSync } = require('child_process');
const fs = require('fs');
const path = require('path');
const os = require('os');
/**
 * Codex CLI Detector - checks whether OpenAI's Codex CLI is installed.
 *
 * The Codex CLI is OpenAI's agent CLI tool for the GPT-5.1 Codex model
 * family (gpt-5.1-codex-max, gpt-5.1-codex, ...), used for code generation
 * and agentic tasks.
 *
 * Relies on `execSync`, `fs`, `path` and `os` being required at the top of
 * this module.
 */
class CodexCliDetector {
  /**
   * Detect whether the Codex CLI is installed and how.
   *
   * Strategies, in order: PATH lookup (`which`), npm global installation,
   * Homebrew (macOS only), `where` (Windows only), a list of common install
   * paths, and finally whether an OPENAI_API_KEY is at least available.
   *
   * @returns {Object} { installed: boolean, path: string|null,
   *   version: string|null, method: 'cli'|'npm'|'brew'|'api-key-only'|'none' }
   *   plus optional `hasApiKey` / `error` fields.
   */
  static detectCodexInstallation() {
    try {
      // Method 1: 'codex' resolvable on PATH (POSIX shells).
      try {
        const codexPath = execSync('which codex 2>/dev/null', { encoding: 'utf-8' }).trim();
        if (codexPath) {
          return this.#found(codexPath, 'cli');
        }
      } catch (error) {
        // Not on PATH; fall through to the other strategies.
      }

      // Method 2: npm global installation.
      try {
        const npmListOutput = execSync('npm list -g @openai/codex --depth=0 2>/dev/null', { encoding: 'utf-8' });
        if (npmListOutput && npmListOutput.includes('@openai/codex')) {
          // `npm bin -g` was removed in npm v9, so derive the bin directory
          // from the global prefix instead: <prefix>/bin on POSIX; on Windows
          // npm places shims (codex.cmd) directly in the prefix.
          const npmPrefix = execSync('npm prefix -g', { encoding: 'utf-8' }).trim();
          const codexPath = process.platform === 'win32'
            ? path.join(npmPrefix, 'codex.cmd')
            : path.join(npmPrefix, 'bin', 'codex');
          return this.#found(codexPath, 'npm');
        }
      } catch (error) {
        // npm missing, or the package is not installed globally.
      }

      // Method 3: Homebrew installation on macOS.
      if (process.platform === 'darwin') {
        try {
          const brewList = execSync('brew list --formula 2>/dev/null', { encoding: 'utf-8' });
          // Match the exact formula name so e.g. "codexctl" cannot
          // false-positive a plain substring check.
          if (brewList.split('\n').includes('codex')) {
            const brewPrefixOutput = execSync('brew --prefix codex 2>/dev/null', { encoding: 'utf-8' }).trim();
            return this.#found(path.join(brewPrefixOutput, 'bin', 'codex'), 'brew');
          }
        } catch (error) {
          // Homebrew not installed, or codex not installed through it.
        }
      }

      // Method 4: PATH lookup on Windows.
      if (process.platform === 'win32') {
        try {
          // `where` may print several matches; take the first.
          const codexPath = execSync('where codex 2>nul', { encoding: 'utf-8' }).trim().split('\n')[0];
          if (codexPath) {
            return this.#found(codexPath, 'cli');
          }
        } catch (error) {
          // Not found on Windows.
        }
      }

      // Method 5: well-known installation locations.
      const commonPaths = [
        path.join(os.homedir(), '.local', 'bin', 'codex'),
        path.join(os.homedir(), '.npm-global', 'bin', 'codex'),
        '/usr/local/bin/codex',
        '/opt/homebrew/bin/codex',
      ];
      for (const checkPath of commonPaths) {
        if (fs.existsSync(checkPath)) {
          return this.#found(checkPath, 'cli');
        }
      }

      // Method 6: no CLI found, but an API key is set (the Codex API could
      // still be used directly).
      if (process.env.OPENAI_API_KEY) {
        return {
          installed: false,
          path: null,
          version: null,
          method: 'api-key-only',
          hasApiKey: true
        };
      }

      return { installed: false, path: null, version: null, method: 'none' };
    } catch (error) {
      console.error('[CodexCliDetector] Error detecting Codex installation:', error);
      return {
        installed: false,
        path: null,
        version: null,
        method: 'none',
        error: error.message
      };
    }
  }

  /**
   * Build a positive detection result for an executable path.
   * @param {string} codexPath Path to the codex executable
   * @param {'cli'|'npm'|'brew'} method Detection method used
   * @returns {Object} detection result shaped like detectCodexInstallation()
   */
  static #found(codexPath, method) {
    return {
      installed: true,
      path: codexPath,
      version: this.getCodexVersion(codexPath),
      method: method
    };
  }

  /**
   * Get the Codex CLI version for an executable path.
   * @param {string} codexPath Path to the codex executable
   * @returns {string|null} Version string, or null when it cannot be run
   */
  static getCodexVersion(codexPath) {
    try {
      // Use the platform-appropriate stderr sink; `2>/dev/null` is invalid
      // under cmd.exe and would break the Windows/common-path branches.
      const devNull = process.platform === 'win32' ? '2>nul' : '2>/dev/null';
      const version = execSync(`"${codexPath}" --version ${devNull}`, { encoding: 'utf-8' }).trim();
      return version || null;
    } catch (error) {
      return null;
    }
  }

  /**
   * Get installation status plus a human-readable recommendation.
   * @returns {Object} { status, recommendation, ... } — status is one of
   *   'installed' | 'api_key_only' | 'not_installed'
   */
  static getInstallationInfo() {
    const detection = this.detectCodexInstallation();

    if (detection.installed) {
      return {
        status: 'installed',
        method: detection.method,
        version: detection.version,
        path: detection.path,
        recommendation: detection.method === 'cli'
          ? 'Using Codex CLI - ready for GPT-5.1 Codex models'
          : `Using Codex CLI via ${detection.method} - ready for GPT-5.1 Codex models`
      };
    }

    // CLI absent but an API key is present.
    if (detection.method === 'api-key-only') {
      return {
        status: 'api_key_only',
        method: 'api-key-only',
        recommendation: 'OPENAI_API_KEY detected but Codex CLI not installed. Install Codex CLI for full agentic capabilities.',
        installCommands: this.getInstallCommands()
      };
    }

    return {
      status: 'not_installed',
      recommendation: 'Install OpenAI Codex CLI to use GPT-5.1 Codex models for agentic tasks',
      installCommands: this.getInstallCommands()
    };
  }

  /**
   * Installation commands, keyed by platform/package manager.
   * @returns {Object} { npm, macos, linux, windows }
   */
  static getInstallCommands() {
    return {
      npm: 'npm install -g @openai/codex@latest',
      macos: 'brew install codex',
      linux: 'npm install -g @openai/codex@latest',
      windows: 'npm install -g @openai/codex@latest'
    };
  }

  /**
   * Whether a model name is in the known Codex CLI model list.
   * @param {string} model Model name to check
   * @returns {boolean} true when the model is supported
   */
  static isModelSupported(model) {
    const supportedModels = [
      'gpt-5.1-codex-max',
      'gpt-5.1-codex',
      'gpt-5.1-codex-mini',
      'gpt-5.1',
      'o3',
      'o3-mini',
      'o4-mini'
    ];
    return supportedModels.includes(model);
  }

  /**
   * Default model used by the Codex CLI.
   * @returns {string} Default model name
   */
  static getDefaultModel() {
    return 'gpt-5.1-codex-max';
  }
}

// Guarded so the class can also be evaluated in an ESM context (where
// `module` is undefined) without throwing; unchanged behavior under CommonJS.
if (typeof module !== 'undefined' && module.exports !== undefined) {
  module.exports = CodexCliDetector;
}
Codex CLI Execution Wrapper + * + * This module handles spawning and managing Codex CLI processes + * for executing OpenAI model queries. + */ + +const { spawn } = require('child_process'); +const { EventEmitter } = require('events'); +const readline = require('readline'); +const CodexCliDetector = require('./codex-cli-detector'); + +/** + * Message types from Codex CLI JSON output + */ +const CODEX_EVENT_TYPES = { + THREAD_STARTED: 'thread.started', + ITEM_STARTED: 'item.started', + ITEM_COMPLETED: 'item.completed', + THREAD_COMPLETED: 'thread.completed', + ERROR: 'error' +}; + +/** + * Codex Executor - Manages Codex CLI process execution + */ +class CodexExecutor extends EventEmitter { + constructor() { + super(); + this.currentProcess = null; + this.codexPath = null; + } + + /** + * Find and cache the Codex CLI path + * @returns {string|null} Path to codex executable + */ + findCodexPath() { + if (this.codexPath) { + return this.codexPath; + } + + const installation = CodexCliDetector.detectCodexInstallation(); + if (installation.installed && installation.path) { + this.codexPath = installation.path; + return this.codexPath; + } + + return null; + } + + /** + * Execute a Codex CLI query + * @param {Object} options Execution options + * @param {string} options.prompt The prompt to execute + * @param {string} options.model Model to use (default: gpt-5.1-codex-max) + * @param {string} options.cwd Working directory + * @param {string} options.systemPrompt System prompt (optional, will be prepended to prompt) + * @param {number} options.maxTurns Not used - Codex CLI doesn't support this parameter + * @param {string[]} options.allowedTools Not used - Codex CLI doesn't support this parameter + * @param {Object} options.env Environment variables + * @returns {AsyncGenerator} Generator yielding messages + */ + async *execute(options) { + const { + prompt, + model = 'gpt-5.1-codex-max', + cwd = process.cwd(), + systemPrompt, + maxTurns, // Not used by Codex CLI + 
allowedTools, // Not used by Codex CLI + env = {} + } = options; + + const codexPath = this.findCodexPath(); + if (!codexPath) { + yield { + type: 'error', + error: 'Codex CLI not found. Please install it with: npm install -g @openai/codex@latest' + }; + return; + } + + // Combine system prompt with main prompt if provided + // Codex CLI doesn't support --system-prompt argument, so we prepend it to the prompt + let combinedPrompt = prompt; + console.log('[CodexExecutor] Original prompt length:', prompt?.length || 0); + if (systemPrompt) { + combinedPrompt = `${systemPrompt}\n\n---\n\n${prompt}`; + console.log('[CodexExecutor] System prompt prepended to main prompt'); + console.log('[CodexExecutor] System prompt length:', systemPrompt.length); + console.log('[CodexExecutor] Combined prompt length:', combinedPrompt.length); + } + + // Build command arguments + // Note: maxTurns and allowedTools are not supported by Codex CLI + console.log('[CodexExecutor] Building command arguments...'); + const args = this.buildArgs({ + prompt: combinedPrompt, + model + }); + + console.log('[CodexExecutor] Executing command:', codexPath); + console.log('[CodexExecutor] Number of args:', args.length); + console.log('[CodexExecutor] Args (without prompt):', args.slice(0, -1).join(' ')); + console.log('[CodexExecutor] Prompt length in args:', args[args.length - 1]?.length || 0); + console.log('[CodexExecutor] Prompt preview (first 200 chars):', args[args.length - 1]?.substring(0, 200)); + console.log('[CodexExecutor] Working directory:', cwd); + + // Spawn the process + const processEnv = { + ...process.env, + ...env, + // Ensure OPENAI_API_KEY is available + OPENAI_API_KEY: env.OPENAI_API_KEY || process.env.OPENAI_API_KEY + }; + + // Log API key status (without exposing the key) + if (processEnv.OPENAI_API_KEY) { + console.log('[CodexExecutor] OPENAI_API_KEY is set (length:', processEnv.OPENAI_API_KEY.length, ')'); + } else { + console.warn('[CodexExecutor] WARNING: OPENAI_API_KEY is 
not set!'); + } + + console.log('[CodexExecutor] Spawning process...'); + const proc = spawn(codexPath, args, { + cwd, + env: processEnv, + stdio: ['pipe', 'pipe', 'pipe'] + }); + + this.currentProcess = proc; + console.log('[CodexExecutor] Process spawned with PID:', proc.pid); + + // Track process events + proc.on('error', (error) => { + console.error('[CodexExecutor] Process error:', error); + }); + + proc.on('spawn', () => { + console.log('[CodexExecutor] Process spawned successfully'); + }); + + // Collect stderr output as it comes in + let stderr = ''; + let hasOutput = false; + let stdoutChunks = []; + let stderrChunks = []; + + proc.stderr.on('data', (data) => { + const errorText = data.toString(); + stderr += errorText; + stderrChunks.push(errorText); + hasOutput = true; + console.error('[CodexExecutor] stderr chunk received (', data.length, 'bytes):', errorText.substring(0, 200)); + }); + + proc.stderr.on('end', () => { + console.log('[CodexExecutor] stderr stream ended. Total chunks:', stderrChunks.length, 'Total length:', stderr.length); + }); + + proc.stdout.on('data', (data) => { + const text = data.toString(); + stdoutChunks.push(text); + hasOutput = true; + console.log('[CodexExecutor] stdout chunk received (', data.length, 'bytes):', text.substring(0, 200)); + }); + + proc.stdout.on('end', () => { + console.log('[CodexExecutor] stdout stream ended. 
Total chunks:', stdoutChunks.length); + }); + + // Create readline interface for parsing JSONL output + console.log('[CodexExecutor] Creating readline interface...'); + const rl = readline.createInterface({ + input: proc.stdout, + crlfDelay: Infinity + }); + + // Track accumulated content for converting to Claude format + let accumulatedText = ''; + let toolUses = []; + let lastOutputTime = Date.now(); + const OUTPUT_TIMEOUT = 30000; // 30 seconds timeout for no output + let lineCount = 0; + let jsonParseErrors = 0; + + // Set up timeout check + const checkTimeout = setInterval(() => { + const timeSinceLastOutput = Date.now() - lastOutputTime; + if (timeSinceLastOutput > OUTPUT_TIMEOUT && !hasOutput) { + console.warn('[CodexExecutor] No output received for', timeSinceLastOutput, 'ms. Process still alive:', !proc.killed); + } + }, 5000); + + console.log('[CodexExecutor] Starting to read lines from stdout...'); + + // Process stdout line by line (JSONL format) + try { + for await (const line of rl) { + hasOutput = true; + lastOutputTime = Date.now(); + lineCount++; + + console.log('[CodexExecutor] Line', lineCount, 'received (length:', line.length, '):', line.substring(0, 100)); + + if (!line.trim()) { + console.log('[CodexExecutor] Skipping empty line'); + continue; + } + + try { + const event = JSON.parse(line); + console.log('[CodexExecutor] Successfully parsed JSON event. Type:', event.type, 'Keys:', Object.keys(event)); + + const convertedMsg = this.convertToClaudeFormat(event); + console.log('[CodexExecutor] Converted message:', convertedMsg ? 
{ type: convertedMsg.type } : 'null'); + + if (convertedMsg) { + // Accumulate text content + if (convertedMsg.type === 'assistant' && convertedMsg.message?.content) { + for (const block of convertedMsg.message.content) { + if (block.type === 'text') { + accumulatedText += block.text; + console.log('[CodexExecutor] Accumulated text block (total length:', accumulatedText.length, ')'); + } else if (block.type === 'tool_use') { + toolUses.push(block); + console.log('[CodexExecutor] Tool use detected:', block.name); + } + } + } + console.log('[CodexExecutor] Yielding message of type:', convertedMsg.type); + yield convertedMsg; + } else { + console.log('[CodexExecutor] Converted message is null, skipping'); + } + } catch (parseError) { + jsonParseErrors++; + // Non-JSON output, yield as text + console.log('[CodexExecutor] JSON parse error (', jsonParseErrors, 'total):', parseError.message); + console.log('[CodexExecutor] Non-JSON line content:', line.substring(0, 200)); + yield { + type: 'assistant', + message: { + content: [{ type: 'text', text: line + '\n' }] + } + }; + } + } + + console.log('[CodexExecutor] Finished reading all lines. Total lines:', lineCount, 'JSON errors:', jsonParseErrors); + } catch (readError) { + console.error('[CodexExecutor] Error reading from readline:', readError); + throw readError; + } finally { + clearInterval(checkTimeout); + console.log('[CodexExecutor] Cleaned up timeout checker'); + } + + // Handle process completion + console.log('[CodexExecutor] Waiting for process to close...'); + const exitCode = await new Promise((resolve) => { + proc.on('close', (code, signal) => { + console.log('[CodexExecutor] Process closed with code:', code, 'signal:', signal); + resolve(code); + }); + }); + + this.currentProcess = null; + console.log('[CodexExecutor] Process completed. 
Exit code:', exitCode, 'Has output:', hasOutput, 'Stderr length:', stderr.length); + + // Wait a bit for any remaining stderr data to be collected + console.log('[CodexExecutor] Waiting 200ms for any remaining stderr data...'); + await new Promise(resolve => setTimeout(resolve, 200)); + console.log('[CodexExecutor] Final stderr length:', stderr.length, 'Final stdout chunks:', stdoutChunks.length); + + if (exitCode !== 0) { + const errorMessage = stderr.trim() + ? `Codex CLI exited with code ${exitCode}.\n\nError output:\n${stderr}` + : `Codex CLI exited with code ${exitCode}. No error output captured.`; + + console.error('[CodexExecutor] Process failed with exit code', exitCode); + console.error('[CodexExecutor] Error message:', errorMessage); + console.error('[CodexExecutor] Stderr chunks:', stderrChunks.length, 'Stdout chunks:', stdoutChunks.length); + + yield { + type: 'error', + error: errorMessage + }; + } else if (!hasOutput && !stderr) { + // Process exited successfully but produced no output - might be API key issue + const warningMessage = 'Codex CLI completed but produced no output. This might indicate:\n' + + '- Missing or invalid OPENAI_API_KEY\n' + + '- Codex CLI configuration issue\n' + + '- The process completed without generating any response\n\n' + + `Debug info: Exit code ${exitCode}, stdout chunks: ${stdoutChunks.length}, stderr chunks: ${stderrChunks.length}, lines read: ${lineCount}`; + + console.warn('[CodexExecutor] No output detected:', warningMessage); + console.warn('[CodexExecutor] Stdout chunks:', stdoutChunks); + console.warn('[CodexExecutor] Stderr chunks:', stderrChunks); + + yield { + type: 'error', + error: warningMessage + }; + } else { + console.log('[CodexExecutor] Process completed successfully. 
Exit code:', exitCode, 'Lines processed:', lineCount); + } + } + + /** + * Build command arguments for Codex CLI + * Only includes supported arguments based on Codex CLI help: + * - --model: Model to use + * - --json: JSON output format + * - --full-auto: Non-interactive automatic execution + * + * Note: Codex CLI does NOT support: + * - --system-prompt (system prompt is prepended to main prompt) + * - --max-turns (not available in CLI) + * - --tools (not available in CLI) + * + * @param {Object} options Options + * @returns {string[]} Command arguments + */ + buildArgs(options) { + const { prompt, model } = options; + + console.log('[CodexExecutor] buildArgs called with model:', model, 'prompt length:', prompt?.length || 0); + + const args = ['exec']; + + // Add model (required for most use cases) + if (model) { + args.push('--model', model); + console.log('[CodexExecutor] Added model argument:', model); + } + + // Add JSON output flag for structured parsing + args.push('--json'); + console.log('[CodexExecutor] Added --json flag'); + + // Add full-auto mode (non-interactive) + // This enables automatic execution with workspace-write sandbox + args.push('--full-auto'); + console.log('[CodexExecutor] Added --full-auto flag'); + + // Add the prompt at the end + args.push(prompt); + console.log('[CodexExecutor] Added prompt (length:', prompt?.length || 0, ')'); + + console.log('[CodexExecutor] Final args count:', args.length); + return args; + } + + /** + * Map Claude tool names to Codex tool names + * @param {string[]} tools Array of tool names + * @returns {string[]} Mapped tool names + */ + mapToolsToCodex(tools) { + const toolMap = { + 'Read': 'read', + 'Write': 'write', + 'Edit': 'edit', + 'Bash': 'bash', + 'Glob': 'glob', + 'Grep': 'grep', + 'WebSearch': 'web-search', + 'WebFetch': 'web-fetch' + }; + + return tools + .map(tool => toolMap[tool] || tool.toLowerCase()) + .filter(tool => tool); // Remove undefined + } + + /** + * Convert Codex JSONL event to Claude 
SDK message format + * @param {Object} event Codex event object + * @returns {Object|null} Claude-format message or null + */ + convertToClaudeFormat(event) { + console.log('[CodexExecutor] Converting event:', JSON.stringify(event).substring(0, 200)); + const { type, data, item, thread_id } = event; + + switch (type) { + case CODEX_EVENT_TYPES.THREAD_STARTED: + case 'thread.started': + // Session initialization + return { + type: 'session_start', + sessionId: thread_id || data?.thread_id || event.thread_id + }; + + case CODEX_EVENT_TYPES.ITEM_COMPLETED: + case 'item.completed': + // Codex uses 'item' field, not 'data' + return this.convertItemCompleted(item || data); + + case CODEX_EVENT_TYPES.ITEM_STARTED: + case 'item.started': + // Convert item.started events - these indicate tool/command usage + const startedItem = item || data; + if (startedItem?.type === 'command_execution' && startedItem?.command) { + return { + type: 'assistant', + message: { + content: [{ + type: 'tool_use', + name: 'bash', + input: { command: startedItem.command } + }] + } + }; + } + // For other item.started types, return null (we'll show the completed version) + return null; + + case CODEX_EVENT_TYPES.THREAD_COMPLETED: + case 'thread.completed': + return { + type: 'complete', + sessionId: thread_id || data?.thread_id || event.thread_id + }; + + case CODEX_EVENT_TYPES.ERROR: + case 'error': + return { + type: 'error', + error: data?.message || item?.message || event.message || 'Unknown error from Codex CLI' + }; + + case 'turn.started': + // Turn started - just a marker, no need to convert + return null; + + default: + // Pass through other events + console.log('[CodexExecutor] Unhandled event type:', type); + return null; + } + } + + /** + * Convert item.completed event to Claude format + * @param {Object} item Event item data + * @returns {Object|null} Claude-format message + */ + convertItemCompleted(item) { + if (!item) { + console.log('[CodexExecutor] convertItemCompleted: item is 
null/undefined'); + return null; + } + + const itemType = item.type || item.item_type; + console.log('[CodexExecutor] convertItemCompleted: itemType =', itemType, 'item keys:', Object.keys(item)); + + switch (itemType) { + case 'reasoning': + // Thinking/reasoning output - Codex uses 'text' field + const reasoningText = item.text || item.content || ''; + console.log('[CodexExecutor] Converting reasoning, text length:', reasoningText.length); + return { + type: 'assistant', + message: { + content: [{ + type: 'thinking', + thinking: reasoningText + }] + } + }; + + case 'agent_message': + case 'message': + // Assistant text message + const messageText = item.content || item.text || ''; + console.log('[CodexExecutor] Converting message, text length:', messageText.length); + return { + type: 'assistant', + message: { + content: [{ + type: 'text', + text: messageText + }] + } + }; + + case 'command_execution': + // Command execution - show both the command and its output + const command = item.command || ''; + const output = item.aggregated_output || item.output || ''; + console.log('[CodexExecutor] Converting command_execution, command:', command.substring(0, 50), 'output length:', output.length); + + // Return as text message showing the command and output + return { + type: 'assistant', + message: { + content: [{ + type: 'text', + text: `\`\`\`bash\n${command}\n\`\`\`\n\n${output}` + }] + } + }; + + case 'tool_use': + // Tool use + return { + type: 'assistant', + message: { + content: [{ + type: 'tool_use', + name: item.tool || item.command || 'unknown', + input: item.input || item.args || {} + }] + } + }; + + case 'tool_result': + // Tool result + return { + type: 'tool_result', + tool_use_id: item.tool_use_id, + content: item.output || item.result + }; + + case 'todo_list': + // Todo list - convert to text format + const todos = item.items || []; + const todoText = todos.map((t, i) => `${i + 1}. 
${t.text || t}`).join('\n'); + console.log('[CodexExecutor] Converting todo_list, items:', todos.length); + return { + type: 'assistant', + message: { + content: [{ + type: 'text', + text: `**Todo List:**\n${todoText}` + }] + } + }; + + default: + // Generic text output + const text = item.text || item.content || item.aggregated_output; + if (text) { + console.log('[CodexExecutor] Converting default item type, text length:', text.length); + return { + type: 'assistant', + message: { + content: [{ + type: 'text', + text: String(text) + }] + } + }; + } + console.log('[CodexExecutor] convertItemCompleted: No text content found, returning null'); + return null; + } + } + + /** + * Abort current execution + */ + abort() { + if (this.currentProcess) { + console.log('[CodexExecutor] Aborting current process'); + this.currentProcess.kill('SIGTERM'); + this.currentProcess = null; + } + } + + /** + * Check if execution is in progress + * @returns {boolean} Whether execution is in progress + */ + isRunning() { + return this.currentProcess !== null; + } +} + +// Singleton instance +const codexExecutor = new CodexExecutor(); + +module.exports = codexExecutor; diff --git a/app/electron/services/feature-executor.js b/app/electron/services/feature-executor.js index 6899d8c5..e6de295e 100644 --- a/app/electron/services/feature-executor.js +++ b/app/electron/services/feature-executor.js @@ -3,10 +3,12 @@ const promptBuilder = require("./prompt-builder"); const contextManager = require("./context-manager"); const featureLoader = require("./feature-loader"); const mcpServerFactory = require("./mcp-server-factory"); +const { ModelRegistry } = require("./model-registry"); +const { ModelProviderFactory } = require("./model-provider"); -// Model name mappings +// Model name mappings for Claude (legacy - kept for backwards compatibility) const MODEL_MAP = { - haiku: "claude-haiku-4-20250514", + haiku: "claude-haiku-4-5", sonnet: "claude-sonnet-4-20250514", opus: "claude-opus-4-5-20251101", 
}; @@ -23,20 +25,47 @@ const THINKING_BUDGET_MAP = { /** * Feature Executor - Handles feature implementation using Claude Agent SDK + * Now supports multiple model providers (Claude, Codex/OpenAI) */ class FeatureExecutor { /** * Get the model string based on feature's model setting + * Supports both Claude and Codex/OpenAI models */ getModelString(feature) { const modelKey = feature.model || "opus"; // Default to opus - return MODEL_MAP[modelKey] || MODEL_MAP.opus; + + // Use the registry for model lookup + const modelString = ModelRegistry.getModelString(modelKey); + return modelString || MODEL_MAP[modelKey] || MODEL_MAP.opus; + } + + /** + * Determine if the feature uses a Codex/OpenAI model + */ + isCodexModel(feature) { + const modelKey = feature.model || "opus"; + return ModelRegistry.isCodexModel(modelKey); + } + + /** + * Get the appropriate provider for the feature's model + */ + getProvider(feature) { + const modelKey = feature.model || "opus"; + return ModelProviderFactory.getProviderForModel(modelKey); } /** * Get thinking configuration based on feature's thinkingLevel */ getThinkingConfig(feature) { + const modelId = feature.model || "opus"; + // Skip thinking config for models that don't support it (e.g., Codex CLI) + if (!ModelRegistry.modelSupportsThinking(modelId)) { + return null; + } + const level = feature.thinkingLevel || "none"; const budgetTokens = THINKING_BUDGET_MAP[level]; @@ -109,6 +138,11 @@ class FeatureExecutor { async implementFeature(feature, projectPath, sendToRenderer, execution) { console.log(`[FeatureExecutor] Implementing: ${feature.description}`); + // Declare variables outside try block so they're available in catch + let modelString; + let providerName; + let isCodex; + try { // ======================================== // PHASE 1: PLANNING @@ -161,7 +195,14 @@ class FeatureExecutor { }); } - console.log(`[FeatureExecutor] Using model: ${modelString}, thinking: ${feature.thinkingLevel || 'none'}`); + providerName = 
this.isCodexModel(feature) ? 'Codex/OpenAI' : 'Claude'; + console.log(`[FeatureExecutor] Using provider: ${providerName}, model: ${modelString}, thinking: ${feature.thinkingLevel || 'none'}`); + + // Note: Claude Agent SDK handles authentication automatically - it can use: + // 1. CLAUDE_CODE_OAUTH_TOKEN env var (for SDK mode) + // 2. Claude CLI's own authentication (if CLI is installed) + // 3. ANTHROPIC_API_KEY (fallback) + // We don't need to validate here - let the SDK/CLI handle auth errors // Configure options for the SDK query const options = { @@ -224,8 +265,31 @@ class FeatureExecutor { }); console.log(`[FeatureExecutor] Phase: ACTION for ${feature.description}`); - // Send query - const currentQuery = query({ prompt, options }); + // Send query - use appropriate provider based on model + let currentQuery; + isCodex = this.isCodexModel(feature); + + if (isCodex) { + // Use Codex provider for OpenAI models + console.log(`[FeatureExecutor] Using Codex provider for model: ${modelString}`); + const provider = this.getProvider(feature); + currentQuery = provider.executeQuery({ + prompt, + model: modelString, + cwd: projectPath, + systemPrompt: promptBuilder.getCodingPrompt(), + maxTurns: 20, // Codex CLI typically uses fewer turns + allowedTools: options.allowedTools, + abortController: abortController, + env: { + OPENAI_API_KEY: process.env.OPENAI_API_KEY + } + }); + } else { + // Use Claude SDK (original implementation) + currentQuery = query({ prompt, options }); + } + execution.query = currentQuery; // Stream responses @@ -235,6 +299,18 @@ class FeatureExecutor { // Check if this specific feature was aborted if (!execution.isActive()) break; + // Handle error messages + if (msg.type === "error") { + const errorMsg = `\n❌ Error: ${msg.error}\n`; + await contextManager.writeToContextFile(projectPath, feature.id, errorMsg); + sendToRenderer({ + type: "auto_mode_error", + featureId: feature.id, + error: msg.error, + }); + throw new Error(msg.error); + } + if 
(msg.type === "assistant" && msg.message?.content) { for (const block of msg.message.content) { if (block.type === "text") { @@ -249,6 +325,15 @@ class FeatureExecutor { featureId: feature.id, content: block.text, }); + } else if (block.type === "thinking") { + // Handle thinking output from Codex O-series models + const thinkingMsg = `\n💭 Thinking: ${block.thinking?.substring(0, 200)}...\n`; + await contextManager.writeToContextFile(projectPath, feature.id, thinkingMsg); + sendToRenderer({ + type: "auto_mode_progress", + featureId: feature.id, + content: thinkingMsg, + }); } else if (block.type === "tool_use") { // First tool use indicates we're actively implementing if (!hasStartedToolUse) { @@ -341,6 +426,45 @@ class FeatureExecutor { } console.error("[FeatureExecutor] Error implementing feature:", error); + + // Safely get model info for error logging (may not be set if error occurred early) + const modelInfo = modelString ? { + message: error.message, + stack: error.stack, + name: error.name, + code: error.code, + model: modelString, + provider: providerName || 'unknown', + isCodex: isCodex !== undefined ? isCodex : 'unknown' + } : { + message: error.message, + stack: error.stack, + name: error.name, + code: error.code, + model: 'not initialized', + provider: 'unknown', + isCodex: 'unknown' + }; + + console.error("[FeatureExecutor] Error details:", modelInfo); + + // Check if this is a Claude CLI process error + if (error.message && error.message.includes("process exited with code")) { + const modelDisplay = modelString ? `Model: ${modelString}` : 'Model: not initialized'; + const errorMsg = `Claude Code CLI failed with exit code 1. 
This might be due to:\n` + + `- Invalid or unsupported model (${modelDisplay})\n` + + `- Missing or invalid CLAUDE_CODE_OAUTH_TOKEN\n` + + `- Claude CLI configuration issue\n` + + `- Model not available in your Claude account\n\n` + + `Original error: ${error.message}`; + + await contextManager.writeToContextFile(projectPath, feature.id, `\n❌ ${errorMsg}\n`); + sendToRenderer({ + type: "auto_mode_error", + featureId: feature.id, + error: errorMsg, + }); + } // Clean up if (execution) { diff --git a/app/electron/services/model-provider.js b/app/electron/services/model-provider.js new file mode 100644 index 00000000..084c0312 --- /dev/null +++ b/app/electron/services/model-provider.js @@ -0,0 +1,414 @@ +/** + * Model Provider Abstraction Layer + * + * This module provides an abstract interface for model providers (Claude, Codex, etc.) + * allowing the application to use different AI models through a unified API. + */ + +/** + * Base class for model providers + * Concrete implementations should extend this class + */ +class ModelProvider { + constructor(config = {}) { + this.config = config; + this.name = 'base'; + } + + /** + * Get provider name + * @returns {string} Provider name + */ + getName() { + return this.name; + } + + /** + * Execute a query with the model provider + * @param {Object} options Query options + * @param {string} options.prompt The prompt to send + * @param {string} options.model The model to use + * @param {string} options.systemPrompt System prompt + * @param {string} options.cwd Working directory + * @param {number} options.maxTurns Maximum turns + * @param {string[]} options.allowedTools Allowed tools + * @param {Object} options.mcpServers MCP servers configuration + * @param {AbortController} options.abortController Abort controller + * @param {Object} options.thinking Thinking configuration + * @returns {AsyncGenerator} Async generator yielding messages + */ + async *executeQuery(options) { + throw new Error('executeQuery must be 
implemented by subclass'); + } + + /** + * Detect if this provider's CLI/SDK is installed + * @returns {Promise} Installation status + */ + async detectInstallation() { + throw new Error('detectInstallation must be implemented by subclass'); + } + + /** + * Get list of available models for this provider + * @returns {Array} Array of model definitions + */ + getAvailableModels() { + throw new Error('getAvailableModels must be implemented by subclass'); + } + + /** + * Validate provider configuration + * @returns {Object} Validation result { valid: boolean, errors: string[] } + */ + validateConfig() { + throw new Error('validateConfig must be implemented by subclass'); + } + + /** + * Get the full model string for a model key + * @param {string} modelKey Short model key (e.g., 'opus', 'gpt-5.1-codex') + * @returns {string} Full model string + */ + getModelString(modelKey) { + throw new Error('getModelString must be implemented by subclass'); + } + + /** + * Check if provider supports a specific feature + * @param {string} feature Feature name (e.g., 'thinking', 'tools', 'streaming') + * @returns {boolean} Whether the feature is supported + */ + supportsFeature(feature) { + return false; + } +} + +/** + * Claude Provider - Uses Anthropic Claude Agent SDK + */ +class ClaudeProvider extends ModelProvider { + constructor(config = {}) { + super(config); + this.name = 'claude'; + this.sdk = null; + } + + /** + * Lazily load the Claude SDK + */ + loadSdk() { + if (!this.sdk) { + this.sdk = require('@anthropic-ai/claude-agent-sdk'); + } + return this.sdk; + } + + async *executeQuery(options) { + const { query } = this.loadSdk(); + + const sdkOptions = { + model: options.model, + systemPrompt: options.systemPrompt, + maxTurns: options.maxTurns || 1000, + cwd: options.cwd, + mcpServers: options.mcpServers, + allowedTools: options.allowedTools, + permissionMode: options.permissionMode || 'acceptEdits', + sandbox: options.sandbox, + abortController: options.abortController, + }; 
+ + // Add thinking configuration if enabled + if (options.thinking) { + sdkOptions.thinking = options.thinking; + } + + const currentQuery = query({ prompt: options.prompt, options: sdkOptions }); + + for await (const msg of currentQuery) { + yield msg; + } + } + + async detectInstallation() { + const claudeCliDetector = require('./claude-cli-detector'); + return claudeCliDetector.getInstallationInfo(); + } + + getAvailableModels() { + return [ + { + id: 'haiku', + name: 'Claude Haiku', + modelString: 'claude-haiku-4-5', + provider: 'claude', + description: 'Fast and efficient for simple tasks', + tier: 'basic' + }, + { + id: 'sonnet', + name: 'Claude Sonnet', + modelString: 'claude-sonnet-4-20250514', + provider: 'claude', + description: 'Balanced performance and capabilities', + tier: 'standard' + }, + { + id: 'opus', + name: 'Claude Opus 4.5', + modelString: 'claude-opus-4-5-20251101', + provider: 'claude', + description: 'Most capable model for complex tasks', + tier: 'premium' + } + ]; + } + + validateConfig() { + const errors = []; + + // Check for OAuth token or API key + if (!process.env.CLAUDE_CODE_OAUTH_TOKEN && !process.env.ANTHROPIC_API_KEY) { + errors.push('No Claude authentication found. 
Set CLAUDE_CODE_OAUTH_TOKEN or ANTHROPIC_API_KEY.'); + } + + return { + valid: errors.length === 0, + errors + }; + } + + getModelString(modelKey) { + const modelMap = { + haiku: 'claude-haiku-4-5', + sonnet: 'claude-sonnet-4-20250514', + opus: 'claude-opus-4-5-20251101' + }; + return modelMap[modelKey] || modelMap.opus; + } + + supportsFeature(feature) { + const supportedFeatures = ['thinking', 'tools', 'streaming', 'mcp']; + return supportedFeatures.includes(feature); + } +} + +/** + * Codex Provider - Uses OpenAI Codex CLI + */ +class CodexProvider extends ModelProvider { + constructor(config = {}) { + super(config); + this.name = 'codex'; + } + + async *executeQuery(options) { + const codexExecutor = require('./codex-executor'); + + const executeOptions = { + prompt: options.prompt, + model: options.model, + cwd: options.cwd, + systemPrompt: options.systemPrompt, + maxTurns: options.maxTurns || 20, + allowedTools: options.allowedTools, + env: { + ...process.env, + OPENAI_API_KEY: process.env.OPENAI_API_KEY + } + }; + + // Execute and yield results + const generator = codexExecutor.execute(executeOptions); + for await (const msg of generator) { + yield msg; + } + } + + async detectInstallation() { + const codexCliDetector = require('./codex-cli-detector'); + return codexCliDetector.getInstallationInfo(); + } + + getAvailableModels() { + return [ + { + id: 'gpt-5.1-codex-max', + name: 'GPT-5.1 Codex Max', + modelString: 'gpt-5.1-codex-max', + provider: 'codex', + description: 'Latest flagship - deep and fast reasoning for coding', + tier: 'premium', + default: true + }, + { + id: 'gpt-5.1-codex', + name: 'GPT-5.1 Codex', + modelString: 'gpt-5.1-codex', + provider: 'codex', + description: 'Optimized for code generation', + tier: 'standard' + }, + { + id: 'gpt-5.1-codex-mini', + name: 'GPT-5.1 Codex Mini', + modelString: 'gpt-5.1-codex-mini', + provider: 'codex', + description: 'Faster and cheaper option', + tier: 'basic' + }, + { + id: 'gpt-5.1', + name: 
'GPT-5.1', + modelString: 'gpt-5.1', + provider: 'codex', + description: 'Broad world knowledge with strong reasoning', + tier: 'standard' + }, + { + id: 'o3', + name: 'O3', + modelString: 'o3', + provider: 'codex', + description: 'Advanced reasoning model', + tier: 'premium' + }, + { + id: 'o3-mini', + name: 'O3 Mini', + modelString: 'o3-mini', + provider: 'codex', + description: 'Efficient reasoning model', + tier: 'standard' + } + ]; + } + + validateConfig() { + const errors = []; + const codexCliDetector = require('./codex-cli-detector'); + const installation = codexCliDetector.detectCodexInstallation(); + + if (!installation.installed && !process.env.OPENAI_API_KEY) { + errors.push('Codex CLI not installed and no OPENAI_API_KEY found.'); + } + + return { + valid: errors.length === 0, + errors + }; + } + + getModelString(modelKey) { + // Codex models use the key directly as the model string + const modelMap = { + 'gpt-5.1-codex-max': 'gpt-5.1-codex-max', + 'gpt-5.1-codex': 'gpt-5.1-codex', + 'gpt-5.1-codex-mini': 'gpt-5.1-codex-mini', + 'gpt-5.1': 'gpt-5.1', + 'o3': 'o3', + 'o3-mini': 'o3-mini', + 'o4-mini': 'o4-mini', + 'gpt-4o': 'gpt-4o', + 'gpt-4o-mini': 'gpt-4o-mini' + }; + return modelMap[modelKey] || 'gpt-5.1-codex-max'; + } + + supportsFeature(feature) { + const supportedFeatures = ['tools', 'streaming']; + return supportedFeatures.includes(feature); + } +} + +/** + * Model Provider Factory + * Creates the appropriate provider based on model or provider name + */ +class ModelProviderFactory { + static providers = { + claude: ClaudeProvider, + codex: CodexProvider + }; + + /** + * Get provider for a specific model + * @param {string} modelId Model ID (e.g., 'opus', 'gpt-5.1-codex') + * @returns {ModelProvider} Provider instance + */ + static getProviderForModel(modelId) { + // Check if it's a Claude model + const claudeModels = ['haiku', 'sonnet', 'opus']; + if (claudeModels.includes(modelId)) { + return new ClaudeProvider(); + } + + // Check if it's a 
Codex/OpenAI model + const codexModels = [ + 'gpt-5.1-codex-max', 'gpt-5.1-codex', 'gpt-5.1-codex-mini', 'gpt-5.1', + 'o3', 'o3-mini', 'o4-mini', 'gpt-4o', 'gpt-4o-mini' + ]; + if (codexModels.includes(modelId)) { + return new CodexProvider(); + } + + // Default to Claude + return new ClaudeProvider(); + } + + /** + * Get provider by name + * @param {string} providerName Provider name ('claude' or 'codex') + * @returns {ModelProvider} Provider instance + */ + static getProvider(providerName) { + const ProviderClass = this.providers[providerName]; + if (!ProviderClass) { + throw new Error(`Unknown provider: ${providerName}`); + } + return new ProviderClass(); + } + + /** + * Get all available providers + * @returns {string[]} List of provider names + */ + static getAvailableProviders() { + return Object.keys(this.providers); + } + + /** + * Get all available models across all providers + * @returns {Array} All available models + */ + static getAllModels() { + const allModels = []; + for (const providerName of this.getAvailableProviders()) { + const provider = this.getProvider(providerName); + const models = provider.getAvailableModels(); + allModels.push(...models); + } + return allModels; + } + + /** + * Check installation status for all providers + * @returns {Promise} Installation status for each provider + */ + static async checkAllProviders() { + const status = {}; + for (const providerName of this.getAvailableProviders()) { + const provider = this.getProvider(providerName); + status[providerName] = await provider.detectInstallation(); + } + return status; + } +} + +module.exports = { + ModelProvider, + ClaudeProvider, + CodexProvider, + ModelProviderFactory +}; diff --git a/app/electron/services/model-registry.js b/app/electron/services/model-registry.js new file mode 100644 index 00000000..3ba584e3 --- /dev/null +++ b/app/electron/services/model-registry.js @@ -0,0 +1,369 @@ +/** + * Model Registry - Centralized model definitions and metadata + * + * This 
module provides a central registry of all available models + * across different providers (Claude, Codex/OpenAI). + */ + +/** + * Model Categories + */ +const MODEL_CATEGORIES = { + CLAUDE: 'claude', + OPENAI: 'openai', + CODEX: 'codex' +}; + +/** + * Model Tiers (capability levels) + */ +const MODEL_TIERS = { + BASIC: 'basic', // Fast, cheap, simple tasks + STANDARD: 'standard', // Balanced performance + PREMIUM: 'premium' // Most capable, complex tasks +}; + +const CODEX_MODEL_IDS = [ + 'gpt-5.1-codex-max', + 'gpt-5.1-codex', + 'gpt-5.1-codex-mini', + 'gpt-5.1', + 'o3', + 'o3-mini', + 'o4-mini', + 'gpt-4o', + 'gpt-4o-mini' +]; + +/** + * All available models with full metadata + */ +const MODELS = { + // Claude Models + haiku: { + id: 'haiku', + name: 'Claude Haiku', + modelString: 'claude-haiku-4-5', + provider: 'claude', + category: MODEL_CATEGORIES.CLAUDE, + tier: MODEL_TIERS.BASIC, + description: 'Fast and efficient for simple tasks', + capabilities: ['code', 'text', 'tools'], + maxTokens: 8192, + contextWindow: 200000, + supportsThinking: true, + requiresAuth: 'CLAUDE_CODE_OAUTH_TOKEN' + }, + sonnet: { + id: 'sonnet', + name: 'Claude Sonnet', + modelString: 'claude-sonnet-4-20250514', + provider: 'claude', + category: MODEL_CATEGORIES.CLAUDE, + tier: MODEL_TIERS.STANDARD, + description: 'Balanced performance and capabilities', + capabilities: ['code', 'text', 'tools', 'analysis'], + maxTokens: 8192, + contextWindow: 200000, + supportsThinking: true, + requiresAuth: 'CLAUDE_CODE_OAUTH_TOKEN' + }, + opus: { + id: 'opus', + name: 'Claude Opus 4.5', + modelString: 'claude-opus-4-5-20251101', + provider: 'claude', + category: MODEL_CATEGORIES.CLAUDE, + tier: MODEL_TIERS.PREMIUM, + description: 'Most capable model for complex tasks', + capabilities: ['code', 'text', 'tools', 'analysis', 'reasoning'], + maxTokens: 8192, + contextWindow: 200000, + supportsThinking: true, + requiresAuth: 'CLAUDE_CODE_OAUTH_TOKEN', + default: true + }, + + // OpenAI GPT-5.1 Codex 
Models + 'gpt-5.1-codex-max': { + id: 'gpt-5.1-codex-max', + name: 'GPT-5.1 Codex Max', + modelString: 'gpt-5.1-codex-max', + provider: 'codex', + category: MODEL_CATEGORIES.OPENAI, + tier: MODEL_TIERS.PREMIUM, + description: 'Latest flagship - deep and fast reasoning for coding', + capabilities: ['code', 'text', 'tools', 'reasoning'], + maxTokens: 32768, + contextWindow: 128000, + supportsThinking: false, + requiresAuth: 'OPENAI_API_KEY', + codexDefault: true + }, + 'gpt-5.1-codex': { + id: 'gpt-5.1-codex', + name: 'GPT-5.1 Codex', + modelString: 'gpt-5.1-codex', + provider: 'codex', + category: MODEL_CATEGORIES.OPENAI, + tier: MODEL_TIERS.STANDARD, + description: 'Optimized for code generation', + capabilities: ['code', 'text', 'tools'], + maxTokens: 32768, + contextWindow: 128000, + supportsThinking: false, + requiresAuth: 'OPENAI_API_KEY' + }, + 'gpt-5.1-codex-mini': { + id: 'gpt-5.1-codex-mini', + name: 'GPT-5.1 Codex Mini', + modelString: 'gpt-5.1-codex-mini', + provider: 'codex', + category: MODEL_CATEGORIES.OPENAI, + tier: MODEL_TIERS.BASIC, + description: 'Faster and cheaper option', + capabilities: ['code', 'text'], + maxTokens: 16384, + contextWindow: 128000, + supportsThinking: false, + requiresAuth: 'OPENAI_API_KEY' + }, + 'gpt-5.1': { + id: 'gpt-5.1', + name: 'GPT-5.1', + modelString: 'gpt-5.1', + provider: 'codex', + category: MODEL_CATEGORIES.OPENAI, + tier: MODEL_TIERS.STANDARD, + description: 'Broad world knowledge with strong reasoning', + capabilities: ['code', 'text', 'reasoning'], + maxTokens: 32768, + contextWindow: 128000, + supportsThinking: false, + requiresAuth: 'OPENAI_API_KEY' + }, + + // OpenAI O-Series Models + o3: { + id: 'o3', + name: 'O3', + modelString: 'o3', + provider: 'codex', + category: MODEL_CATEGORIES.OPENAI, + tier: MODEL_TIERS.PREMIUM, + description: 'Advanced reasoning model', + capabilities: ['code', 'text', 'tools', 'reasoning'], + maxTokens: 100000, + contextWindow: 200000, + supportsThinking: false, + requiresAuth: 
'OPENAI_API_KEY' + }, + 'o3-mini': { + id: 'o3-mini', + name: 'O3 Mini', + modelString: 'o3-mini', + provider: 'codex', + category: MODEL_CATEGORIES.OPENAI, + tier: MODEL_TIERS.STANDARD, + description: 'Efficient reasoning model', + capabilities: ['code', 'text', 'reasoning'], + maxTokens: 65536, + contextWindow: 128000, + supportsThinking: false, + requiresAuth: 'OPENAI_API_KEY' + }, + 'o4-mini': { + id: 'o4-mini', + name: 'O4 Mini', + modelString: 'o4-mini', + provider: 'codex', + category: MODEL_CATEGORIES.OPENAI, + tier: MODEL_TIERS.BASIC, + description: 'Fast reasoning with lower cost', + capabilities: ['code', 'text', 'reasoning'], + maxTokens: 65536, + contextWindow: 128000, + supportsThinking: false, + requiresAuth: 'OPENAI_API_KEY' + } +}; + +/** + * Model Registry class for querying and managing models + */ +class ModelRegistry { + /** + * Get all registered models + * @returns {Object} All models + */ + static getAllModels() { + return MODELS; + } + + /** + * Get model by ID + * @param {string} modelId Model ID + * @returns {Object|null} Model definition or null + */ + static getModel(modelId) { + return MODELS[modelId] || null; + } + + /** + * Get models by provider + * @param {string} provider Provider name ('claude' or 'codex') + * @returns {Object[]} Array of models for the provider + */ + static getModelsByProvider(provider) { + return Object.values(MODELS).filter(m => m.provider === provider); + } + + /** + * Get models by category + * @param {string} category Category name + * @returns {Object[]} Array of models in the category + */ + static getModelsByCategory(category) { + return Object.values(MODELS).filter(m => m.category === category); + } + + /** + * Get models by tier + * @param {string} tier Tier name + * @returns {Object[]} Array of models in the tier + */ + static getModelsByTier(tier) { + return Object.values(MODELS).filter(m => m.tier === tier); + } + + /** + * Get default model for a provider + * @param {string} provider Provider name 
+ * @returns {Object|null} Default model or null + */ + static getDefaultModel(provider = 'claude') { + const models = this.getModelsByProvider(provider); + if (provider === 'claude') { + return models.find(m => m.default) || models[0]; + } + if (provider === 'codex') { + return models.find(m => m.codexDefault) || models[0]; + } + return models[0]; + } + + /** + * Get model string (full model name) for a model ID + * @param {string} modelId Model ID + * @returns {string} Full model string + */ + static getModelString(modelId) { + const model = this.getModel(modelId); + return model ? model.modelString : modelId; + } + + /** + * Determine provider for a model ID + * @param {string} modelId Model ID + * @returns {string} Provider name ('claude' or 'codex') + */ + static getProviderForModel(modelId) { + const model = this.getModel(modelId); + if (model) { + return model.provider; + } + + // Fallback detection for models not explicitly registered (keeps legacy Codex IDs working) + if (CODEX_MODEL_IDS.includes(modelId)) { + return 'codex'; + } + + return 'claude'; + } + + /** + * Check if a model is a Claude model + * @param {string} modelId Model ID + * @returns {boolean} Whether it's a Claude model + */ + static isClaudeModel(modelId) { + return this.getProviderForModel(modelId) === 'claude'; + } + + /** + * Check if a model is a Codex/OpenAI model + * @param {string} modelId Model ID + * @returns {boolean} Whether it's a Codex model + */ + static isCodexModel(modelId) { + return this.getProviderForModel(modelId) === 'codex'; + } + + /** + * Get models grouped by provider for UI display + * @returns {Object} Models grouped by provider + */ + static getModelsGroupedByProvider() { + return { + claude: this.getModelsByProvider('claude'), + codex: this.getModelsByProvider('codex') + }; + } + + /** + * Get all model IDs as an array + * @returns {string[]} Array of model IDs + */ + static getAllModelIds() { + return Object.keys(MODELS); + } + + /** + * Check if model 
supports a specific capability + * @param {string} modelId Model ID + * @param {string} capability Capability name + * @returns {boolean} Whether the model supports the capability + */ + static modelSupportsCapability(modelId, capability) { + const model = this.getModel(modelId); + return model ? model.capabilities.includes(capability) : false; + } + + /** + * Check if model supports extended thinking + * @param {string} modelId Model ID + * @returns {boolean} Whether the model supports thinking + */ + static modelSupportsThinking(modelId) { + const model = this.getModel(modelId); + return model ? model.supportsThinking : false; + } + + /** + * Get required authentication for a model + * @param {string} modelId Model ID + * @returns {string|null} Required auth env variable name + */ + static getRequiredAuth(modelId) { + const model = this.getModel(modelId); + return model ? model.requiresAuth : null; + } + + /** + * Check if authentication is available for a model + * @param {string} modelId Model ID + * @returns {boolean} Whether auth is available + */ + static hasAuthForModel(modelId) { + const authVar = this.getRequiredAuth(modelId); + if (!authVar) return false; + return !!process.env[authVar]; + } +} + +module.exports = { + MODEL_CATEGORIES, + MODEL_TIERS, + MODELS, + ModelRegistry +}; diff --git a/app/src/app/globals.css b/app/src/app/globals.css index af2f97c0..ad10f615 100644 --- a/app/src/app/globals.css +++ b/app/src/app/globals.css @@ -1362,6 +1362,39 @@ box-shadow: 0 0 8px #f97e72; } +/* Line clamp utilities for text overflow prevention */ +.line-clamp-2 { + display: -webkit-box; + -webkit-line-clamp: 2; + -webkit-box-orient: vertical; + overflow: hidden; + text-overflow: ellipsis; +} + +.line-clamp-3 { + display: -webkit-box; + -webkit-line-clamp: 3; + -webkit-box-orient: vertical; + overflow: hidden; + text-overflow: ellipsis; +} + +/* Kanban card improvements to prevent text overflow */ +.kanban-card-content { + word-wrap: break-word; + 
overflow-wrap: break-word; + hyphens: auto; +} + +/* Ensure proper column layout in double-width kanban columns */ +.kanban-columns-layout > * { + page-break-inside: avoid; + break-inside: avoid; + display: block; + width: 100%; + box-sizing: border-box; +} + /* Electron title bar drag region */ .titlebar-drag-region { -webkit-app-region: drag; diff --git a/app/src/components/views/agent-output-modal.tsx b/app/src/components/views/agent-output-modal.tsx index 701a943b..237f53b4 100644 --- a/app/src/components/views/agent-output-modal.tsx +++ b/app/src/components/views/agent-output-modal.tsx @@ -246,13 +246,13 @@ export function AgentOutputModal({ Agent Output -
+
+ ); + })} +
+ ); + + const newModelAllowsThinking = modelSupportsThinking(newFeature.model); + const editModelAllowsThinking = modelSupportsThinking(editingFeature?.model); + if (!currentProject) { return (
Add Feature {ACTION_SHORTCUTS.addFeature} @@ -1265,7 +1402,7 @@ export function BoardView() { > Start Next - + {ACTION_SHORTCUTS.startNext} @@ -1438,37 +1575,66 @@ export function BoardView() {

{/* Model Selection */} -
+
-
- {(["haiku", "sonnet", "opus"] as AgentModel[]).map((model) => ( - - ))} +
+
+

Claude (SDK)

+ + Native + +
+ {renderModelOptions( + CLAUDE_MODELS, + newFeature.model, + (model) => + setNewFeature({ + ...newFeature, + model, + thinkingLevel: modelSupportsThinking(model) + ? newFeature.thinkingLevel + : "none", + }) + )} +
+ +
+
+

+ OpenAI via Codex CLI +

+ + CLI + +
+ {renderModelOptions( + CODEX_MODELS, + newFeature.model, + (model) => + setNewFeature({ + ...newFeature, + model, + thinkingLevel: modelSupportsThinking(model) + ? newFeature.thinkingLevel + : "none", + }) + )}

- Haiku for simple tasks, Sonnet for balanced, Opus for complex tasks. + Claude models use the Claude SDK. OpenAI models run through the Codex CLI. + {!newModelAllowsThinking && ( + + Thinking controls are hidden for Codex CLI models. + + )}

- {/* Thinking Level */} + {/* Thinking Level - Hidden for Codex models */} + {newModelAllowsThinking && (
+ )}
- ))} +
+
+

Claude (SDK)

+ + Native + +
+ {renderModelOptions( + CLAUDE_MODELS, + (editingFeature.model ?? "opus") as AgentModel, + (model) => + setEditingFeature({ + ...editingFeature, + model, + thinkingLevel: modelSupportsThinking(model) + ? editingFeature.thinkingLevel + : "none", + }), + "edit-model-select" + )} +
+ +
+
+

+ OpenAI via Codex CLI +

+ + CLI + +
+ {renderModelOptions( + CODEX_MODELS, + (editingFeature.model ?? "opus") as AgentModel, + (model) => + setEditingFeature({ + ...editingFeature, + model, + thinkingLevel: modelSupportsThinking(model) + ? editingFeature.thinkingLevel + : "none", + }), + "edit-model-select" + )}

- Haiku for simple tasks, Sonnet for balanced, Opus for complex tasks. + Claude models use the Claude SDK. OpenAI models run through the Codex CLI. + {!editModelAllowsThinking && ( + + Thinking controls are hidden for Codex CLI models. + + )}

- {/* Thinking Level */} + {/* Thinking Level - Hidden for Codex models */} + {editModelAllowsThinking && (
+ )}
)} @@ -1860,7 +2059,7 @@ export function BoardView() { > Send Follow-Up - + ⌘↵ diff --git a/app/src/components/views/kanban-card.tsx b/app/src/components/views/kanban-card.tsx index b4fe1c74..1f9742c7 100644 --- a/app/src/components/views/kanban-card.tsx +++ b/app/src/components/views/kanban-card.tsx @@ -191,7 +191,7 @@ export function KanbanCard({ ref={setNodeRef} style={style} className={cn( - "cursor-grab active:cursor-grabbing transition-all backdrop-blur-sm border-border relative", + "cursor-grab active:cursor-grabbing transition-all backdrop-blur-sm border-border relative kanban-card-content", isDragging && "opacity-50 scale-105 shadow-lg", isCurrentAutoTask && "border-purple-500 border-2 shadow-purple-500/50 shadow-lg animate-pulse" @@ -260,10 +260,10 @@ export function KanbanCard({
)}
- + {feature.description} - + {feature.category}
@@ -283,7 +283,7 @@ export function KanbanCard({ ) : ( )} - {step} + {step} ))} {feature.steps.length > 3 && ( @@ -302,7 +302,7 @@ export function KanbanCard({ agentInfo && (isCurrentAutoTask || feature.status === "in_progress") && (
-
+
-
+
{agentInfo.lastToolUsed && ( {agentInfo.lastToolUsed} @@ -403,15 +403,15 @@ export function KanbanCard({ ) : todo.status === "in_progress" ? ( ) : ( - + )} {todo.content} @@ -432,7 +432,7 @@ export function KanbanCard({ feature.status === "verified") && ( <> {(feature.summary || summary || agentInfo.summary) && ( -
+
@@ -443,14 +443,14 @@ export function KanbanCard({ e.stopPropagation(); setIsSummaryDialogOpen(true); }} - className="p-0.5 rounded hover:bg-white/10 transition-colors text-zinc-500 hover:text-zinc-300" + className="p-0.5 rounded hover:bg-accent transition-colors text-muted-foreground hover:text-foreground" title="View full summary" data-testid={`expand-summary-${feature.id}`} >
-

+

{feature.summary || summary || agentInfo.summary}

@@ -460,7 +460,7 @@ export function KanbanCard({ !summary && !agentInfo.summary && agentInfo.toolCallCount > 0 && ( -
+
{agentInfo.toolCallCount} tool calls @@ -753,7 +753,7 @@ export function KanbanCard({ : feature.description} -
+
{feature.summary || summary || diff --git a/app/src/components/views/kanban-column.tsx b/app/src/components/views/kanban-column.tsx index 93755e44..fad6ebbf 100644 --- a/app/src/components/views/kanban-column.tsx +++ b/app/src/components/views/kanban-column.tsx @@ -50,7 +50,7 @@ export function KanbanColumn({ className={cn( "flex-1 overflow-y-auto p-2", isDoubleWidth - ? "columns-2 gap-2 [&>*]:break-inside-avoid [&>*]:mb-2" + ? "columns-2 gap-3 [&>*]:break-inside-avoid [&>*]:mb-3 [&>*]:overflow-hidden kanban-columns-layout" : "space-y-2" )} > diff --git a/app/src/components/views/settings-view.tsx b/app/src/components/views/settings-view.tsx index 2eeadfec..1a5f1b09 100644 --- a/app/src/components/views/settings-view.tsx +++ b/app/src/components/views/settings-view.tsx @@ -46,8 +46,10 @@ export function SettingsView() { } = useAppStore(); const [anthropicKey, setAnthropicKey] = useState(apiKeys.anthropic); const [googleKey, setGoogleKey] = useState(apiKeys.google); + const [openaiKey, setOpenaiKey] = useState(apiKeys.openai); const [showAnthropicKey, setShowAnthropicKey] = useState(false); const [showGoogleKey, setShowGoogleKey] = useState(false); + const [showOpenaiKey, setShowOpenaiKey] = useState(false); const [saved, setSaved] = useState(false); const [testingConnection, setTestingConnection] = useState(false); const [testResult, setTestResult] = useState<{ @@ -74,10 +76,32 @@ export function SettingsView() { }; error?: string; } | null>(null); + const [codexCliStatus, setCodexCliStatus] = useState<{ + success: boolean; + status?: string; + method?: string; + version?: string; + path?: string; + hasApiKey?: boolean; + recommendation?: string; + installCommands?: { + macos?: string; + windows?: string; + linux?: string; + npm?: string; + }; + error?: string; + } | null>(null); + const [testingOpenaiConnection, setTestingOpenaiConnection] = useState(false); + const [openaiTestResult, setOpenaiTestResult] = useState<{ + success: boolean; + message: string; + } | 
null>(null); useEffect(() => { setAnthropicKey(apiKeys.anthropic); setGoogleKey(apiKeys.google); + setOpenaiKey(apiKeys.openai); }, [apiKeys]); useEffect(() => { @@ -91,6 +115,14 @@ export function SettingsView() { console.error("Failed to check Claude CLI status:", error); } } + if (api?.checkCodexCli) { + try { + const status = await api.checkCodexCli(); + setCodexCliStatus(status); + } catch (error) { + console.error("Failed to check Codex CLI status:", error); + } + } }; checkCliStatus(); }, []); @@ -167,10 +199,64 @@ export function SettingsView() { } }; + const handleTestOpenaiConnection = async () => { + setTestingOpenaiConnection(true); + setOpenaiTestResult(null); + + try { + const api = getElectronAPI(); + if (api?.testOpenAIConnection) { + const result = await api.testOpenAIConnection(openaiKey); + if (result.success) { + setOpenaiTestResult({ + success: true, + message: result.message || "Connection successful! OpenAI API responded.", + }); + } else { + setOpenaiTestResult({ + success: false, + message: result.error || "Failed to connect to OpenAI API.", + }); + } + } else { + // Fallback to web API test + const response = await fetch("/api/openai/test", { + method: "POST", + headers: { + "Content-Type": "application/json", + }, + body: JSON.stringify({ apiKey: openaiKey }), + }); + + const data = await response.json(); + + if (response.ok && data.success) { + setOpenaiTestResult({ + success: true, + message: data.message || "Connection successful! OpenAI API responded.", + }); + } else { + setOpenaiTestResult({ + success: false, + message: data.error || "Failed to connect to OpenAI API.", + }); + } + } + } catch (error) { + setOpenaiTestResult({ + success: false, + message: "Network error. 
Please check your connection.", + }); + } finally { + setTestingOpenaiConnection(false); + } + }; + const handleSave = () => { setApiKeys({ anthropic: anthropicKey, google: googleKey, + openai: openaiKey, }); setSaved(true); setTimeout(() => setSaved(false), 2000); @@ -273,7 +359,7 @@ export function SettingsView() { )}
-

+

Used for Claude AI features. Get your key at{" "}

-

+

Used for Gemini AI features (including image/design prompts). Get your key at{" "} + {/* OpenAI API Key */} +

+ {/* Security Notice */}
@@ -419,13 +598,13 @@ export function SettingsView() { {/* Claude CLI Status Section */} {claudeCliStatus && ( -
-
+
+
-

Claude Code CLI

+

Claude Code CLI

-

+

Claude Code CLI provides better performance for long-running tasks, especially with ultrathink.

@@ -452,7 +631,7 @@ export function SettingsView() {
{claudeCliStatus.recommendation && ( -

{claudeCliStatus.recommendation}

+

{claudeCliStatus.recommendation}

)}
) : ( @@ -468,24 +647,123 @@ export function SettingsView() {
{claudeCliStatus.installCommands && (
-

Installation Commands:

+

Installation Commands:

{claudeCliStatus.installCommands.npm && ( -
-

npm:

- {claudeCliStatus.installCommands.npm} +
+

npm:

+ {claudeCliStatus.installCommands.npm}
)} {claudeCliStatus.installCommands.macos && ( -
-

macOS/Linux:

- {claudeCliStatus.installCommands.macos} +
+

macOS/Linux:

+ {claudeCliStatus.installCommands.macos}
)} {claudeCliStatus.installCommands.windows && ( -
-

Windows (PowerShell):

- {claudeCliStatus.installCommands.windows} +
+

Windows (PowerShell):

+ {claudeCliStatus.installCommands.windows} +
+ )} +
+
+ )} +
+ )} +
+
+ )} + + {/* Codex CLI Status Section */} + {codexCliStatus && ( +
+
+
+ +

OpenAI Codex CLI

+
+

+ Codex CLI enables GPT-5.1 Codex models for autonomous coding tasks. +

+
+
+ {codexCliStatus.success && codexCliStatus.status === 'installed' ? ( +
+
+ +
+

Codex CLI Installed

+
+ {codexCliStatus.method && ( +

Method: {codexCliStatus.method}

+ )} + {codexCliStatus.version && ( +

Version: {codexCliStatus.version}

+ )} + {codexCliStatus.path && ( +

+ Path: {codexCliStatus.path} +

+ )} +
+
+
+ {codexCliStatus.recommendation && ( +

{codexCliStatus.recommendation}

+ )} +
+ ) : codexCliStatus.status === 'api_key_only' ? ( +
+
+ +
+

API Key Detected - CLI Not Installed

+

+ {codexCliStatus.recommendation || 'OPENAI_API_KEY found but Codex CLI not installed. Install the CLI for full agentic capabilities.'} +

+
+
+ {codexCliStatus.installCommands && ( +
+

Installation Commands:

+
+ {codexCliStatus.installCommands.npm && ( +
+

npm:

+ {codexCliStatus.installCommands.npm} +
+ )} +
+
+ )} +
+ ) : ( +
+
+ +
+

Codex CLI Not Detected

+

+ {codexCliStatus.recommendation || 'Install OpenAI Codex CLI to use GPT-5.1 Codex models for autonomous coding.'} +

+
+
+ {codexCliStatus.installCommands && ( +
+

Installation Commands:

+
+ {codexCliStatus.installCommands.npm && ( +
+

npm:

+ {codexCliStatus.installCommands.npm} +
+ )} + {codexCliStatus.installCommands.macos && ( +
+

macOS (Homebrew):

+ {codexCliStatus.installCommands.macos}
)}
@@ -664,34 +942,34 @@ export function SettingsView() {
{/* Kanban Card Display Section */} -
-
+
+
-

+

Kanban Card Display

-

+

Control how much information is displayed on Kanban cards.

- +
@@ -699,14 +977,14 @@ export function SettingsView() { onClick={() => setKanbanCardDetailLevel("standard")} className={`flex flex-col items-center justify-center gap-2 px-4 py-4 rounded-lg border transition-all ${ kanbanCardDetailLevel === "standard" - ? "bg-white/5 border-brand-500 text-white" - : "bg-zinc-950/50 border-white/10 text-zinc-400 hover:text-white hover:bg-white/5" + ? "bg-accent border-brand-500 text-foreground" + : "bg-input border-border text-muted-foreground hover:text-foreground hover:bg-accent" }`} data-testid="kanban-detail-standard" > Standard - + Steps & progress @@ -714,19 +992,19 @@ export function SettingsView() { onClick={() => setKanbanCardDetailLevel("detailed")} className={`flex flex-col items-center justify-center gap-2 px-4 py-4 rounded-lg border transition-all ${ kanbanCardDetailLevel === "detailed" - ? "bg-white/5 border-brand-500 text-white" - : "bg-zinc-950/50 border-white/10 text-zinc-400 hover:text-white hover:bg-white/5" + ? "bg-accent border-brand-500 text-foreground" + : "bg-input border-border text-muted-foreground hover:text-foreground hover:bg-accent" }`} data-testid="kanban-detail-detailed" > Detailed - + Model, tools & tasks
-

+

Minimal: Shows only title and category
Standard: Adds steps preview and progress bar @@ -757,7 +1035,7 @@ export function SettingsView() {