diff --git a/.automaker/agents-context/feature-1765333578668-qbzk7xihs.md b/.automaker/agents-context/feature-1765333578668-qbzk7xihs.md new file mode 100644 index 00000000..72c3c1eb --- /dev/null +++ b/.automaker/agents-context/feature-1765333578668-qbzk7xihs.md @@ -0,0 +1,8 @@ +📋 Planning implementation for: For example i got haiku model running or codex one but we can still see opus 4.5 check if it not hardcoded and fix it to use proper model name that was used in this task +⚡ Executing implementation for: For example i got haiku model running or codex one but we can still see opus 4.5 check if it not hardcoded and fix it to use proper model name that was used in this task + +❌ Error: Reconnecting... 1/5 +📋 Planning implementation for: For example i got haiku model running or codex one but we can still see opus 4.5 check if it not hardcoded and fix it to use proper model name that was used in this task +⚡ Executing implementation for: For example i got haiku model running or codex one but we can still see opus 4.5 check if it not hardcoded and fix it to use proper model name that was used in this task +📋 Planning implementation for: For example i got haiku model running or codex one but we can still see opus 4.5 check if it not hardcoded and fix it to use proper model name that was used in this task +⚡ Executing implementation for: For example i got haiku model running or codex one but we can still see opus 4.5 check if it not hardcoded and fix it to use proper model name that was used in this task diff --git a/.automaker/agents-context/feature-1765334118538-zc6n2ngu8.md b/.automaker/agents-context/feature-1765334118538-zc6n2ngu8.md new file mode 100644 index 00000000..e0d3335d --- /dev/null +++ b/.automaker/agents-context/feature-1765334118538-zc6n2ngu8.md @@ -0,0 +1,4 @@ +📋 Planning implementation for: For example i got haiku model running or codex one but we can still see opus 4.5 check if it not hardcoded and fix it to use proper model name that was used in 
this task +⚡ Executing implementation for: For example i got haiku model running or codex one but we can still see opus 4.5 check if it not hardcoded and fix it to use proper model name that was used in this task +📋 Planning implementation for: For example i got haiku model running or codex one but we can still see opus 4.5 check if it not hardcoded and fix it to use proper model name that was used in this task +⚡ Executing implementation for: For example i got haiku model running or codex one but we can still see opus 4.5 check if it not hardcoded and fix it to use proper model name that was used in this task diff --git a/.automaker/worktrees/176536627888-implement-profile-view-and-in-the-sideba b/.automaker/worktrees/176536627888-implement-profile-view-and-in-the-sideba new file mode 160000 index 00000000..a78b6763 --- /dev/null +++ b/.automaker/worktrees/176536627888-implement-profile-view-and-in-the-sideba @@ -0,0 +1 @@ +Subproject commit a78b6763de82102803fe731662483ade55dd738d diff --git a/.automaker/worktrees/176536775869-so-we-added-ai-profiles-add-a-default-op b/.automaker/worktrees/176536775869-so-we-added-ai-profiles-add-a-default-op new file mode 160000 index 00000000..a78b6763 --- /dev/null +++ b/.automaker/worktrees/176536775869-so-we-added-ai-profiles-add-a-default-op @@ -0,0 +1 @@ +Subproject commit a78b6763de82102803fe731662483ade55dd738d diff --git a/.cursor/plans/codex_cli_openai_model_support_9987f5e4.plan.md b/.cursor/plans/codex_cli_openai_model_support_9987f5e4.plan.md new file mode 100644 index 00000000..ebec34f8 --- /dev/null +++ b/.cursor/plans/codex_cli_openai_model_support_9987f5e4.plan.md @@ -0,0 +1,453 @@ +--- +name: Codex CLI OpenAI Model Support +overview: Extend the model support system to integrate OpenAI Codex CLI, enabling users to use OpenAI models (GPT-4o, o3, etc.) alongside existing Claude models. This includes CLI detection, model provider abstraction, execution wrapper, and UI updates. 
+todos: + - id: model-provider-abstraction + content: Create model provider abstraction layer with base interface and Claude/Codex implementations + status: pending + - id: codex-cli-detector + content: Implement Codex CLI detector service to check installation status and version + status: pending + - id: codex-executor + content: Create Codex CLI execution wrapper that spawns subprocess and parses JSON output + status: pending + - id: codex-config-manager + content: Implement Codex TOML configuration manager for model provider setup + status: pending + - id: model-registry + content: Create centralized model registry with provider mappings and metadata + status: pending + - id: update-feature-executor + content: Refactor feature-executor.js to use model provider abstraction instead of direct SDK calls + status: pending + - id: update-agent-service + content: Update agent-service.js to support configurable model selection via provider abstraction + status: pending + - id: message-converter + content: Create message format converter to translate Codex JSONL output to Claude SDK format + status: pending + - id: update-ui-types + content: Extend TypeScript types in app-store.ts to include OpenAI models and provider metadata + status: pending + - id: update-board-view + content: Expand model selection dropdown in board-view.tsx to include OpenAI models with provider grouping + status: pending + - id: update-settings-view + content: Add OpenAI API key input, Codex CLI status check, and test connection button to settings-view.tsx + status: pending + - id: openai-test-api + content: Create OpenAI API test endpoint at app/src/app/api/openai/test/route.ts + status: pending + - id: ipc-handlers + content: Add IPC handlers in main.js for model management (checkCodexCli, getAvailableModels, testOpenAI) + status: pending + - id: preload-api + content: Update preload.js and electron.d.ts to expose new IPC methods to renderer process + status: pending + - id: env-manager + 
content: Create environment variable manager for centralized API key and config handling + status: pending + - id: error-handling + content: Implement provider fallback logic and user-friendly error messages for missing CLI/API keys + status: pending +--- + +# Codex CLI OpenAI Model Support Implementation Plan + +## Overview + +Extend Automaker's model support to integrate OpenAI Codex CLI, allowing users to use the latest GPT-5.1 Codex models (`gpt-5.1-codex-max`, `gpt-5.1-codex`, `gpt-5.1-codex-mini`, `gpt-5.1`) alongside existing Claude models. Codex CLI defaults to `gpt-5.1-codex-max` and uses ChatGPT Enterprise authentication (no API key required). The implementation will follow the existing Claude CLI pattern but add abstraction for multiple model providers. + +## Current Architecture Analysis + +### Model Usage Points + +1. **Feature Executor** (`app/electron/services/feature-executor.js`): + + - Uses `MODEL_MAP` with hardcoded Claude models (haiku, sonnet, opus) + - Calls `@anthropic-ai/claude-agent-sdk` `query()` function + - Model selection via `getModelString(feature)` method + +2. **Agent Service** (`app/electron/agent-service.js`): + + - Hardcoded model: `"claude-opus-4-5-20251101"` + - Uses Claude Agent SDK directly + +3. **API Route** (`app/src/app/api/chat/route.ts`): + + - Hardcoded model: `"claude-opus-4-5-20251101"` + - Uses Claude Agent SDK + +4. **Project Analyzer** (`app/electron/services/project-analyzer.js`): + + - Hardcoded model: `"claude-sonnet-4-20250514"` + +5. 
**UI Components**: + + - `board-view.tsx`: Model dropdown (haiku/sonnet/opus) + - `app-store.ts`: `AgentModel` type limited to Claude models + +### Authentication + +- Claude: Uses `CLAUDE_CODE_OAUTH_TOKEN` environment variable +- Codex: Uses `OPENAI_API_KEY` environment variable (per Codex docs) + +## Implementation Strategy + +### Phase 1: Model Provider Abstraction Layer + +#### 1.1 Create Model Provider Interface + +**File**: `app/electron/services/model-provider.js` + +- Abstract base class/interface for model providers +- Methods: `executeQuery()`, `detectInstallation()`, `getAvailableModels()`, `validateConfig()` +- Implementations: + - `ClaudeProvider` (wraps existing SDK usage) + - `CodexProvider` (new, wraps Codex CLI execution) + +#### 1.2 Create Codex CLI Detector + +**File**: `app/electron/services/codex-cli-detector.js` + +- Similar to `claude-cli-detector.js` +- Check for `codex` command in PATH +- Check for npm global installation: `npm list -g @openai/codex` +- Check for Homebrew installation on macOS +- Return: `{ installed: boolean, path: string, version: string, method: 'cli'|'npm'|'brew'|'none' }` + +#### 1.3 Create Codex Provider Implementation + +**File**: `app/electron/services/codex-provider.js` + +- Extends model provider interface +- Executes Codex CLI via `child_process.spawn()` or `execSync()` +- Handles JSON output parsing (`codex exec --json`) +- Manages TOML configuration file creation/updates +- Supports latest GPT-5.1 Codex models: + - `gpt-5.1-codex-max` (default, latest flagship for deep and fast reasoning) + - `gpt-5.1-codex` (optimized for codex) + - `gpt-5.1-codex-mini` (cheaper, faster, less capable) + - `gpt-5.1` (broad world knowledge with strong general reasoning) +- Uses ChatGPT Enterprise authentication (no API key required for these models) +- Note: Legacy models (GPT-4o, o3, o1, etc.) 
are not supported - Codex CLI focuses on GPT-5.1 Codex family only + +### Phase 2: Model Configuration System + +#### 2.1 Extended Model Registry + +**File**: `app/electron/services/model-registry.js` + +- Centralized model configuration +- Model definitions with provider mapping: + ```javascript + { + id: "claude-opus", + name: "Claude Opus 4.5", + provider: "claude", + modelString: "claude-opus-4-5-20251101", + ... + }, + { + id: "gpt-4o", + name: "GPT-4o", + provider: "codex", + modelString: "gpt-4o", + requiresApiKey: "OPENAI_API_KEY", + ... + } + ``` + +- Model categories: `claude`, `openai`, `azure`, `custom` + +#### 2.2 Codex Configuration Manager + +**File**: `app/electron/services/codex-config-manager.js` + +- Manages Codex TOML config file (typically `~/.config/codex/config.toml` or project-specific) +- Creates/updates model provider configurations: + ```toml + [model_providers.openai-chat-completions] + name = "OpenAI using Chat Completions" + base_url = "https://api.openai.com/v1" + env_key = "OPENAI_API_KEY" + wire_api = "chat" + + [profiles.gpt4o] + model = "gpt-4o" + model_provider = "openai-chat-completions" + ``` + +- Profile management for different use cases +- Validates configuration before execution + +### Phase 3: Execution Integration + +#### 3.1 Update Feature Executor + +**File**: `app/electron/services/feature-executor.js` + +- Replace direct SDK calls with model provider abstraction +- Update `getModelString()` to return model ID instead of string +- Add `getModelProvider(modelId)` method +- Modify `implementFeature()` to: + - Get provider for selected model + - Use provider's `executeQuery()` method + - Handle different response formats (SDK vs CLI JSON) + +#### 3.2 Update Agent Service + +**File**: `app/electron/agent-service.js` + +- Replace hardcoded model with configurable model selection +- Use model provider abstraction +- Support model selection per session + +#### 3.3 Update Project Analyzer + +**File**: 
`app/electron/services/project-analyzer.js` + +- Use model provider abstraction +- Make model configurable (currently hardcoded to sonnet) + +#### 3.4 Update API Route + +**File**: `app/src/app/api/chat/route.ts` + +- Support model selection from request +- Use model provider abstraction (if running in Electron context) +- Fallback to Claude SDK for web-only usage + +### Phase 4: Codex CLI Execution Wrapper + +#### 4.1 Codex Executor + +**File**: `app/electron/services/codex-executor.js` + +- Wraps `codex exec` command execution +- Handles subprocess spawning with proper environment variables +- Parses JSON output (JSONL format from `--json` flag) +- Converts Codex output format to match Claude SDK message format +- Handles streaming responses +- Error handling and timeout management + +#### 4.2 Message Format Conversion + +**File**: `app/electron/services/message-converter.js` + +- Converts Codex JSONL output to Claude SDK message format +- Maps Codex events: + - `thread.started` → session initialization + - `item.completed` (reasoning) → thinking output + - `item.completed` (command_execution) → tool use + - `item.completed` (agent_message) → assistant message +- Maintains compatibility with existing UI components + +### Phase 5: UI Updates + +#### 5.1 Update Type Definitions + +**File**: `app/src/store/app-store.ts` + +- Extend `AgentModel` type to include OpenAI models: + ```typescript + export type AgentModel = + | "opus" | "sonnet" | "haiku" // Claude + | "gpt-4o" | "gpt-4o-mini" | "gpt-3.5-turbo" | "o3" | "o1"; // OpenAI + ``` + +- Add `modelProvider` field to `Feature` interface +- Add provider metadata to model selection + +#### 5.2 Update Board View + +**File**: `app/src/components/views/board-view.tsx` + +- Expand model dropdown to include OpenAI models +- Group models by provider (Claude / OpenAI) +- Show provider badges/icons +- Display model availability based on CLI detection +- Add tooltips showing model capabilities + +#### 5.3 Update Settings View 
+ +**File**: `app/src/components/views/settings-view.tsx` + +- Add OpenAI API key input field (similar to Anthropic key) +- Add Codex CLI status check (similar to Claude CLI check) +- Show installation instructions if Codex CLI not detected +- Add test connection button for OpenAI API +- Display detected Codex CLI version/path + +#### 5.4 Create API Test Route + +**File**: `app/src/app/api/openai/test/route.ts` + +- Similar to `app/src/app/api/claude/test/route.ts` +- Test OpenAI API connection +- Validate API key format +- Return connection status + +### Phase 6: Configuration & Environment + +#### 6.1 Environment Variable Management + +**File**: `app/electron/services/env-manager.js` + +- Centralized environment variable handling +- Loads from `.env` file and system environment +- Validates required variables per provider +- Provides fallback mechanisms + +#### 6.2 IPC Handlers for Model Management + +**File**: `app/electron/main.js` + +- Add IPC handlers: + - `model:checkCodexCli` - Check Codex CLI installation + - `model:getAvailableModels` - List available models per provider + - `model:testOpenAI` - Test OpenAI API connection + - `model:updateCodexConfig` - Update Codex TOML config + +#### 6.3 Preload API Updates + +**File**: `app/electron/preload.js` + +- Expose new IPC methods to renderer +- Add TypeScript definitions in `app/src/types/electron.d.ts` + +### Phase 7: Error Handling & Fallbacks + +#### 7.1 Provider Fallback Logic + +- If Codex CLI not available, fallback to Claude +- If OpenAI API key missing, show clear error messages +- Graceful degradation when provider unavailable + +#### 7.2 Error Messages + +- User-friendly error messages for missing CLI +- Installation instructions per platform +- API key validation errors +- Model availability warnings + +## File Structure Summary + +### New Files + +``` +app/electron/services/ + ├── model-provider.js # Abstract provider interface + ├── claude-provider.js # Claude SDK wrapper + ├── codex-provider.js # 
Codex CLI wrapper + ├── codex-cli-detector.js # Codex CLI detection + ├── codex-executor.js # Codex CLI execution wrapper + ├── codex-config-manager.js # TOML config management + ├── model-registry.js # Centralized model definitions + ├── message-converter.js # Format conversion utilities + └── env-manager.js # Environment variable management + +app/src/app/api/openai/ + └── test/route.ts # OpenAI API test endpoint +``` + +### Modified Files + +``` +app/electron/services/ + ├── feature-executor.js # Use model provider abstraction + ├── agent-service.js # Support multiple providers + └── project-analyzer.js # Configurable model selection + +app/electron/ + ├── main.js # Add IPC handlers + └── preload.js # Expose new APIs + +app/src/ + ├── store/app-store.ts # Extended model types + ├── components/views/ + │ ├── board-view.tsx # Expanded model selection UI + │ └── settings-view.tsx # OpenAI API key & Codex CLI status + └── types/electron.d.ts # Updated IPC type definitions +``` + +## Implementation Details + +### Codex CLI Execution Pattern + +```javascript +// Example execution flow +const codexExecutor = require('./codex-executor'); +const result = await codexExecutor.execute({ + prompt: "Implement feature X", + model: "gpt-4o", + cwd: projectPath, + systemPrompt: "...", + maxTurns: 20, + allowedTools: ["Read", "Write", "Edit", "Bash"], + env: { OPENAI_API_KEY: process.env.OPENAI_API_KEY } +}); +``` + +### Model Provider Interface + +```javascript +class ModelProvider { + async executeQuery(options) { + // Returns async generator of messages + } + + async detectInstallation() { + // Returns installation status + } + + getAvailableModels() { + // Returns list of supported models + } + + validateConfig() { + // Validates provider configuration + } +} +``` + +### Configuration File Location + +- User config: `~/.config/codex/config.toml` (or platform equivalent) +- Project config: `.codex/config.toml` (optional, project-specific) +- Fallback: In-memory config passed 
via CLI args + +## Testing Considerations + +1. **CLI Detection**: Test on macOS, Linux, Windows +2. **Model Execution**: Test with different OpenAI models +3. **Error Handling**: Test missing CLI, invalid API keys, network errors +4. **Format Conversion**: Verify message format compatibility +5. **Concurrent Execution**: Test multiple features with different providers +6. **Fallback Logic**: Test provider fallback scenarios + +## Documentation Updates + +1. Update README with Codex CLI installation instructions: + + - `npm install -g @openai/codex@latest` or `brew install codex` + - ChatGPT Enterprise authentication (no API key needed) + - API-based authentication for older models + +2. Add model selection guide: + + - GPT-5.1 Codex Max (default, best for coding) + - o3/o4-mini with reasoning efforts + - GPT-5.1/GPT-5 with verbosity control + +3. Document reasoning effort and verbosity settings +4. Add troubleshooting section for common issues +5. Document model list discovery via MCP interface + +## Migration Path + +1. Implement provider abstraction alongside existing code +2. Add Codex support without breaking existing Claude functionality +3. Gradually migrate services to use abstraction layer +4. Maintain backward compatibility during transition +5. 
Remove hardcoded models after full migration \ No newline at end of file diff --git a/app/electron/auto-mode-service.js b/app/electron/auto-mode-service.js index 6277f7c8..1ebe370e 100644 --- a/app/electron/auto-mode-service.js +++ b/app/electron/auto-mode-service.js @@ -3,6 +3,7 @@ const featureExecutor = require("./services/feature-executor"); const featureVerifier = require("./services/feature-verifier"); const contextManager = require("./services/context-manager"); const projectAnalyzer = require("./services/project-analyzer"); +const worktreeManager = require("./services/worktree-manager"); /** * Auto Mode Service - Autonomous feature implementation @@ -33,13 +34,78 @@ class AutoModeService { const context = { abortController: null, query: null, - projectPath: null, + projectPath: null, // Original project path + worktreePath: null, // Path to worktree (where agent works) + branchName: null, // Feature branch name sendToRenderer: null, isActive: () => this.runningFeatures.has(featureId), }; return context; } + /** + * Setup worktree for a feature + * Creates an isolated git worktree where the agent can work + * @param {Object} feature - The feature object + * @param {string} projectPath - Path to the project + * @param {Function} sendToRenderer - Function to send events to the renderer + * @param {boolean} useWorktreesEnabled - Whether worktrees are enabled in settings (default: false) + */ + async setupWorktreeForFeature(feature, projectPath, sendToRenderer, useWorktreesEnabled = false) { + // If worktrees are disabled in settings, skip entirely + if (!useWorktreesEnabled) { + console.log(`[AutoMode] Worktrees disabled in settings, working directly on main project`); + return { useWorktree: false, workPath: projectPath }; + } + + // Check if worktrees are enabled (project must be a git repo) + const isGit = await worktreeManager.isGitRepo(projectPath); + if (!isGit) { + console.log(`[AutoMode] Project is not a git repo, skipping worktree creation`); + return 
{ useWorktree: false, workPath: projectPath }; + } + + sendToRenderer({ + type: "auto_mode_progress", + featureId: feature.id, + content: "Creating isolated worktree for feature...\n", + }); + + const result = await worktreeManager.createWorktree(projectPath, feature); + + if (!result.success) { + console.warn(`[AutoMode] Failed to create worktree: ${result.error}. Falling back to main project.`); + sendToRenderer({ + type: "auto_mode_progress", + featureId: feature.id, + content: `Warning: Could not create worktree (${result.error}). Working directly on main project.\n`, + }); + return { useWorktree: false, workPath: projectPath }; + } + + console.log(`[AutoMode] Created worktree at: ${result.worktreePath}, branch: ${result.branchName}`); + sendToRenderer({ + type: "auto_mode_progress", + featureId: feature.id, + content: `Working in isolated branch: ${result.branchName}\n`, + }); + + // Update feature with worktree info in feature_list.json + await featureLoader.updateFeatureWorktree( + feature.id, + projectPath, + result.worktreePath, + result.branchName + ); + + return { + useWorktree: true, + workPath: result.worktreePath, + branchName: result.branchName, + baseBranch: result.baseBranch, + }; + } + /** * Start auto mode - continuously implement features */ @@ -108,14 +174,18 @@ class AutoModeService { /** * Run a specific feature by ID + * @param {string} projectPath - Path to the project + * @param {string} featureId - ID of the feature to run + * @param {Function} sendToRenderer - Function to send events to renderer + * @param {boolean} useWorktrees - Whether to use git worktree isolation (default: false) */ - async runFeature({ projectPath, featureId, sendToRenderer }) { + async runFeature({ projectPath, featureId, sendToRenderer, useWorktrees = false }) { // Check if this specific feature is already running if (this.runningFeatures.has(featureId)) { throw new Error(`Feature ${featureId} is already running`); } - console.log(`[AutoMode] Running specific 
feature: ${featureId}`); + console.log(`[AutoMode] Running specific feature: ${featureId} (worktrees: ${useWorktrees})`); // Register this feature as running const execution = this.createExecutionContext(featureId); @@ -134,6 +204,14 @@ class AutoModeService { console.log(`[AutoMode] Running feature: ${feature.description}`); + // Setup worktree for isolated work (if enabled) + const worktreeSetup = await this.setupWorktreeForFeature(feature, projectPath, sendToRenderer, useWorktrees); + execution.worktreePath = worktreeSetup.workPath; + execution.branchName = worktreeSetup.branchName; + + // Determine working path (worktree or main project) + const workPath = worktreeSetup.workPath; + // Update feature status to in_progress await featureLoader.updateFeatureStatus( featureId, @@ -144,24 +222,27 @@ class AutoModeService { sendToRenderer({ type: "auto_mode_feature_start", featureId: feature.id, - feature: feature, + feature: { ...feature, worktreePath: worktreeSetup.workPath, branchName: worktreeSetup.branchName }, }); - // Implement the feature + // Implement the feature (agent works in worktree) const result = await featureExecutor.implementFeature( feature, - projectPath, + workPath, // Use worktree path instead of main project sendToRenderer, execution ); // Update feature status based on result // For skipTests features, go to waiting_approval on success instead of verified + // On failure, skipTests features should also go to waiting_approval for user review let newStatus; if (result.passes) { newStatus = feature.skipTests ? "waiting_approval" : "verified"; } else { - newStatus = "backlog"; + // For skipTests features, keep in waiting_approval so user can review + // For normal TDD features, move to backlog for retry + newStatus = feature.skipTests ? 
"waiting_approval" : "backlog"; } await featureLoader.updateFeatureStatus( feature.id, @@ -554,8 +635,12 @@ class AutoModeService { /** * Start a feature asynchronously (similar to drag operation) + * @param {Object} feature - The feature to start + * @param {string} projectPath - Path to the project + * @param {Function} sendToRenderer - Function to send events to renderer + * @param {boolean} useWorktrees - Whether to use git worktree isolation (default: false) */ - async startFeatureAsync(feature, projectPath, sendToRenderer) { + async startFeatureAsync(feature, projectPath, sendToRenderer, useWorktrees = false) { const featureId = feature.id; // Skip if already running @@ -566,7 +651,7 @@ class AutoModeService { try { console.log( - `[AutoMode] Starting feature: ${feature.description.slice(0, 50)}...` + `[AutoMode] Starting feature: ${feature.description.slice(0, 50)}... (worktrees: ${useWorktrees})` ); // Register this feature as running @@ -575,6 +660,14 @@ class AutoModeService { execution.sendToRenderer = sendToRenderer; this.runningFeatures.set(featureId, execution); + // Setup worktree for isolated work (if enabled) + const worktreeSetup = await this.setupWorktreeForFeature(feature, projectPath, sendToRenderer, useWorktrees); + execution.worktreePath = worktreeSetup.workPath; + execution.branchName = worktreeSetup.branchName; + + // Determine working path (worktree or main project) + const workPath = worktreeSetup.workPath; + // Update status to in_progress with timestamp await featureLoader.updateFeatureStatus( featureId, @@ -585,23 +678,27 @@ class AutoModeService { sendToRenderer({ type: "auto_mode_feature_start", featureId: feature.id, - feature: feature, + feature: { ...feature, worktreePath: worktreeSetup.workPath, branchName: worktreeSetup.branchName }, }); - // Implement the feature (this runs async in background) + // Implement the feature (agent works in worktree) const result = await featureExecutor.implementFeature( feature, - projectPath, + 
workPath, // Use worktree path instead of main project sendToRenderer, execution ); // Update feature status based on result + // For skipTests features, go to waiting_approval on success instead of verified + // On failure, skipTests features should also go to waiting_approval for user review let newStatus; if (result.passes) { newStatus = feature.skipTests ? "waiting_approval" : "verified"; } else { - newStatus = "backlog"; + // For skipTests features, keep in waiting_approval so user can review + // For normal TDD features, move to backlog for retry + newStatus = feature.skipTests ? "waiting_approval" : "backlog"; } await featureLoader.updateFeatureStatus( feature.id, @@ -975,6 +1072,170 @@ class AutoModeService { sleep(ms) { return new Promise((resolve) => setTimeout(resolve, ms)); } + + /** + * Revert feature changes by removing the worktree + * This effectively discards all changes made by the agent + */ + async revertFeature({ projectPath, featureId, sendToRenderer }) { + console.log(`[AutoMode] Reverting feature: ${featureId}`); + + try { + // Stop the feature if it's running + if (this.runningFeatures.has(featureId)) { + await this.stopFeature({ featureId }); + } + + // Remove the worktree and delete the branch + const result = await worktreeManager.removeWorktree(projectPath, featureId, true); + + if (!result.success) { + throw new Error(result.error || "Failed to remove worktree"); + } + + // Clear worktree info from feature + await featureLoader.updateFeatureWorktree(featureId, projectPath, null, null); + + // Update feature status back to backlog + await featureLoader.updateFeatureStatus(featureId, "backlog", projectPath); + + // Delete context file + await contextManager.deleteContextFile(projectPath, featureId); + + if (sendToRenderer) { + sendToRenderer({ + type: "auto_mode_feature_complete", + featureId: featureId, + passes: false, + message: "Feature reverted - all changes discarded", + }); + } + + console.log(`[AutoMode] Feature ${featureId} 
reverted successfully`); + return { success: true, removedPath: result.removedPath }; + } catch (error) { + console.error("[AutoMode] Error reverting feature:", error); + if (sendToRenderer) { + sendToRenderer({ + type: "auto_mode_error", + error: error.message, + featureId: featureId, + }); + } + return { success: false, error: error.message }; + } + } + + /** + * Merge feature worktree changes back to main branch + */ + async mergeFeature({ projectPath, featureId, options = {}, sendToRenderer }) { + console.log(`[AutoMode] Merging feature: ${featureId}`); + + try { + // Load feature to get worktree info + const features = await featureLoader.loadFeatures(projectPath); + const feature = features.find((f) => f.id === featureId); + + if (!feature) { + throw new Error(`Feature ${featureId} not found`); + } + + if (sendToRenderer) { + sendToRenderer({ + type: "auto_mode_progress", + featureId: featureId, + content: "Merging feature branch into main...\n", + }); + } + + // Merge the worktree + const result = await worktreeManager.mergeWorktree(projectPath, featureId, { + ...options, + cleanup: true, // Remove worktree after successful merge + }); + + if (!result.success) { + throw new Error(result.error || "Failed to merge worktree"); + } + + // Clear worktree info from feature + await featureLoader.updateFeatureWorktree(featureId, projectPath, null, null); + + // Update feature status to verified + await featureLoader.updateFeatureStatus(featureId, "verified", projectPath); + + if (sendToRenderer) { + sendToRenderer({ + type: "auto_mode_feature_complete", + featureId: featureId, + passes: true, + message: `Feature merged into ${result.intoBranch}`, + }); + } + + console.log(`[AutoMode] Feature ${featureId} merged successfully`); + return { success: true, mergedBranch: result.mergedBranch }; + } catch (error) { + console.error("[AutoMode] Error merging feature:", error); + if (sendToRenderer) { + sendToRenderer({ + type: "auto_mode_error", + error: error.message, + 
featureId: featureId, + }); + } + return { success: false, error: error.message }; + } + } + + /** + * Get worktree info for a feature + */ + async getWorktreeInfo({ projectPath, featureId }) { + return await worktreeManager.getWorktreeInfo(projectPath, featureId); + } + + /** + * Get worktree status (changed files, commits, etc.) + */ + async getWorktreeStatus({ projectPath, featureId }) { + const worktreeInfo = await worktreeManager.getWorktreeInfo(projectPath, featureId); + if (!worktreeInfo.success) { + return { success: false, error: "Worktree not found" }; + } + return await worktreeManager.getWorktreeStatus(worktreeInfo.worktreePath); + } + + /** + * List all feature worktrees + */ + async listWorktrees({ projectPath }) { + const worktrees = await worktreeManager.getAllFeatureWorktrees(projectPath); + return { success: true, worktrees }; + } + + /** + * Get file diffs for a feature worktree + */ + async getFileDiffs({ projectPath, featureId }) { + const worktreeInfo = await worktreeManager.getWorktreeInfo(projectPath, featureId); + if (!worktreeInfo.success) { + return { success: false, error: "Worktree not found" }; + } + return await worktreeManager.getFileDiffs(worktreeInfo.worktreePath); + } + + /** + * Get diff for a specific file in a feature worktree + */ + async getFileDiff({ projectPath, featureId, filePath }) { + const worktreeInfo = await worktreeManager.getWorktreeInfo(projectPath, featureId); + if (!worktreeInfo.success) { + return { success: false, error: "Worktree not found" }; + } + return await worktreeManager.getFileDiff(worktreeInfo.worktreePath, filePath); + } } // Export singleton instance diff --git a/app/electron/main.js b/app/electron/main.js index c100da40..7083789e 100644 --- a/app/electron/main.js +++ b/app/electron/main.js @@ -7,6 +7,7 @@ const { app, BrowserWindow, ipcMain, dialog, shell } = require("electron"); const fs = require("fs/promises"); const agentService = require("./agent-service"); const autoModeService = 
require("./auto-mode-service"); +const worktreeManager = require("./services/worktree-manager"); const featureSuggestionsService = require("./services/feature-suggestions-service"); const specRegenerationService = require("./services/spec-regeneration-service"); @@ -61,6 +62,21 @@ app.whenReady().then(async () => { const appDataPath = app.getPath("userData"); await agentService.initialize(appDataPath); + // Pre-load allowed paths from agent history to prevent breaking "Recent Projects" + try { + const sessions = await agentService.listSessions({ includeArchived: true }); + sessions.forEach((session) => { + if (session.projectPath) { + addAllowedPath(session.projectPath); + } + }); + console.log( + `[Security] Pre-loaded ${allowedPaths.size} allowed paths from history` + ); + } catch (error) { + console.error("Failed to load sessions for security whitelist:", error); + } + createWindow(); app.on("activate", () => { @@ -76,6 +92,43 @@ app.on("window-all-closed", () => { } }); +// Track allowed paths for file operations (security) +const allowedPaths = new Set(); + +/** + * Add a path to the allowed list + */ +function addAllowedPath(pathToAdd) { + if (!pathToAdd) return; + allowedPaths.add(path.resolve(pathToAdd)); + console.log(`[Security] Added allowed path: ${pathToAdd}`); +} + +/** + * Check if a file path is allowed (must be within an allowed directory) + */ +function isPathAllowed(filePath) { + const resolvedPath = path.resolve(filePath); + + // Allow access to app data directory (for logs, temp images etc) + const appDataPath = app.getPath("userData"); + if (resolvedPath.startsWith(appDataPath)) return true; + + // Check against all allowed project paths + for (const allowedPath of allowedPaths) { + // Check if path starts with allowed directory + // Ensure we don't match "/foo/bar" against "/foo/b" + if ( + resolvedPath === allowedPath || + resolvedPath.startsWith(allowedPath + path.sep) + ) { + return true; + } + } + + return false; +} + // IPC Handlers // 
Dialog handlers @@ -83,6 +136,11 @@ ipcMain.handle("dialog:openDirectory", async () => { const result = await dialog.showOpenDialog(mainWindow, { properties: ["openDirectory", "createDirectory"], }); + + if (!result.canceled && result.filePaths.length > 0) { + result.filePaths.forEach((p) => addAllowedPath(p)); + } + return result; }); @@ -91,12 +149,26 @@ ipcMain.handle("dialog:openFile", async (_, options = {}) => { properties: ["openFile"], ...options, }); + + if (!result.canceled && result.filePaths.length > 0) { + // Allow reading the specific file selected + result.filePaths.forEach((p) => addAllowedPath(p)); + } + return result; }); // File system handlers ipcMain.handle("fs:readFile", async (_, filePath) => { try { + // Security check + if (!isPathAllowed(filePath)) { + return { + success: false, + error: "Access denied: Path is outside allowed project directories", + }; + } + const content = await fs.readFile(filePath, "utf-8"); return { success: true, content }; } catch (error) { @@ -106,6 +178,14 @@ ipcMain.handle("fs:readFile", async (_, filePath) => { ipcMain.handle("fs:writeFile", async (_, filePath, content) => { try { + // Security check + if (!isPathAllowed(filePath)) { + return { + success: false, + error: "Access denied: Path is outside allowed project directories", + }; + } + await fs.writeFile(filePath, content, "utf-8"); return { success: true }; } catch (error) { @@ -115,6 +195,14 @@ ipcMain.handle("fs:writeFile", async (_, filePath, content) => { ipcMain.handle("fs:mkdir", async (_, dirPath) => { try { + // Security check + if (!isPathAllowed(dirPath)) { + return { + success: false, + error: "Access denied: Path is outside allowed project directories", + }; + } + await fs.mkdir(dirPath, { recursive: true }); return { success: true }; } catch (error) { @@ -124,6 +212,14 @@ ipcMain.handle("fs:mkdir", async (_, dirPath) => { ipcMain.handle("fs:readdir", async (_, dirPath) => { try { + // Security check + if (!isPathAllowed(dirPath)) { + return 
{ + success: false, + error: "Access denied: Path is outside allowed project directories", + }; + } + const entries = await fs.readdir(dirPath, { withFileTypes: true }); const result = entries.map((entry) => ({ name: entry.name, @@ -138,6 +234,11 @@ ipcMain.handle("fs:readdir", async (_, dirPath) => { ipcMain.handle("fs:exists", async (_, filePath) => { try { + // Exists check is generally safe, but we can restrict it too for strict privacy + if (!isPathAllowed(filePath)) { + return false; + } + await fs.access(filePath); return true; } catch { @@ -147,6 +248,14 @@ ipcMain.handle("fs:exists", async (_, filePath) => { ipcMain.handle("fs:stat", async (_, filePath) => { try { + // Security check + if (!isPathAllowed(filePath)) { + return { + success: false, + error: "Access denied: Path is outside allowed project directories", + }; + } + const stats = await fs.stat(filePath); return { success: true, @@ -164,6 +273,14 @@ ipcMain.handle("fs:stat", async (_, filePath) => { ipcMain.handle("fs:deleteFile", async (_, filePath) => { try { + // Security check + if (!isPathAllowed(filePath)) { + return { + success: false, + error: "Access denied: Path is outside allowed project directories", + }; + } + await fs.unlink(filePath); return { success: true }; } catch (error) { @@ -173,6 +290,14 @@ ipcMain.handle("fs:deleteFile", async (_, filePath) => { ipcMain.handle("fs:trashItem", async (_, targetPath) => { try { + // Security check + if (!isPathAllowed(targetPath)) { + return { + success: false, + error: "Access denied: Path is outside allowed project directories", + }; + } + await shell.trashItem(targetPath); return { success: true }; } catch (error) { @@ -352,6 +477,10 @@ ipcMain.handle( "sessions:create", async (_, { name, projectPath, workingDirectory }) => { try { + // Add project path to allowed paths + addAllowedPath(projectPath); + if (workingDirectory) addAllowedPath(workingDirectory); + return await agentService.createSession({ name, projectPath, @@ -423,6 +552,9 @@ 
ipcMain.handle( "auto-mode:start", async (_, { projectPath, maxConcurrency }) => { try { + // Add project path to allowed paths + addAllowedPath(projectPath); + const sendToRenderer = (data) => { if (mainWindow && !mainWindow.isDestroyed()) { mainWindow.webContents.send("auto-mode:event", data); @@ -470,7 +602,7 @@ ipcMain.handle("auto-mode:status", () => { */ ipcMain.handle( "auto-mode:run-feature", - async (_, { projectPath, featureId }) => { + async (_, { projectPath, featureId, useWorktrees = false }) => { try { const sendToRenderer = (data) => { if (mainWindow && !mainWindow.isDestroyed()) { @@ -482,6 +614,7 @@ ipcMain.handle( projectPath, featureId, sendToRenderer, + useWorktrees, }); } catch (error) { console.error("[IPC] auto-mode:run-feature error:", error); @@ -581,6 +714,9 @@ ipcMain.handle( ipcMain.handle("auto-mode:analyze-project", async (_, { projectPath }) => { console.log("[IPC] auto-mode:analyze-project called with:", { projectPath }); try { + // Add project path to allowed paths + addAllowedPath(projectPath); + const sendToRenderer = (data) => { if (mainWindow && !mainWindow.isDestroyed()) { mainWindow.webContents.send("auto-mode:event", data); @@ -672,6 +808,111 @@ ipcMain.handle( } ); +// ============================================================================ +// Claude CLI Detection IPC Handlers +// ============================================================================ + +/** + * Check Claude Code CLI installation status + */ +ipcMain.handle("claude:check-cli", async () => { + try { + const claudeCliDetector = require("./services/claude-cli-detector"); + const info = claudeCliDetector.getInstallationInfo(); + return { success: true, ...info }; + } catch (error) { + console.error("[IPC] claude:check-cli error:", error); + return { success: false, error: error.message }; + } +}); + +// ============================================================================ +// Codex CLI Detection IPC Handlers +// 
============================================================================ + +/** + * Check Codex CLI installation status + */ +ipcMain.handle("codex:check-cli", async () => { + try { + const codexCliDetector = require("./services/codex-cli-detector"); + const info = codexCliDetector.getInstallationInfo(); + return { success: true, ...info }; + } catch (error) { + console.error("[IPC] codex:check-cli error:", error); + return { success: false, error: error.message }; + } +}); + +/** + * Get all available models from all providers + */ +ipcMain.handle("model:get-available", async () => { + try { + const { ModelProviderFactory } = require("./services/model-provider"); + const models = ModelProviderFactory.getAllModels(); + return { success: true, models }; + } catch (error) { + console.error("[IPC] model:get-available error:", error); + return { success: false, error: error.message }; + } +}); + +/** + * Check all provider installation status + */ +ipcMain.handle("model:check-providers", async () => { + try { + const { ModelProviderFactory } = require("./services/model-provider"); + const status = await ModelProviderFactory.checkAllProviders(); + return { success: true, providers: status }; + } catch (error) { + console.error("[IPC] model:check-providers error:", error); + return { success: false, error: error.message }; + } +}); + +// ============================================================================ +// MCP Server IPC Handlers +// ============================================================================ + +/** + * Handle MCP server callback for updating feature status + * This can be called by the MCP server script via HTTP or other communication mechanism + * Note: The MCP server script runs as a separate process, so it can't directly use Electron IPC. + * For now, the MCP server calls featureLoader.updateFeatureStatus directly. + * This handler is here for future extensibility (e.g., HTTP endpoint bridge). 
+ */ +ipcMain.handle( + "mcp:update-feature-status", + async (_, { featureId, status, projectPath, summary }) => { + try { + const featureLoader = require("./services/feature-loader"); + await featureLoader.updateFeatureStatus( + featureId, + status, + projectPath, + summary + ); + + // Notify renderer if window is available + if (mainWindow && !mainWindow.isDestroyed()) { + mainWindow.webContents.send("mcp:feature-status-updated", { + featureId, + status, + projectPath, + summary, + }); + } + + return { success: true }; + } catch (error) { + console.error("[IPC] mcp:update-feature-status error:", error); + return { success: false, error: error.message }; + } + } +); + // ============================================================================ // Feature Suggestions IPC Handlers // ============================================================================ @@ -682,53 +923,53 @@ let suggestionsExecution = null; /** * Generate feature suggestions by analyzing the project */ -ipcMain.handle( - "suggestions:generate", - async (_, { projectPath }) => { - console.log("[IPC] suggestions:generate called with:", { projectPath }); +ipcMain.handle("suggestions:generate", async (_, { projectPath }) => { + console.log("[IPC] suggestions:generate called with:", { projectPath }); - try { - // Check if already running - if (suggestionsExecution && suggestionsExecution.isActive()) { - return { success: false, error: "Suggestions generation is already running" }; - } - - // Create execution context - suggestionsExecution = { - abortController: null, - query: null, - isActive: () => suggestionsExecution !== null, + try { + // Check if already running + if (suggestionsExecution && suggestionsExecution.isActive()) { + return { + success: false, + error: "Suggestions generation is already running", }; - - const sendToRenderer = (data) => { - if (mainWindow && !mainWindow.isDestroyed()) { - mainWindow.webContents.send("suggestions:event", data); - } - }; - - // Start generating 
suggestions (runs in background) - featureSuggestionsService - .generateSuggestions(projectPath, sendToRenderer, suggestionsExecution) - .catch((error) => { - console.error("[IPC] suggestions:generate background error:", error); - sendToRenderer({ - type: "suggestions_error", - error: error.message, - }); - }) - .finally(() => { - suggestionsExecution = null; - }); - - // Return immediately - return { success: true }; - } catch (error) { - console.error("[IPC] suggestions:generate error:", error); - suggestionsExecution = null; - return { success: false, error: error.message }; } + + // Create execution context + suggestionsExecution = { + abortController: null, + query: null, + isActive: () => suggestionsExecution !== null, + }; + + const sendToRenderer = (data) => { + if (mainWindow && !mainWindow.isDestroyed()) { + mainWindow.webContents.send("suggestions:event", data); + } + }; + + // Start generating suggestions (runs in background) + featureSuggestionsService + .generateSuggestions(projectPath, sendToRenderer, suggestionsExecution) + .catch((error) => { + console.error("[IPC] suggestions:generate background error:", error); + sendToRenderer({ + type: "suggestions_error", + error: error.message, + }); + }) + .finally(() => { + suggestionsExecution = null; + }); + + // Return immediately + return { success: true }; + } catch (error) { + console.error("[IPC] suggestions:generate error:", error); + suggestionsExecution = null; + return { success: false, error: error.message }; } -); +}); /** * Stop the current suggestions generation @@ -757,6 +998,79 @@ ipcMain.handle("suggestions:status", () => { }; }); +// ============================================================================ +// OpenAI API Handlers +// ============================================================================ + +/** + * Test OpenAI API connection + */ +ipcMain.handle("openai:test-connection", async (_, { apiKey }) => { + try { + // Simple test using fetch to OpenAI API + const response 
= await fetch("https://api.openai.com/v1/models", { + method: "GET", + headers: { + Authorization: `Bearer ${apiKey || process.env.OPENAI_API_KEY}`, + "Content-Type": "application/json", + }, + }); + + if (response.ok) { + const data = await response.json(); + return { + success: true, + message: `Connected successfully. Found ${ + data.data?.length || 0 + } models.`, + }; + } else { + const error = await response.json(); + return { + success: false, + error: error.error?.message || "Failed to connect to OpenAI API", + }; + } + } catch (error) { + console.error("[IPC] openai:test-connection error:", error); + return { success: false, error: error.message }; + } +}); + +// ============================================================================ +// Worktree Management IPC Handlers +// ============================================================================ + +/** + * Revert feature changes by removing the worktree + * This effectively discards all changes made by the agent + */ +ipcMain.handle( + "worktree:revert-feature", + async (_, { projectPath, featureId }) => { + console.log("[IPC] worktree:revert-feature called with:", { + projectPath, + featureId, + }); + try { + const sendToRenderer = (data) => { + if (mainWindow && !mainWindow.isDestroyed()) { + mainWindow.webContents.send("auto-mode:event", data); + } + }; + + return await autoModeService.revertFeature({ + projectPath, + featureId, + sendToRenderer, + }); + } catch (error) { + console.error("[IPC] worktree:revert-feature error:", error); + return { success: false, error: error.message }; + } + } +); + // ============================================================================ // Spec Regeneration IPC Handlers // ============================================================================ @@ -770,12 +1084,20 @@ let specRegenerationExecution = null; ipcMain.handle( "spec-regeneration:generate", async (_, { projectPath, projectDefinition }) => { - console.log("[IPC] spec-regeneration:generate 
called with:", { projectPath }); + console.log("[IPC] spec-regeneration:generate called with:", { + projectPath, + }); try { + // Add project path to allowed paths + addAllowedPath(projectPath); + // Check if already running if (specRegenerationExecution && specRegenerationExecution.isActive()) { - return { success: false, error: "Spec regeneration is already running" }; + return { + success: false, + error: "Spec regeneration is already running", + }; } // Create execution context @@ -793,9 +1115,17 @@ ipcMain.handle( // Start regenerating spec (runs in background) specRegenerationService - .regenerateSpec(projectPath, projectDefinition, sendToRenderer, specRegenerationExecution) + .regenerateSpec( + projectPath, + projectDefinition, + sendToRenderer, + specRegenerationExecution + ) .catch((error) => { - console.error("[IPC] spec-regeneration:generate background error:", error); + console.error( + "[IPC] spec-regeneration:generate background error:", + error + ); sendToRenderer({ type: "spec_regeneration_error", error: error.message, @@ -821,7 +1151,10 @@ ipcMain.handle( ipcMain.handle("spec-regeneration:stop", async () => { console.log("[IPC] spec-regeneration:stop called"); try { - if (specRegenerationExecution && specRegenerationExecution.abortController) { + if ( + specRegenerationExecution && + specRegenerationExecution.abortController + ) { specRegenerationExecution.abortController.abort(); } specRegenerationExecution = null; @@ -838,7 +1171,9 @@ ipcMain.handle("spec-regeneration:stop", async () => { ipcMain.handle("spec-regeneration:status", () => { return { success: true, - isRunning: specRegenerationExecution !== null && specRegenerationExecution.isActive(), + isRunning: + specRegenerationExecution !== null && + specRegenerationExecution.isActive(), }; }); @@ -848,9 +1183,15 @@ ipcMain.handle("spec-regeneration:status", () => { ipcMain.handle( "spec-regeneration:create", async (_, { projectPath, projectOverview, generateFeatures = true }) => { - 
console.log("[IPC] spec-regeneration:create called with:", { projectPath, generateFeatures }); + console.log("[IPC] spec-regeneration:create called with:", { + projectPath, + generateFeatures, + }); try { + // Add project path to allowed paths + addAllowedPath(projectPath); + // Check if already running if (specRegenerationExecution && specRegenerationExecution.isActive()) { return { success: false, error: "Spec creation is already running" }; @@ -871,9 +1212,18 @@ ipcMain.handle( // Start creating spec (runs in background) specRegenerationService - .createInitialSpec(projectPath, projectOverview, sendToRenderer, specRegenerationExecution, generateFeatures) + .createInitialSpec( + projectPath, + projectOverview, + sendToRenderer, + specRegenerationExecution, + generateFeatures + ) .catch((error) => { - console.error("[IPC] spec-regeneration:create background error:", error); + console.error( + "[IPC] spec-regeneration:create background error:", + error + ); sendToRenderer({ type: "spec_regeneration_error", error: error.message, @@ -892,3 +1242,124 @@ ipcMain.handle( } } ); + +/** + * Merge feature worktree changes back to main branch + */ +ipcMain.handle( + "worktree:merge-feature", + async (_, { projectPath, featureId, options }) => { + console.log("[IPC] worktree:merge-feature called with:", { + projectPath, + featureId, + options, + }); + try { + const sendToRenderer = (data) => { + if (mainWindow && !mainWindow.isDestroyed()) { + mainWindow.webContents.send("auto-mode:event", data); + } + }; + + return await autoModeService.mergeFeature({ + projectPath, + featureId, + options, + sendToRenderer, + }); + } catch (error) { + console.error("[IPC] worktree:merge-feature error:", error); + return { success: false, error: error.message }; + } + } +); +/** + * Get worktree info for a feature + */ +ipcMain.handle("worktree:get-info", async (_, { projectPath, featureId }) => { + try { + return await autoModeService.getWorktreeInfo({ projectPath, featureId }); + } catch 
(error) { + console.error("[IPC] worktree:get-info error:", error); + return { success: false, error: error.message }; + } +}); + +/** + * Get worktree status (changed files, commits) + */ +ipcMain.handle("worktree:get-status", async (_, { projectPath, featureId }) => { + try { + return await autoModeService.getWorktreeStatus({ projectPath, featureId }); + } catch (error) { + console.error("[IPC] worktree:get-status error:", error); + return { success: false, error: error.message }; + } +}); + +/** + * List all feature worktrees + */ +ipcMain.handle("worktree:list", async (_, { projectPath }) => { + try { + return await autoModeService.listWorktrees({ projectPath }); + } catch (error) { + console.error("[IPC] worktree:list error:", error); + return { success: false, error: error.message }; + } +}); + +/** + * Get file diffs for a worktree + */ +ipcMain.handle("worktree:get-diffs", async (_, { projectPath, featureId }) => { + try { + return await autoModeService.getFileDiffs({ projectPath, featureId }); + } catch (error) { + console.error("[IPC] worktree:get-diffs error:", error); + return { success: false, error: error.message }; + } +}); + +/** + * Get diff for a specific file in a worktree + */ +ipcMain.handle( + "worktree:get-file-diff", + async (_, { projectPath, featureId, filePath }) => { + try { + return await autoModeService.getFileDiff({ + projectPath, + featureId, + filePath, + }); + } catch (error) { + console.error("[IPC] worktree:get-file-diff error:", error); + return { success: false, error: error.message }; + } + } +); + +/** + * Get file diffs for the main project (non-worktree) + */ +ipcMain.handle("git:get-diffs", async (_, { projectPath }) => { + try { + return await worktreeManager.getFileDiffs(projectPath); + } catch (error) { + console.error("[IPC] git:get-diffs error:", error); + return { success: false, error: error.message }; + } +}); + +/** + * Get diff for a specific file in the main project (non-worktree) + */ 
+ipcMain.handle("git:get-file-diff", async (_, { projectPath, filePath }) => { + try { + return await worktreeManager.getFileDiff(projectPath, filePath); + } catch (error) { + console.error("[IPC] git:get-file-diff error:", error); + return { success: false, error: error.message }; + } +}); diff --git a/app/electron/preload.js b/app/electron/preload.js index d98190a3..65d2b03a 100644 --- a/app/electron/preload.js +++ b/app/electron/preload.js @@ -97,8 +97,8 @@ contextBridge.exposeInMainWorld("electronAPI", { status: () => ipcRenderer.invoke("auto-mode:status"), // Run a specific feature - runFeature: (projectPath, featureId) => - ipcRenderer.invoke("auto-mode:run-feature", { projectPath, featureId }), + runFeature: (projectPath, featureId, useWorktrees) => + ipcRenderer.invoke("auto-mode:run-feature", { projectPath, featureId, useWorktrees }), // Verify a specific feature by running its tests verifyFeature: (projectPath, featureId) => @@ -140,6 +140,67 @@ contextBridge.exposeInMainWorld("electronAPI", { }, }, + // Claude CLI Detection API + checkClaudeCli: () => ipcRenderer.invoke("claude:check-cli"), + + // Codex CLI Detection API + checkCodexCli: () => ipcRenderer.invoke("codex:check-cli"), + + // Model Management APIs + model: { + // Get all available models from all providers + getAvailable: () => ipcRenderer.invoke("model:get-available"), + + // Check all provider installation status + checkProviders: () => ipcRenderer.invoke("model:check-providers"), + }, + + // OpenAI API + testOpenAIConnection: (apiKey) => + ipcRenderer.invoke("openai:test-connection", { apiKey }), + + // Worktree Management APIs + worktree: { + // Revert feature changes by removing the worktree + revertFeature: (projectPath, featureId) => + ipcRenderer.invoke("worktree:revert-feature", { projectPath, featureId }), + + // Merge feature worktree changes back to main branch + mergeFeature: (projectPath, featureId, options) => + ipcRenderer.invoke("worktree:merge-feature", { projectPath, 
featureId, options }), + + // Get worktree info for a feature + getInfo: (projectPath, featureId) => + ipcRenderer.invoke("worktree:get-info", { projectPath, featureId }), + + // Get worktree status (changed files, commits) + getStatus: (projectPath, featureId) => + ipcRenderer.invoke("worktree:get-status", { projectPath, featureId }), + + // List all feature worktrees + list: (projectPath) => + ipcRenderer.invoke("worktree:list", { projectPath }), + + // Get file diffs for a feature worktree + getDiffs: (projectPath, featureId) => + ipcRenderer.invoke("worktree:get-diffs", { projectPath, featureId }), + + // Get diff for a specific file in a worktree + getFileDiff: (projectPath, featureId, filePath) => + ipcRenderer.invoke("worktree:get-file-diff", { projectPath, featureId, filePath }), + }, + + // Git Operations APIs (for non-worktree operations) + git: { + // Get file diffs for the main project + getDiffs: (projectPath) => + ipcRenderer.invoke("git:get-diffs", { projectPath }), + + // Get diff for a specific file in the main project + getFileDiff: (projectPath, filePath) => + ipcRenderer.invoke("git:get-file-diff", { projectPath, filePath }), + }, + // Feature Suggestions API suggestions: { // Generate feature suggestions diff --git a/app/electron/services/claude-cli-detector.js b/app/electron/services/claude-cli-detector.js new file mode 100644 index 00000000..31030f0d --- /dev/null +++ b/app/electron/services/claude-cli-detector.js @@ -0,0 +1,119 @@ +const { execSync } = require('child_process'); +const fs = require('fs'); +const path = require('path'); +const os = require('os'); + +class ClaudeCliDetector { + /** + * Check if Claude Code CLI is installed and accessible + * @returns {Object} { installed: boolean, path: string|null, version: string|null, method: 'cli'|'sdk'|'none' } + */ + static detectClaudeInstallation() { + try { + // Method 1: Check if 'claude' command is in PATH + try { + const claudePath = execSync('which claude', { encoding: 'utf-8' 
}).trim(); + const version = execSync('claude --version', { encoding: 'utf-8' }).trim(); + return { + installed: true, + path: claudePath, + version: version, + method: 'cli' + }; + } catch (error) { + // CLI not in PATH, check local installation + } + + // Method 2: Check for local installation + const localClaudePath = path.join(os.homedir(), '.claude', 'local', 'claude'); + if (fs.existsSync(localClaudePath)) { + try { + const version = execSync(`${localClaudePath} --version`, { encoding: 'utf-8' }).trim(); + return { + installed: true, + path: localClaudePath, + version: version, + method: 'cli-local' + }; + } catch (error) { + // Local CLI exists but may not be executable + } + } + + // Method 3: Check Windows path + if (process.platform === 'win32') { + try { + const claudePath = execSync('where claude', { encoding: 'utf-8' }).trim(); + const version = execSync('claude --version', { encoding: 'utf-8' }).trim(); + return { + installed: true, + path: claudePath, + version: version, + method: 'cli' + }; + } catch (error) { + // Not found + } + } + + // Method 4: SDK mode (using OAuth token) + if (process.env.CLAUDE_CODE_OAUTH_TOKEN) { + return { + installed: true, + path: null, + version: 'SDK Mode', + method: 'sdk' + }; + } + + return { + installed: false, + path: null, + version: null, + method: 'none' + }; + } catch (error) { + console.error('[ClaudeCliDetector] Error detecting Claude installation:', error); + return { + installed: false, + path: null, + version: null, + method: 'none', + error: error.message + }; + } + } + + /** + * Get installation recommendations + */ + static getInstallationInfo() { + const detection = this.detectClaudeInstallation(); + + if (detection.installed) { + return { + status: 'installed', + method: detection.method, + version: detection.version, + path: detection.path, + recommendation: detection.method === 'cli' + ? 
'Using Claude Code CLI - optimal for long-running tasks' + : 'Using SDK mode - works well but CLI may provide better performance' + }; + } + + return { + status: 'not_installed', + recommendation: 'Consider installing Claude Code CLI for better performance with ultrathink', + installCommands: { + macos: 'curl -fsSL claude.ai/install.sh | bash', + windows: 'irm https://claude.ai/install.ps1 | iex', + linux: 'curl -fsSL claude.ai/install.sh | bash', + npm: 'npm install -g @anthropic-ai/claude-code' + } + }; + } +} + +module.exports = ClaudeCliDetector; + diff --git a/app/electron/services/codex-cli-detector.js b/app/electron/services/codex-cli-detector.js new file mode 100644 index 00000000..1a60e3b3 --- /dev/null +++ b/app/electron/services/codex-cli-detector.js @@ -0,0 +1,229 @@ +const { execSync } = require('child_process'); +const fs = require('fs'); +const path = require('path'); +const os = require('os'); + +/** + * Codex CLI Detector - Checks if OpenAI Codex CLI is installed + * + * Codex CLI is OpenAI's agent CLI tool that allows users to use + * GPT-5.1 Codex models (gpt-5.1-codex-max, gpt-5.1-codex, etc.) + * for code generation and agentic tasks. 
+ */ +class CodexCliDetector { + /** + * Check if Codex CLI is installed and accessible + * @returns {Object} { installed: boolean, path: string|null, version: string|null, method: 'cli'|'npm'|'brew'|'none' } + */ + static detectCodexInstallation() { + try { + // Method 1: Check if 'codex' command is in PATH + try { + const codexPath = execSync('which codex 2>/dev/null', { encoding: 'utf-8' }).trim(); + if (codexPath) { + const version = this.getCodexVersion(codexPath); + return { + installed: true, + path: codexPath, + version: version, + method: 'cli' + }; + } + } catch (error) { + // CLI not in PATH, continue checking other methods + } + + // Method 2: Check for npm global installation + try { + const npmListOutput = execSync('npm list -g @openai/codex --depth=0 2>/dev/null', { encoding: 'utf-8' }); + if (npmListOutput && npmListOutput.includes('@openai/codex')) { + // Get the path from npm bin + const npmBinPath = execSync('npm bin -g', { encoding: 'utf-8' }).trim(); + const codexPath = path.join(npmBinPath, 'codex'); + const version = this.getCodexVersion(codexPath); + return { + installed: true, + path: codexPath, + version: version, + method: 'npm' + }; + } + } catch (error) { + // npm global not found + } + + // Method 3: Check for Homebrew installation on macOS + if (process.platform === 'darwin') { + try { + const brewList = execSync('brew list --formula 2>/dev/null', { encoding: 'utf-8' }); + if (brewList.includes('codex')) { + const brewPrefixOutput = execSync('brew --prefix codex 2>/dev/null', { encoding: 'utf-8' }).trim(); + const codexPath = path.join(brewPrefixOutput, 'bin', 'codex'); + const version = this.getCodexVersion(codexPath); + return { + installed: true, + path: codexPath, + version: version, + method: 'brew' + }; + } + } catch (error) { + // Homebrew not found or codex not installed via brew + } + } + + // Method 4: Check Windows path + if (process.platform === 'win32') { + try { + const codexPath = execSync('where codex 2>nul', { 
encoding: 'utf-8' }).trim().split('\n')[0]; + if (codexPath) { + const version = this.getCodexVersion(codexPath); + return { + installed: true, + path: codexPath, + version: version, + method: 'cli' + }; + } + } catch (error) { + // Not found on Windows + } + } + + // Method 5: Check common installation paths + const commonPaths = [ + path.join(os.homedir(), '.local', 'bin', 'codex'), + path.join(os.homedir(), '.npm-global', 'bin', 'codex'), + '/usr/local/bin/codex', + '/opt/homebrew/bin/codex', + ]; + + for (const checkPath of commonPaths) { + if (fs.existsSync(checkPath)) { + const version = this.getCodexVersion(checkPath); + return { + installed: true, + path: checkPath, + version: version, + method: 'cli' + }; + } + } + + // Method 6: Check if OPENAI_API_KEY is set (can use Codex API directly) + if (process.env.OPENAI_API_KEY) { + return { + installed: false, + path: null, + version: null, + method: 'api-key-only', + hasApiKey: true + }; + } + + return { + installed: false, + path: null, + version: null, + method: 'none' + }; + } catch (error) { + console.error('[CodexCliDetector] Error detecting Codex installation:', error); + return { + installed: false, + path: null, + version: null, + method: 'none', + error: error.message + }; + } + } + + /** + * Get Codex CLI version from executable path + * @param {string} codexPath Path to codex executable + * @returns {string|null} Version string or null + */ + static getCodexVersion(codexPath) { + try { + const version = execSync(`"${codexPath}" --version 2>/dev/null`, { encoding: 'utf-8' }).trim(); + return version || null; + } catch (error) { + return null; + } + } + + /** + * Get installation info and recommendations + * @returns {Object} Installation status and recommendations + */ + static getInstallationInfo() { + const detection = this.detectCodexInstallation(); + + if (detection.installed) { + return { + status: 'installed', + method: detection.method, + version: detection.version, + path: detection.path, + 
recommendation: detection.method === 'cli' + ? 'Using Codex CLI - ready for GPT-5.1 Codex models' + : `Using Codex CLI via ${detection.method} - ready for GPT-5.1 Codex models` + }; + } + + // Not installed but has API key + if (detection.method === 'api-key-only') { + return { + status: 'api_key_only', + method: 'api-key-only', + recommendation: 'OPENAI_API_KEY detected but Codex CLI not installed. Install Codex CLI for full agentic capabilities.', + installCommands: this.getInstallCommands() + }; + } + + return { + status: 'not_installed', + recommendation: 'Install OpenAI Codex CLI to use GPT-5.1 Codex models for agentic tasks', + installCommands: this.getInstallCommands() + }; + } + + /** + * Get installation commands for different platforms + * @returns {Object} Installation commands by platform + */ + static getInstallCommands() { + return { + npm: 'npm install -g @openai/codex@latest', + macos: 'brew install codex', + linux: 'npm install -g @openai/codex@latest', + windows: 'npm install -g @openai/codex@latest' + }; + } + + /** + * Check if Codex CLI supports a specific model + * @param {string} model Model name to check + * @returns {boolean} Whether the model is supported + */ + static isModelSupported(model) { + const supportedModels = [ + 'gpt-5.1-codex-max', + 'gpt-5.1-codex', + 'gpt-5.1-codex-mini', + 'gpt-5.1' + ]; + return supportedModels.includes(model); + } + + /** + * Get default model for Codex CLI + * @returns {string} Default model name + */ + static getDefaultModel() { + return 'gpt-5.1-codex-max'; + } +} + +module.exports = CodexCliDetector; diff --git a/app/electron/services/codex-config-manager.js b/app/electron/services/codex-config-manager.js new file mode 100644 index 00000000..37832f61 --- /dev/null +++ b/app/electron/services/codex-config-manager.js @@ -0,0 +1,351 @@ +/** + * Codex TOML Configuration Manager + * + * Manages Codex CLI's TOML configuration file to add/update MCP server settings. 
+ * Codex CLI looks for config at: + * - ~/.codex/config.toml (user-level) + * - .codex/config.toml (project-level, takes precedence) + */ + +const fs = require('fs/promises'); +const path = require('path'); +const os = require('os'); + +class CodexConfigManager { + constructor() { + this.userConfigPath = path.join(os.homedir(), '.codex', 'config.toml'); + this.projectConfigPath = null; // Will be set per project + } + + /** + * Set the project path for project-level config + */ + setProjectPath(projectPath) { + this.projectConfigPath = path.join(projectPath, '.codex', 'config.toml'); + } + + /** + * Get the effective config path (project-level if exists, otherwise user-level) + */ + async getConfigPath() { + if (this.projectConfigPath) { + try { + await fs.access(this.projectConfigPath); + return this.projectConfigPath; + } catch (e) { + // Project config doesn't exist, fall back to user config + } + } + + // Ensure user config directory exists + const userConfigDir = path.dirname(this.userConfigPath); + try { + await fs.mkdir(userConfigDir, { recursive: true }); + } catch (e) { + // Directory might already exist + } + + return this.userConfigPath; + } + + /** + * Read existing TOML config (simple parser for our needs) + */ + async readConfig(configPath) { + try { + const content = await fs.readFile(configPath, 'utf-8'); + return this.parseToml(content); + } catch (e) { + if (e.code === 'ENOENT') { + return {}; + } + throw e; + } + } + + /** + * Simple TOML parser for our specific use case + * This is a minimal parser that handles the MCP server config structure + */ + parseToml(content) { + const config = {}; + let currentSection = null; + let currentSubsection = null; + + const lines = content.split('\n'); + + for (const line of lines) { + const trimmed = line.trim(); + + // Skip comments and empty lines + if (!trimmed || trimmed.startsWith('#')) { + continue; + } + + // Section header: [section] + const sectionMatch = trimmed.match(/^\[([^\]]+)\]$/); + if 
(sectionMatch) { + const sectionName = sectionMatch[1]; + const parts = sectionName.split('.'); + + if (parts.length === 1) { + currentSection = parts[0]; + currentSubsection = null; + if (!config[currentSection]) { + config[currentSection] = {}; + } + } else if (parts.length === 2) { + currentSection = parts[0]; + currentSubsection = parts[1]; + if (!config[currentSection]) { + config[currentSection] = {}; + } + if (!config[currentSection][currentSubsection]) { + config[currentSection][currentSubsection] = {}; + } + } + continue; + } + + // Key-value pair: key = value + const kvMatch = trimmed.match(/^([^=]+)=(.+)$/); + if (kvMatch) { + const key = kvMatch[1].trim(); + let value = kvMatch[2].trim(); + + // Remove quotes if present + if ((value.startsWith('"') && value.endsWith('"')) || + (value.startsWith("'") && value.endsWith("'"))) { + value = value.slice(1, -1); + } + + // Parse boolean + if (value === 'true') value = true; + else if (value === 'false') value = false; + // Parse number + else if (/^-?\d+$/.test(value)) value = parseInt(value, 10); + else if (/^-?\d+\.\d+$/.test(value)) value = parseFloat(value); + + if (currentSubsection) { + if (!config[currentSection][currentSubsection]) { + config[currentSection][currentSubsection] = {}; + } + config[currentSection][currentSubsection][key] = value; + } else if (currentSection) { + if (!config[currentSection]) { + config[currentSection] = {}; + } + config[currentSection][key] = value; + } else { + config[key] = value; + } + } + } + + return config; + } + + /** + * Convert config object back to TOML format + */ + stringifyToml(config, indent = 0) { + const indentStr = ' '.repeat(indent); + let result = ''; + + for (const [key, value] of Object.entries(config)) { + if (typeof value === 'object' && value !== null && !Array.isArray(value)) { + // Section + result += `${indentStr}[${key}]\n`; + result += this.stringifyToml(value, indent); + } else { + // Key-value + let valueStr = value; + if (typeof value === 
'string') { + // Escape quotes and wrap in quotes if needed + if (value.includes('"') || value.includes("'") || value.includes(' ')) { + valueStr = `"${value.replace(/"/g, '\\"')}"`; + } + } else if (typeof value === 'boolean') { + valueStr = value.toString(); + } + result += `${indentStr}${key} = ${valueStr}\n`; + } + } + + return result; + } + + /** + * Configure the automaker-tools MCP server + */ + async configureMcpServer(projectPath, mcpServerScriptPath) { + this.setProjectPath(projectPath); + const configPath = await this.getConfigPath(); + + // Read existing config + const config = await this.readConfig(configPath); + + // Ensure mcp_servers section exists + if (!config.mcp_servers) { + config.mcp_servers = {}; + } + + // Configure automaker-tools server + config.mcp_servers['automaker-tools'] = { + command: 'node', + args: [mcpServerScriptPath], + env: { + AUTOMAKER_PROJECT_PATH: projectPath + }, + startup_timeout_sec: 10, + tool_timeout_sec: 60, + enabled_tools: ['UpdateFeatureStatus'] + }; + + // Ensure experimental_use_rmcp_client is enabled (if needed) + if (!config.experimental_use_rmcp_client) { + config.experimental_use_rmcp_client = true; + } + + // Write config back + await this.writeConfig(configPath, config); + + console.log(`[CodexConfigManager] Configured automaker-tools MCP server in ${configPath}`); + return configPath; + } + + /** + * Write config to TOML file + */ + async writeConfig(configPath, config) { + let content = ''; + + // Write top-level keys first (preserve existing non-MCP config) + for (const [key, value] of Object.entries(config)) { + if (key === 'mcp_servers' || key === 'experimental_use_rmcp_client') { + continue; // Handle these separately + } + if (typeof value !== 'object') { + content += `${key} = ${this.formatValue(value)}\n`; + } + } + + // Write experimental flag if enabled + if (config.experimental_use_rmcp_client) { + if (content && !content.endsWith('\n\n')) { + content += '\n'; + } + content += 
`experimental_use_rmcp_client = true\n`; + } + + // Write mcp_servers section + if (config.mcp_servers && Object.keys(config.mcp_servers).length > 0) { + if (content && !content.endsWith('\n\n')) { + content += '\n'; + } + + for (const [serverName, serverConfig] of Object.entries(config.mcp_servers)) { + content += `\n[mcp_servers.${serverName}]\n`; + + // Write command first + if (serverConfig.command) { + content += `command = "${this.escapeTomlString(serverConfig.command)}"\n`; + } + + // Write args + if (serverConfig.args && Array.isArray(serverConfig.args)) { + const argsStr = serverConfig.args.map(a => `"${this.escapeTomlString(a)}"`).join(', '); + content += `args = [${argsStr}]\n`; + } + + // Write timeouts (must be before env subsection) + if (serverConfig.startup_timeout_sec !== undefined) { + content += `startup_timeout_sec = ${serverConfig.startup_timeout_sec}\n`; + } + + if (serverConfig.tool_timeout_sec !== undefined) { + content += `tool_timeout_sec = ${serverConfig.tool_timeout_sec}\n`; + } + + // Write enabled_tools (must be before env subsection - at server level, not env level) + if (serverConfig.enabled_tools && Array.isArray(serverConfig.enabled_tools)) { + const toolsStr = serverConfig.enabled_tools.map(t => `"${this.escapeTomlString(t)}"`).join(', '); + content += `enabled_tools = [${toolsStr}]\n`; + } + + // Write env section last (as a separate subsection) + // IMPORTANT: In TOML, once we start [mcp_servers.server_name.env], + // everything after belongs to that subsection until a new section starts + if (serverConfig.env && typeof serverConfig.env === 'object' && Object.keys(serverConfig.env).length > 0) { + content += `\n[mcp_servers.${serverName}.env]\n`; + for (const [envKey, envValue] of Object.entries(serverConfig.env)) { + content += `${envKey} = "${this.escapeTomlString(String(envValue))}"\n`; + } + } + } + } + + // Ensure directory exists + const configDir = path.dirname(configPath); + await fs.mkdir(configDir, { recursive: true 
}); + + // Write file + await fs.writeFile(configPath, content, 'utf-8'); + } + + /** + * Escape special characters in TOML strings + */ + escapeTomlString(str) { + return str + .replace(/\\/g, '\\\\') + .replace(/"/g, '\\"') + .replace(/\n/g, '\\n') + .replace(/\r/g, '\\r') + .replace(/\t/g, '\\t'); + } + + /** + * Format a value for TOML output + */ + formatValue(value) { + if (typeof value === 'string') { + // Escape quotes + const escaped = value.replace(/\\/g, '\\\\').replace(/"/g, '\\"'); + return `"${escaped}"`; + } else if (typeof value === 'boolean') { + return value.toString(); + } else if (typeof value === 'number') { + return value.toString(); + } + return `"${String(value)}"`; + } + + /** + * Remove automaker-tools MCP server configuration + */ + async removeMcpServer(projectPath) { + this.setProjectPath(projectPath); + const configPath = await this.getConfigPath(); + + try { + const config = await this.readConfig(configPath); + + if (config.mcp_servers && config.mcp_servers['automaker-tools']) { + delete config.mcp_servers['automaker-tools']; + + // If no more MCP servers, remove the section + if (Object.keys(config.mcp_servers).length === 0) { + delete config.mcp_servers; + } + + await this.writeConfig(configPath, config); + console.log(`[CodexConfigManager] Removed automaker-tools MCP server from ${configPath}`); + } + } catch (e) { + console.error(`[CodexConfigManager] Error removing MCP server config:`, e); + } + } +} + +module.exports = new CodexConfigManager(); diff --git a/app/electron/services/codex-executor.js b/app/electron/services/codex-executor.js new file mode 100644 index 00000000..b051c170 --- /dev/null +++ b/app/electron/services/codex-executor.js @@ -0,0 +1,610 @@ +/** + * Codex CLI Execution Wrapper + * + * This module handles spawning and managing Codex CLI processes + * for executing OpenAI model queries. 
+ */ + +const { spawn } = require('child_process'); +const { EventEmitter } = require('events'); +const readline = require('readline'); +const path = require('path'); +const CodexCliDetector = require('./codex-cli-detector'); +const codexConfigManager = require('./codex-config-manager'); + +/** + * Message types from Codex CLI JSON output + */ +const CODEX_EVENT_TYPES = { + THREAD_STARTED: 'thread.started', + ITEM_STARTED: 'item.started', + ITEM_COMPLETED: 'item.completed', + THREAD_COMPLETED: 'thread.completed', + ERROR: 'error' +}; + +/** + * Codex Executor - Manages Codex CLI process execution + */ +class CodexExecutor extends EventEmitter { + constructor() { + super(); + this.currentProcess = null; + this.codexPath = null; + } + + /** + * Find and cache the Codex CLI path + * @returns {string|null} Path to codex executable + */ + findCodexPath() { + if (this.codexPath) { + return this.codexPath; + } + + const installation = CodexCliDetector.detectCodexInstallation(); + if (installation.installed && installation.path) { + this.codexPath = installation.path; + return this.codexPath; + } + + return null; + } + + /** + * Execute a Codex CLI query + * @param {Object} options Execution options + * @param {string} options.prompt The prompt to execute + * @param {string} options.model Model to use (default: gpt-5.1-codex-max) + * @param {string} options.cwd Working directory + * @param {string} options.systemPrompt System prompt (optional, will be prepended to prompt) + * @param {number} options.maxTurns Not used - Codex CLI doesn't support this parameter + * @param {string[]} options.allowedTools Not used - Codex CLI doesn't support this parameter + * @param {Object} options.env Environment variables + * @param {Object} options.mcpServers MCP servers configuration (for configuring Codex TOML) + * @returns {AsyncGenerator} Generator yielding messages + */ + async *execute(options) { + const { + prompt, + model = 'gpt-5.1-codex-max', + cwd = process.cwd(), + 
systemPrompt, + maxTurns, // Not used by Codex CLI + allowedTools, // Not used by Codex CLI + env = {}, + mcpServers = null + } = options; + + const codexPath = this.findCodexPath(); + if (!codexPath) { + yield { + type: 'error', + error: 'Codex CLI not found. Please install it with: npm install -g @openai/codex@latest' + }; + return; + } + + // Configure MCP server if provided + if (mcpServers && mcpServers['automaker-tools']) { + try { + // Get the absolute path to the MCP server script + const mcpServerScriptPath = path.resolve(__dirname, 'mcp-server-stdio.js'); + + // Verify the script exists + const fs = require('fs'); + if (!fs.existsSync(mcpServerScriptPath)) { + console.warn(`[CodexExecutor] MCP server script not found at ${mcpServerScriptPath}, skipping MCP configuration`); + } else { + // Configure Codex TOML to use the MCP server + await codexConfigManager.configureMcpServer(cwd, mcpServerScriptPath); + console.log('[CodexExecutor] Configured automaker-tools MCP server for Codex CLI'); + } + } catch (error) { + console.error('[CodexExecutor] Failed to configure MCP server:', error); + // Continue execution even if MCP config fails - Codex will work without MCP tools + } + } + + // Combine system prompt with main prompt if provided + // Codex CLI doesn't support --system-prompt argument, so we prepend it to the prompt + let combinedPrompt = prompt; + console.log('[CodexExecutor] Original prompt length:', prompt?.length || 0); + if (systemPrompt) { + combinedPrompt = `${systemPrompt}\n\n---\n\n${prompt}`; + console.log('[CodexExecutor] System prompt prepended to main prompt'); + console.log('[CodexExecutor] System prompt length:', systemPrompt.length); + console.log('[CodexExecutor] Combined prompt length:', combinedPrompt.length); + } + + // Build command arguments + // Note: maxTurns and allowedTools are not supported by Codex CLI + console.log('[CodexExecutor] Building command arguments...'); + const args = this.buildArgs({ + prompt: combinedPrompt, + 
model + }); + + console.log('[CodexExecutor] Executing command:', codexPath); + console.log('[CodexExecutor] Number of args:', args.length); + console.log('[CodexExecutor] Args (without prompt):', args.slice(0, -1).join(' ')); + console.log('[CodexExecutor] Prompt length in args:', args[args.length - 1]?.length || 0); + console.log('[CodexExecutor] Prompt preview (first 200 chars):', args[args.length - 1]?.substring(0, 200)); + console.log('[CodexExecutor] Working directory:', cwd); + + // Spawn the process + const processEnv = { + ...process.env, + ...env, + // Ensure OPENAI_API_KEY is available + OPENAI_API_KEY: env.OPENAI_API_KEY || process.env.OPENAI_API_KEY + }; + + // Log API key status (without exposing the key) + if (processEnv.OPENAI_API_KEY) { + console.log('[CodexExecutor] OPENAI_API_KEY is set (length:', processEnv.OPENAI_API_KEY.length, ')'); + } else { + console.warn('[CodexExecutor] WARNING: OPENAI_API_KEY is not set!'); + } + + console.log('[CodexExecutor] Spawning process...'); + const proc = spawn(codexPath, args, { + cwd, + env: processEnv, + stdio: ['pipe', 'pipe', 'pipe'] + }); + + this.currentProcess = proc; + console.log('[CodexExecutor] Process spawned with PID:', proc.pid); + + // Track process events + proc.on('error', (error) => { + console.error('[CodexExecutor] Process error:', error); + }); + + proc.on('spawn', () => { + console.log('[CodexExecutor] Process spawned successfully'); + }); + + // Collect stderr output as it comes in + let stderr = ''; + let hasOutput = false; + let stdoutChunks = []; + let stderrChunks = []; + + proc.stderr.on('data', (data) => { + const errorText = data.toString(); + stderr += errorText; + stderrChunks.push(errorText); + hasOutput = true; + console.error('[CodexExecutor] stderr chunk received (', data.length, 'bytes):', errorText.substring(0, 200)); + }); + + proc.stderr.on('end', () => { + console.log('[CodexExecutor] stderr stream ended. 
Total chunks:', stderrChunks.length, 'Total length:', stderr.length); + }); + + proc.stdout.on('data', (data) => { + const text = data.toString(); + stdoutChunks.push(text); + hasOutput = true; + console.log('[CodexExecutor] stdout chunk received (', data.length, 'bytes):', text.substring(0, 200)); + }); + + proc.stdout.on('end', () => { + console.log('[CodexExecutor] stdout stream ended. Total chunks:', stdoutChunks.length); + }); + + // Create readline interface for parsing JSONL output + console.log('[CodexExecutor] Creating readline interface...'); + const rl = readline.createInterface({ + input: proc.stdout, + crlfDelay: Infinity + }); + + // Track accumulated content for converting to Claude format + let accumulatedText = ''; + let toolUses = []; + let lastOutputTime = Date.now(); + const OUTPUT_TIMEOUT = 30000; // 30 seconds timeout for no output + let lineCount = 0; + let jsonParseErrors = 0; + + // Set up timeout check + const checkTimeout = setInterval(() => { + const timeSinceLastOutput = Date.now() - lastOutputTime; + if (timeSinceLastOutput > OUTPUT_TIMEOUT && !hasOutput) { + console.warn('[CodexExecutor] No output received for', timeSinceLastOutput, 'ms. Process still alive:', !proc.killed); + } + }, 5000); + + console.log('[CodexExecutor] Starting to read lines from stdout...'); + + // Process stdout line by line (JSONL format) + try { + for await (const line of rl) { + hasOutput = true; + lastOutputTime = Date.now(); + lineCount++; + + console.log('[CodexExecutor] Line', lineCount, 'received (length:', line.length, '):', line.substring(0, 100)); + + if (!line.trim()) { + console.log('[CodexExecutor] Skipping empty line'); + continue; + } + + try { + const event = JSON.parse(line); + console.log('[CodexExecutor] Successfully parsed JSON event. Type:', event.type, 'Keys:', Object.keys(event)); + + const convertedMsg = this.convertToClaudeFormat(event); + console.log('[CodexExecutor] Converted message:', convertedMsg ? 
{ type: convertedMsg.type } : 'null'); + + if (convertedMsg) { + // Accumulate text content + if (convertedMsg.type === 'assistant' && convertedMsg.message?.content) { + for (const block of convertedMsg.message.content) { + if (block.type === 'text') { + accumulatedText += block.text; + console.log('[CodexExecutor] Accumulated text block (total length:', accumulatedText.length, ')'); + } else if (block.type === 'tool_use') { + toolUses.push(block); + console.log('[CodexExecutor] Tool use detected:', block.name); + } + } + } + console.log('[CodexExecutor] Yielding message of type:', convertedMsg.type); + yield convertedMsg; + } else { + console.log('[CodexExecutor] Converted message is null, skipping'); + } + } catch (parseError) { + jsonParseErrors++; + // Non-JSON output, yield as text + console.log('[CodexExecutor] JSON parse error (', jsonParseErrors, 'total):', parseError.message); + console.log('[CodexExecutor] Non-JSON line content:', line.substring(0, 200)); + yield { + type: 'assistant', + message: { + content: [{ type: 'text', text: line + '\n' }] + } + }; + } + } + + console.log('[CodexExecutor] Finished reading all lines. Total lines:', lineCount, 'JSON errors:', jsonParseErrors); + } catch (readError) { + console.error('[CodexExecutor] Error reading from readline:', readError); + throw readError; + } finally { + clearInterval(checkTimeout); + console.log('[CodexExecutor] Cleaned up timeout checker'); + } + + // Handle process completion + console.log('[CodexExecutor] Waiting for process to close...'); + const exitCode = await new Promise((resolve) => { + proc.on('close', (code, signal) => { + console.log('[CodexExecutor] Process closed with code:', code, 'signal:', signal); + resolve(code); + }); + }); + + this.currentProcess = null; + console.log('[CodexExecutor] Process completed. 
Exit code:', exitCode, 'Has output:', hasOutput, 'Stderr length:', stderr.length); + + // Wait a bit for any remaining stderr data to be collected + console.log('[CodexExecutor] Waiting 200ms for any remaining stderr data...'); + await new Promise(resolve => setTimeout(resolve, 200)); + console.log('[CodexExecutor] Final stderr length:', stderr.length, 'Final stdout chunks:', stdoutChunks.length); + + if (exitCode !== 0) { + const errorMessage = stderr.trim() + ? `Codex CLI exited with code ${exitCode}.\n\nError output:\n${stderr}` + : `Codex CLI exited with code ${exitCode}. No error output captured.`; + + console.error('[CodexExecutor] Process failed with exit code', exitCode); + console.error('[CodexExecutor] Error message:', errorMessage); + console.error('[CodexExecutor] Stderr chunks:', stderrChunks.length, 'Stdout chunks:', stdoutChunks.length); + + yield { + type: 'error', + error: errorMessage + }; + } else if (!hasOutput && !stderr) { + // Process exited successfully but produced no output - might be API key issue + const warningMessage = 'Codex CLI completed but produced no output. This might indicate:\n' + + '- Missing or invalid OPENAI_API_KEY\n' + + '- Codex CLI configuration issue\n' + + '- The process completed without generating any response\n\n' + + `Debug info: Exit code ${exitCode}, stdout chunks: ${stdoutChunks.length}, stderr chunks: ${stderrChunks.length}, lines read: ${lineCount}`; + + console.warn('[CodexExecutor] No output detected:', warningMessage); + console.warn('[CodexExecutor] Stdout chunks:', stdoutChunks); + console.warn('[CodexExecutor] Stderr chunks:', stderrChunks); + + yield { + type: 'error', + error: warningMessage + }; + } else { + console.log('[CodexExecutor] Process completed successfully. 
Exit code:', exitCode, 'Lines processed:', lineCount); + } + } + + /** + * Build command arguments for Codex CLI + * Only includes supported arguments based on Codex CLI help: + * - --model: Model to use + * - --json: JSON output format + * - --full-auto: Non-interactive automatic execution + * + * Note: Codex CLI does NOT support: + * - --system-prompt (system prompt is prepended to main prompt) + * - --max-turns (not available in CLI) + * - --tools (not available in CLI) + * + * @param {Object} options Options + * @returns {string[]} Command arguments + */ + buildArgs(options) { + const { prompt, model } = options; + + console.log('[CodexExecutor] buildArgs called with model:', model, 'prompt length:', prompt?.length || 0); + + const args = ['exec']; + + // Add model (required for most use cases) + if (model) { + args.push('--model', model); + console.log('[CodexExecutor] Added model argument:', model); + } + + // Add JSON output flag for structured parsing + args.push('--json'); + console.log('[CodexExecutor] Added --json flag'); + + // Add full-auto mode (non-interactive) + // This enables automatic execution with workspace-write sandbox + args.push('--full-auto'); + console.log('[CodexExecutor] Added --full-auto flag'); + + // Add the prompt at the end + args.push(prompt); + console.log('[CodexExecutor] Added prompt (length:', prompt?.length || 0, ')'); + + console.log('[CodexExecutor] Final args count:', args.length); + return args; + } + + /** + * Map Claude tool names to Codex tool names + * @param {string[]} tools Array of tool names + * @returns {string[]} Mapped tool names + */ + mapToolsToCodex(tools) { + const toolMap = { + 'Read': 'read', + 'Write': 'write', + 'Edit': 'edit', + 'Bash': 'bash', + 'Glob': 'glob', + 'Grep': 'grep', + 'WebSearch': 'web-search', + 'WebFetch': 'web-fetch' + }; + + return tools + .map(tool => toolMap[tool] || tool.toLowerCase()) + .filter(tool => tool); // Remove undefined + } + + /** + * Convert Codex JSONL event to Claude 
SDK message format + * @param {Object} event Codex event object + * @returns {Object|null} Claude-format message or null + */ + convertToClaudeFormat(event) { + console.log('[CodexExecutor] Converting event:', JSON.stringify(event).substring(0, 200)); + const { type, data, item, thread_id } = event; + + switch (type) { + case CODEX_EVENT_TYPES.THREAD_STARTED: + case 'thread.started': + // Session initialization + return { + type: 'session_start', + sessionId: thread_id || data?.thread_id || event.thread_id + }; + + case CODEX_EVENT_TYPES.ITEM_COMPLETED: + case 'item.completed': + // Codex uses 'item' field, not 'data' + return this.convertItemCompleted(item || data); + + case CODEX_EVENT_TYPES.ITEM_STARTED: + case 'item.started': + // Convert item.started events - these indicate tool/command usage + const startedItem = item || data; + if (startedItem?.type === 'command_execution' && startedItem?.command) { + return { + type: 'assistant', + message: { + content: [{ + type: 'tool_use', + name: 'bash', + input: { command: startedItem.command } + }] + } + }; + } + // For other item.started types, return null (we'll show the completed version) + return null; + + case CODEX_EVENT_TYPES.THREAD_COMPLETED: + case 'thread.completed': + return { + type: 'complete', + sessionId: thread_id || data?.thread_id || event.thread_id + }; + + case CODEX_EVENT_TYPES.ERROR: + case 'error': + return { + type: 'error', + error: data?.message || item?.message || event.message || 'Unknown error from Codex CLI' + }; + + case 'turn.started': + // Turn started - just a marker, no need to convert + return null; + + default: + // Pass through other events + console.log('[CodexExecutor] Unhandled event type:', type); + return null; + } + } + + /** + * Convert item.completed event to Claude format + * @param {Object} item Event item data + * @returns {Object|null} Claude-format message + */ + convertItemCompleted(item) { + if (!item) { + console.log('[CodexExecutor] convertItemCompleted: item is 
null/undefined'); + return null; + } + + const itemType = item.type || item.item_type; + console.log('[CodexExecutor] convertItemCompleted: itemType =', itemType, 'item keys:', Object.keys(item)); + + switch (itemType) { + case 'reasoning': + // Thinking/reasoning output - Codex uses 'text' field + const reasoningText = item.text || item.content || ''; + console.log('[CodexExecutor] Converting reasoning, text length:', reasoningText.length); + return { + type: 'assistant', + message: { + content: [{ + type: 'thinking', + thinking: reasoningText + }] + } + }; + + case 'agent_message': + case 'message': + // Assistant text message + const messageText = item.content || item.text || ''; + console.log('[CodexExecutor] Converting message, text length:', messageText.length); + return { + type: 'assistant', + message: { + content: [{ + type: 'text', + text: messageText + }] + } + }; + + case 'command_execution': + // Command execution - show both the command and its output + const command = item.command || ''; + const output = item.aggregated_output || item.output || ''; + console.log('[CodexExecutor] Converting command_execution, command:', command.substring(0, 50), 'output length:', output.length); + + // Return as text message showing the command and output + return { + type: 'assistant', + message: { + content: [{ + type: 'text', + text: `\`\`\`bash\n${command}\n\`\`\`\n\n${output}` + }] + } + }; + + case 'tool_use': + // Tool use + return { + type: 'assistant', + message: { + content: [{ + type: 'tool_use', + name: item.tool || item.command || 'unknown', + input: item.input || item.args || {} + }] + } + }; + + case 'tool_result': + // Tool result + return { + type: 'tool_result', + tool_use_id: item.tool_use_id, + content: item.output || item.result + }; + + case 'todo_list': + // Todo list - convert to text format + const todos = item.items || []; + const todoText = todos.map((t, i) => `${i + 1}. 
${t.text || t}`).join('\n'); + console.log('[CodexExecutor] Converting todo_list, items:', todos.length); + return { + type: 'assistant', + message: { + content: [{ + type: 'text', + text: `**Todo List:**\n${todoText}` + }] + } + }; + + default: + // Generic text output + const text = item.text || item.content || item.aggregated_output; + if (text) { + console.log('[CodexExecutor] Converting default item type, text length:', text.length); + return { + type: 'assistant', + message: { + content: [{ + type: 'text', + text: String(text) + }] + } + }; + } + console.log('[CodexExecutor] convertItemCompleted: No text content found, returning null'); + return null; + } + } + + /** + * Abort current execution + */ + abort() { + if (this.currentProcess) { + console.log('[CodexExecutor] Aborting current process'); + this.currentProcess.kill('SIGTERM'); + this.currentProcess = null; + } + } + + /** + * Check if execution is in progress + * @returns {boolean} Whether execution is in progress + */ + isRunning() { + return this.currentProcess !== null; + } +} + +// Singleton instance +const codexExecutor = new CodexExecutor(); + +module.exports = codexExecutor; diff --git a/app/electron/services/feature-executor.js b/app/electron/services/feature-executor.js index 9e73062a..8ab6e14e 100644 --- a/app/electron/services/feature-executor.js +++ b/app/electron/services/feature-executor.js @@ -3,11 +3,176 @@ const promptBuilder = require("./prompt-builder"); const contextManager = require("./context-manager"); const featureLoader = require("./feature-loader"); const mcpServerFactory = require("./mcp-server-factory"); +const { ModelRegistry } = require("./model-registry"); +const { ModelProviderFactory } = require("./model-provider"); + +// Model name mappings for Claude (legacy - kept for backwards compatibility) +const MODEL_MAP = { + haiku: "claude-haiku-4-5", + sonnet: "claude-sonnet-4-20250514", + opus: "claude-opus-4-5-20251101", +}; + +// Thinking level to budget_tokens mapping 
+// These values control how much "thinking time" the model gets for extended thinking +const THINKING_BUDGET_MAP = { + none: null, // No extended thinking + low: 4096, // Light thinking + medium: 16384, // Moderate thinking + high: 65536, // Deep thinking + ultrathink: 262144, // Ultra-deep thinking (maximum reasoning) +}; /** * Feature Executor - Handles feature implementation using Claude Agent SDK + * Now supports multiple model providers (Claude, Codex/OpenAI) */ class FeatureExecutor { + /** + * Get the model string based on feature's model setting + * Supports both Claude and Codex/OpenAI models + */ + getModelString(feature) { + const modelKey = feature.model || "opus"; // Default to opus + + // First check if this is a Codex model - they use the model key directly as the string + if (ModelRegistry.isCodexModel(modelKey)) { + const model = ModelRegistry.getModel(modelKey); + if (model && model.modelString) { + console.log( + `[FeatureExecutor] getModelString: modelKey=${modelKey}, modelString=${model.modelString} (Codex model)` + ); + return model.modelString; + } + // If model exists in registry but somehow no modelString, use the key itself + console.log( + `[FeatureExecutor] getModelString: modelKey=${modelKey}, modelString=${modelKey} (Codex fallback)` + ); + return modelKey; + } + + // For Claude models, use the registry lookup + let modelString = ModelRegistry.getModelString(modelKey); + + // Fallback to MODEL_MAP if registry doesn't have it (legacy support) + if (!modelString) { + modelString = MODEL_MAP[modelKey]; + } + + // Final fallback to opus for Claude models only + if (!modelString) { + modelString = MODEL_MAP.opus; + } + + // Validate model string format - ensure it's not incorrectly constructed + // Prevent incorrect formats like "claude-haiku-4-20250514" (mixing haiku with sonnet date) + if (modelString.includes("haiku") && modelString.includes("20250514")) { + console.error( + `[FeatureExecutor] Invalid model string detected: 
${modelString}, using correct format` + ); + modelString = MODEL_MAP.haiku || "claude-haiku-4-5"; + } + + console.log( + `[FeatureExecutor] getModelString: modelKey=${modelKey}, modelString=${modelString}` + ); + return modelString; + } + + /** + * Determine if the feature uses a Codex/OpenAI model + */ + isCodexModel(feature) { + const modelKey = feature.model || "opus"; + return ModelRegistry.isCodexModel(modelKey); + } + + /** + * Get the appropriate provider for the feature's model + */ + getProvider(feature) { + const modelKey = feature.model || "opus"; + return ModelProviderFactory.getProviderForModel(modelKey); + } + + /** + * Get thinking configuration based on feature's thinkingLevel + */ + getThinkingConfig(feature) { + const modelId = feature.model || "opus"; + // Skip thinking config for models that don't support it (e.g., Codex CLI) + if (!ModelRegistry.modelSupportsThinking(modelId)) { + return null; + } + + const level = feature.thinkingLevel || "none"; + const budgetTokens = THINKING_BUDGET_MAP[level]; + + if (budgetTokens == null) { + return null; // No extended thinking (also covers unknown levels, where lookup is undefined) + } + + return { + type: "enabled", + budget_tokens: budgetTokens, + }; + } + + /** + * Prepare for ultrathink execution - validate and warn + */ + prepareForUltrathink(feature, thinkingConfig) { + if (feature.thinkingLevel !== "ultrathink") { + return { ready: true }; + } + + const warnings = []; + const recommendations = []; + + // Check CLI installation + const claudeCliDetector = require("./claude-cli-detector"); + const cliInfo = claudeCliDetector.getInstallationInfo(); + + if (cliInfo.status === "not_installed") { + warnings.push( + "Claude Code CLI not detected - ultrathink may have timeout issues" + ); + recommendations.push( + "Install Claude Code CLI for optimal ultrathink performance" + ); + } + + // Validate budget tokens + if (thinkingConfig && thinkingConfig.budget_tokens > 32000) { + warnings.push( + `Ultrathink budget (${thinkingConfig.budget_tokens} tokens) 
exceeds recommended 32K - may cause long-running requests` + ); + recommendations.push( + "Consider using batch processing for budgets above 32K" + ); + } + + // Cost estimate (rough) + const estimatedCost = ((thinkingConfig?.budget_tokens || 0) / 1000) * 0.015; // Rough estimate + if (estimatedCost > 1.0) { + warnings.push( + `Estimated cost: ~$${estimatedCost.toFixed(2)} per execution` + ); + } + + // Time estimate + warnings.push("Ultrathink tasks typically take 45-180 seconds"); + + return { + ready: true, + warnings, + recommendations, + estimatedCost, + estimatedTime: "45-180 seconds", + cliInfo, + }; + } + /** * Sleep helper */ @@ -22,6 +187,11 @@ class FeatureExecutor { async implementFeature(feature, projectPath, sendToRenderer, execution) { console.log(`[FeatureExecutor] Implementing: ${feature.description}`); + // Declare variables outside try block so they're available in catch + let modelString; + let providerName; + let isCodex; + try { // ======================================== // PHASE 1: PLANNING @@ -52,13 +222,59 @@ class FeatureExecutor { projectPath ); - // Determine if we're in TDD mode (skipTests=false means TDD mode) - const isTDD = !feature.skipTests; + // Ensure feature has a model set (for backward compatibility with old features) + if (!feature.model) { + console.warn( + `[FeatureExecutor] Feature ${feature.id} missing model property, defaulting to 'opus'` + ); + feature.model = "opus"; + } + + // Get model and thinking configuration from feature settings + // (assign to the outer-scope modelString - do NOT re-declare with const, + // or the catch block's error report would always see "not initialized") + modelString = this.getModelString(feature); + const thinkingConfig = this.getThinkingConfig(feature); + + // Prepare for ultrathink if needed + if (feature.thinkingLevel === "ultrathink") { + const preparation = this.prepareForUltrathink(feature, thinkingConfig); + + console.log(`[FeatureExecutor] Ultrathink preparation:`, preparation); + + // Log warnings + if (preparation.warnings && preparation.warnings.length > 0) { + preparation.warnings.forEach((warning) => { + 
console.warn(`[FeatureExecutor] ⚠️ ${warning}`); + }); + } + + // Send preparation info to renderer + sendToRenderer({ + type: "auto_mode_ultrathink_preparation", + featureId: feature.id, + warnings: preparation.warnings || [], + recommendations: preparation.recommendations || [], + estimatedCost: preparation.estimatedCost, + estimatedTime: preparation.estimatedTime, + }); + } + + providerName = this.isCodexModel(feature) ? "Codex/OpenAI" : "Claude"; + console.log( + `[FeatureExecutor] Using provider: ${providerName}, model: ${modelString}, thinking: ${ + feature.thinkingLevel || "none" + }` + ); + + // Note: Claude Agent SDK handles authentication automatically - it can use: + // 1. CLAUDE_CODE_OAUTH_TOKEN env var (for SDK mode) + // 2. Claude CLI's own authentication (if CLI is installed) + // 3. ANTHROPIC_API_KEY (fallback) + // We don't need to validate here - let the SDK/CLI handle auth errors // Configure options for the SDK query const options = { - model: "claude-opus-4-5-20251101", - systemPrompt: await promptBuilder.getCodingPrompt(projectPath, isTDD), + model: modelString, + systemPrompt: promptBuilder.getCodingPrompt(), maxTurns: 1000, cwd: projectPath, mcpServers: { @@ -83,6 +299,11 @@ class FeatureExecutor { abortController: abortController, }; + // Add thinking configuration if enabled + if (thinkingConfig) { + options.thinking = thinkingConfig; + } + // Build the prompt for this specific feature let prompt = await promptBuilder.buildFeaturePrompt(feature, projectPath); @@ -135,8 +356,18 @@ class FeatureExecutor { } } - // Use content blocks instead of plain text - prompt = contentBlocks; + // Wrap content blocks in async generator for SDK (required format for multimodal prompts) + prompt = (async function* () { + yield { + type: "user", + session_id: "", + message: { + role: "user", + content: contentBlocks, + }, + parent_tool_use_id: null, + }; + })(); } // Planning: Analyze the codebase and create implementation plan @@ -168,8 +399,85 @@ class 
FeatureExecutor { }); console.log(`[FeatureExecutor] Phase: ACTION for ${feature.description}`); - // Send query - const currentQuery = query({ prompt, options }); + // Send query - use appropriate provider based on model + let currentQuery; + isCodex = this.isCodexModel(feature); + + // Ensure provider auth is available (especially for Claude SDK) + const provider = this.getProvider(feature); + if (provider?.ensureAuthEnv && !provider.ensureAuthEnv()) { + // Check if CLI is installed to provide better error message + let authMsg = + "Missing Anthropic auth. Set ANTHROPIC_API_KEY or CLAUDE_CODE_OAUTH_TOKEN environment variable."; + try { + const claudeCliDetector = require("./claude-cli-detector"); + const detection = claudeCliDetector.detectClaudeInstallation(); + if (detection.installed && detection.method === "cli") { + authMsg = + "Claude CLI is installed but not authenticated. Run `claude login` to authenticate, or set ANTHROPIC_API_KEY or CLAUDE_CODE_OAUTH_TOKEN environment variable."; + } else { + authMsg = + "Missing Anthropic auth. Set ANTHROPIC_API_KEY or CLAUDE_CODE_OAUTH_TOKEN, or install Claude CLI and run `claude login`."; + } + } catch (err) { + // Fallback to default message + } + console.error(`[FeatureExecutor] ${authMsg}`); + throw new Error(authMsg); + } + + // Validate that model string matches the provider + if (isCodex) { + // Ensure model string is actually a Codex model, not a Claude model + if (modelString.startsWith("claude-")) { + console.error( + `[FeatureExecutor] ERROR: Codex provider selected but Claude model string detected: ${modelString}` + ); + console.error( + `[FeatureExecutor] Feature model: ${ + feature.model || "not set" + }, modelString: ${modelString}` + ); + throw new Error( + `Invalid model configuration: Codex provider cannot use Claude model '${modelString}'. 
Please check feature model setting.` + ); + } + + // Use Codex provider for OpenAI models + console.log( + `[FeatureExecutor] Using Codex provider for model: ${modelString}` + ); + // Pass MCP server config to Codex provider so it can configure Codex CLI TOML + currentQuery = provider.executeQuery({ + prompt, + model: modelString, + cwd: projectPath, + systemPrompt: promptBuilder.getCodingPrompt(), + maxTurns: 20, // Codex CLI typically uses fewer turns + allowedTools: options.allowedTools, + mcpServers: { + "automaker-tools": featureToolsServer, + }, + abortController: abortController, + env: { + OPENAI_API_KEY: process.env.OPENAI_API_KEY, + }, + }); + } else { + // Ensure model string is actually a Claude model, not a Codex model + if ( + !modelString.startsWith("claude-") && + !modelString.match(/^(gpt-|o\d)/) + ) { + console.warn( + `[FeatureExecutor] WARNING: Claude provider selected but unexpected model string: ${modelString}` + ); + } + + // Use Claude SDK (original implementation) + currentQuery = query({ prompt, options }); + } + execution.query = currentQuery; // Stream responses @@ -179,6 +487,22 @@ class FeatureExecutor { // Check if this specific feature was aborted if (!execution.isActive()) break; + // Handle error messages + if (msg.type === "error") { + const errorMsg = `\n❌ Error: ${msg.error}\n`; + await contextManager.writeToContextFile( + projectPath, + feature.id, + errorMsg + ); + sendToRenderer({ + type: "auto_mode_error", + featureId: feature.id, + error: msg.error, + }); + throw new Error(msg.error); + } + if (msg.type === "assistant" && msg.message?.content) { for (const block of msg.message.content) { if (block.type === "text") { @@ -197,6 +521,22 @@ class FeatureExecutor { featureId: feature.id, content: block.text, }); + } else if (block.type === "thinking") { + // Handle thinking output from Codex O-series models + const thinkingMsg = `\n💭 Thinking: ${block.thinking?.substring( + 0, + 200 + )}...\n`; + await 
contextManager.writeToContextFile( + projectPath, + feature.id, + thinkingMsg + ); + sendToRenderer({ + type: "auto_mode_progress", + featureId: feature.id, + content: thinkingMsg, + }); } else if (block.type === "tool_use") { // First tool use indicates we're actively implementing if (!hasStartedToolUse) { @@ -314,6 +654,54 @@ class FeatureExecutor { console.error("[FeatureExecutor] Error implementing feature:", error); + // Safely get model info for error logging (may not be set if error occurred early) + const modelInfo = modelString + ? { + message: error.message, + stack: error.stack, + name: error.name, + code: error.code, + model: modelString, + provider: providerName || "unknown", + isCodex: isCodex !== undefined ? isCodex : "unknown", + } + : { + message: error.message, + stack: error.stack, + name: error.name, + code: error.code, + model: "not initialized", + provider: "unknown", + isCodex: "unknown", + }; + + console.error("[FeatureExecutor] Error details:", modelInfo); + + // Check if this is a Claude CLI process error + if (error.message && error.message.includes("process exited with code")) { + const modelDisplay = modelString + ? `Model: ${modelString}` + : "Model: not initialized"; + const errorMsg = + `Claude Code CLI failed with exit code 1. 
This might be due to:\n` + + `- Invalid or unsupported model (${modelDisplay})\n` + + `- Missing or invalid CLAUDE_CODE_OAUTH_TOKEN\n` + + `- Claude CLI configuration issue\n` + + `- Model not available in your Claude account\n\n` + + `Original error: ${error.message}`; + + await contextManager.writeToContextFile( + projectPath, + feature.id, + `\n❌ ${errorMsg}\n` + ); + sendToRenderer({ + type: "auto_mode_error", + featureId: feature.id, + error: errorMsg, + }); + } + // Clean up if (execution) { execution.abortController = null; @@ -365,9 +753,53 @@ class FeatureExecutor { projectPath ); + // Ensure feature has a model set (for backward compatibility with old features) + if (!feature.model) { + console.warn( + `[FeatureExecutor] Feature ${feature.id} missing model property, defaulting to 'opus'` + ); + feature.model = "opus"; + } + + // Get model and thinking configuration from feature settings + const modelString = this.getModelString(feature); + const thinkingConfig = this.getThinkingConfig(feature); + + // Prepare for ultrathink if needed + if (feature.thinkingLevel === "ultrathink") { + const preparation = this.prepareForUltrathink(feature, thinkingConfig); + + console.log(`[FeatureExecutor] Ultrathink preparation:`, preparation); + + // Log warnings + if (preparation.warnings && preparation.warnings.length > 0) { + preparation.warnings.forEach((warning) => { + console.warn(`[FeatureExecutor] ⚠️ ${warning}`); + }); + } + + // Send preparation info to renderer + sendToRenderer({ + type: "auto_mode_ultrathink_preparation", + featureId: feature.id, + warnings: preparation.warnings || [], + recommendations: preparation.recommendations || [], + estimatedCost: preparation.estimatedCost, + estimatedTime: preparation.estimatedTime, + }); + } + + const isCodex = this.isCodexModel(feature); + const providerName = isCodex ? 
"Codex/OpenAI" : "Claude"; + console.log( + `[FeatureExecutor] Resuming with provider: ${providerName}, model: ${modelString}, thinking: ${ + feature.thinkingLevel || "none" + }` + ); + const options = { - model: "claude-opus-4-5-20251101", - systemPrompt: await promptBuilder.getVerificationPrompt(projectPath, isTDD), + model: modelString, + systemPrompt: promptBuilder.getVerificationPrompt(), maxTurns: 1000, cwd: projectPath, mcpServers: { @@ -392,6 +824,11 @@ class FeatureExecutor { abortController: abortController, }; + // Add thinking configuration if enabled + if (thinkingConfig) { + options.thinking = thinkingConfig; + } + // Build prompt with previous context let prompt = await promptBuilder.buildResumePrompt( feature, @@ -459,11 +896,53 @@ class FeatureExecutor { } } - // Use content blocks instead of plain text - prompt = contentBlocks; + // Wrap content blocks in async generator for SDK (required format for multimodal prompts) + prompt = (async function* () { + yield { + type: "user", + session_id: "", + message: { + role: "user", + content: contentBlocks, + }, + parent_tool_use_id: null, + }; + })(); } - const currentQuery = query({ prompt, options }); + // Use appropriate provider based on model type + let currentQuery; + if (isCodex) { + // Validate that model string is actually a Codex model + if (modelString.startsWith("claude-")) { + console.error( + `[FeatureExecutor] ERROR: Codex provider selected but Claude model string detected: ${modelString}` + ); + throw new Error( + `Invalid model configuration: Codex provider cannot use Claude model '${modelString}'. 
Please check feature model setting.` + ); + } + + console.log( + `[FeatureExecutor] Using Codex provider for resume with model: ${modelString}` + ); + const provider = this.getProvider(feature); + currentQuery = provider.executeQuery({ + prompt, + model: modelString, + cwd: projectPath, + systemPrompt: promptBuilder.getVerificationPrompt(), + maxTurns: 20, + allowedTools: options.allowedTools, + abortController: abortController, + env: { + OPENAI_API_KEY: process.env.OPENAI_API_KEY, + }, + }); + } else { + // Use Claude SDK + currentQuery = query({ prompt, options }); + } execution.query = currentQuery; let responseText = ""; diff --git a/app/electron/services/feature-loader.js b/app/electron/services/feature-loader.js index d9ae2b44..c5239fe3 100644 --- a/app/electron/services/feature-loader.js +++ b/app/electron/services/feature-loader.js @@ -132,9 +132,22 @@ class FeatureLoader { if (f.summary !== undefined) { featureData.summary = f.summary; } + if (f.model !== undefined) { + featureData.model = f.model; + } + if (f.thinkingLevel !== undefined) { + featureData.thinkingLevel = f.thinkingLevel; + } if (f.error !== undefined) { featureData.error = f.error; } + // Preserve worktree info + if (f.worktreePath !== undefined) { + featureData.worktreePath = f.worktreePath; + } + if (f.branchName !== undefined) { + featureData.branchName = f.branchName; + } return featureData; }); @@ -157,6 +170,69 @@ class FeatureLoader { // Skip verified and waiting_approval (which needs user input) return features.find((f) => f.status !== "verified" && f.status !== "waiting_approval"); } + + /** + * Update worktree info for a feature + * @param {string} featureId - The ID of the feature to update + * @param {string} projectPath - Path to the project + * @param {string|null} worktreePath - Path to the worktree (null to clear) + * @param {string|null} branchName - Name of the feature branch (null to clear) + */ + async updateFeatureWorktree(featureId, projectPath, worktreePath, 
branchName) { + const featuresPath = path.join( + projectPath, + ".automaker", + "feature_list.json" + ); + + const features = await this.loadFeatures(projectPath); + + if (!Array.isArray(features) || features.length === 0) { + console.error("[FeatureLoader] Cannot update worktree: feature list is empty"); + return; + } + + const feature = features.find((f) => f.id === featureId); + + if (!feature) { + console.error(`[FeatureLoader] Feature ${featureId} not found`); + return; + } + + // Update or clear worktree info + if (worktreePath) { + feature.worktreePath = worktreePath; + feature.branchName = branchName; + } else { + delete feature.worktreePath; + delete feature.branchName; + } + + // Save back to file (reuse the same mapping logic) + const toSave = features.map((f) => { + const featureData = { + id: f.id, + category: f.category, + description: f.description, + steps: f.steps, + status: f.status, + }; + if (f.skipTests !== undefined) featureData.skipTests = f.skipTests; + if (f.images !== undefined) featureData.images = f.images; + if (f.imagePaths !== undefined) featureData.imagePaths = f.imagePaths; + if (f.startedAt !== undefined) featureData.startedAt = f.startedAt; + if (f.summary !== undefined) featureData.summary = f.summary; + if (f.model !== undefined) featureData.model = f.model; + if (f.thinkingLevel !== undefined) featureData.thinkingLevel = f.thinkingLevel; + if (f.error !== undefined) featureData.error = f.error; + if (f.worktreePath !== undefined) featureData.worktreePath = f.worktreePath; + if (f.branchName !== undefined) featureData.branchName = f.branchName; + return featureData; + }); + + await fs.writeFile(featuresPath, JSON.stringify(toSave, null, 2), "utf-8"); + console.log(`[FeatureLoader] Updated feature ${featureId}: worktreePath=${worktreePath}, branchName=${branchName}`); + } } module.exports = new FeatureLoader(); diff --git a/app/electron/services/mcp-server-stdio.js b/app/electron/services/mcp-server-stdio.js new file mode 100644 
index 00000000..b7f5c1db --- /dev/null +++ b/app/electron/services/mcp-server-stdio.js @@ -0,0 +1,347 @@ +#!/usr/bin/env node +/** + * Standalone STDIO MCP Server for Automaker Tools + * + * This script runs as a standalone process and communicates via JSON-RPC 2.0 + * over stdin/stdout. It implements the MCP protocol to expose the UpdateFeatureStatus + * tool to Codex CLI. + * + * Environment variables: + * - AUTOMAKER_PROJECT_PATH: Path to the project directory + * - AUTOMAKER_IPC_CHANNEL: IPC channel name for callback communication (optional, uses default) + */ + +const readline = require('readline'); +const path = require('path'); + +// Redirect all console.log output to stderr to avoid polluting MCP stdout +const originalConsoleLog = console.log; +console.log = (...args) => { + console.error(...args); +}; + +// Set up readline interface for line-by-line JSON-RPC input +// IMPORTANT: Use a separate output stream for readline to avoid interfering with JSON-RPC stdout +// We'll write JSON-RPC responses directly to stdout, not through readline +const rl = readline.createInterface({ + input: process.stdin, + output: null, // Don't use stdout for readline output + terminal: false +}); + +let initialized = false; +let projectPath = null; +let ipcChannel = null; + +// Get configuration from environment +projectPath = process.env.AUTOMAKER_PROJECT_PATH || process.cwd(); +ipcChannel = process.env.AUTOMAKER_IPC_CHANNEL || 'mcp:update-feature-status'; + +// Load dependencies (these will be available in the Electron app context) +let featureLoader; +let electron; + +// Try to load Electron IPC if available (when running from Electron app) +try { + // In Electron, we can use IPC directly + if (typeof require !== 'undefined') { + // Check if we're in Electron context + const electronModule = require('electron'); + if (electronModule && electronModule.ipcMain) { + electron = electronModule; + } + } +} catch (e) { + // Not in Electron context, will use alternative method +} + 
+// Load feature loader +// Try multiple paths since this script might be run from different contexts +try { + // First try relative path (when run from electron/services/) + featureLoader = require('./feature-loader'); +} catch (e) { + try { + // Try absolute path resolution + const featureLoaderPath = path.resolve(__dirname, 'feature-loader.js'); + delete require.cache[require.resolve(featureLoaderPath)]; + featureLoader = require(featureLoaderPath); + } catch (e2) { + // If still fails, try from parent directory + try { + featureLoader = require(path.join(__dirname, '..', 'services', 'feature-loader')); + } catch (e3) { + console.error('[McpServerStdio] Error loading feature-loader:', e3.message); + console.error('[McpServerStdio] Tried paths:', [ + './feature-loader', + path.resolve(__dirname, 'feature-loader.js'), + path.join(__dirname, '..', 'services', 'feature-loader') + ]); + process.exit(1); + } + } +} + +/** + * Send JSON-RPC response + * CRITICAL: Must write directly to stdout, not via console.log + * MCP protocol requires ONLY JSON-RPC messages on stdout + */ +function sendResponse(id, result, error = null) { + const response = { + jsonrpc: '2.0', + id + }; + + if (error) { + response.error = error; + } else { + response.result = result; + } + + // Write directly to stdout with newline (MCP uses line-delimited JSON) + process.stdout.write(JSON.stringify(response) + '\n'); +} + +/** + * Send JSON-RPC notification + * CRITICAL: Must write directly to stdout, not via console.log + */ +function sendNotification(method, params) { + const notification = { + jsonrpc: '2.0', + method, + params + }; + + // Write directly to stdout with newline (MCP uses line-delimited JSON) + process.stdout.write(JSON.stringify(notification) + '\n'); +} + +/** + * Handle MCP initialize request + */ +async function handleInitialize(params, id) { + initialized = true; + + sendResponse(id, { + protocolVersion: '2024-11-05', + capabilities: { + tools: {} + }, + serverInfo: { + 
name: 'automaker-tools', + version: '1.0.0' + } + }); +} + +/** + * Handle tools/list request + */ +async function handleToolsList(params, id) { + sendResponse(id, { + tools: [ + { + name: 'UpdateFeatureStatus', + description: 'Update the status of a feature in the feature list. Use this tool instead of directly modifying feature_list.json to safely update feature status. IMPORTANT: If the feature has skipTests=true, you should NOT mark it as verified - instead it will automatically go to waiting_approval status for manual review. Always include a summary of what was done.', + inputSchema: { + type: 'object', + properties: { + featureId: { + type: 'string', + description: 'The ID of the feature to update' + }, + status: { + type: 'string', + enum: ['backlog', 'in_progress', 'verified'], + description: 'The new status for the feature. Note: If skipTests=true, verified will be converted to waiting_approval automatically.' + }, + summary: { + type: 'string', + description: 'A brief summary of what was implemented/changed. This will be displayed on the Kanban card. Example: "Added dark mode toggle. 
Modified: settings.tsx, theme-provider.tsx"' + } + }, + required: ['featureId', 'status'] + } + } + ] + }); +} + +/** + * Handle tools/call request + */ +async function handleToolsCall(params, id) { + const { name, arguments: args } = params; + + if (name !== 'UpdateFeatureStatus') { + sendResponse(id, null, { + code: -32601, + message: `Unknown tool: ${name}` + }); + return; + } + + try { + const { featureId, status, summary } = args; + + if (!featureId || !status) { + sendResponse(id, null, { + code: -32602, + message: 'Missing required parameters: featureId and status are required' + }); + return; + } + + // Load the feature to check skipTests flag + const features = await featureLoader.loadFeatures(projectPath); + const feature = features.find((f) => f.id === featureId); + + if (!feature) { + sendResponse(id, null, { + code: -32602, + message: `Feature ${featureId} not found` + }); + return; + } + + // If agent tries to mark as verified but feature has skipTests=true, convert to waiting_approval + let finalStatus = status; + if (status === 'verified' && feature.skipTests === true) { + finalStatus = 'waiting_approval'; + } + + // Call the update callback via IPC or direct call + // Since we're in a separate process, we need to use IPC to communicate back + // For now, we'll call the feature loader directly since it has the update method + await featureLoader.updateFeatureStatus(featureId, finalStatus, projectPath, summary); + + const statusMessage = finalStatus !== status + ? `Successfully updated feature ${featureId} to status "${finalStatus}" (converted from "${status}" because skipTests=true)${summary ? ` with summary: "${summary}"` : ''}` + : `Successfully updated feature ${featureId} to status "${finalStatus}"${summary ? 
` with summary: "${summary}"` : ''}`; + + sendResponse(id, { + content: [ + { + type: 'text', + text: statusMessage + } + ] + }); + } catch (error) { + console.error('[McpServerStdio] UpdateFeatureStatus error:', error); + sendResponse(id, null, { + code: -32603, + message: `Failed to update feature status: ${error.message}` + }); + } +} + +/** + * Handle JSON-RPC request + */ +async function handleRequest(line) { + let request; + + try { + request = JSON.parse(line); + } catch (e) { + sendResponse(null, null, { + code: -32700, + message: 'Parse error' + }); + return; + } + + // Validate JSON-RPC 2.0 structure + if (request.jsonrpc !== '2.0') { + sendResponse(request.id || null, null, { + code: -32600, + message: 'Invalid Request' + }); + return; + } + + const { method, params, id } = request; + + // Handle notifications (no id) + if (id === undefined) { + // Handle notifications if needed + return; + } + + // Handle requests + try { + switch (method) { + case 'initialize': + await handleInitialize(params, id); + break; + + case 'tools/list': + if (!initialized) { + sendResponse(id, null, { + code: -32002, + message: 'Server not initialized' + }); + return; + } + await handleToolsList(params, id); + break; + + case 'tools/call': + if (!initialized) { + sendResponse(id, null, { + code: -32002, + message: 'Server not initialized' + }); + return; + } + await handleToolsCall(params, id); + break; + + default: + sendResponse(id, null, { + code: -32601, + message: `Method not found: ${method}` + }); + } + } catch (error) { + console.error('[McpServerStdio] Error handling request:', error); + sendResponse(id, null, { + code: -32603, + message: `Internal error: ${error.message}` + }); + } +} + +// Process stdin line by line +rl.on('line', async (line) => { + if (!line.trim()) { + return; + } + + await handleRequest(line); +}); + +// Handle errors +rl.on('error', (error) => { + console.error('[McpServerStdio] Readline error:', error); + process.exit(1); +}); + +// Handle 
process termination +process.on('SIGTERM', () => { + rl.close(); + process.exit(0); +}); + +process.on('SIGINT', () => { + rl.close(); + process.exit(0); +}); + +// Log startup +console.error('[McpServerStdio] Starting MCP server for automaker-tools'); +console.error(`[McpServerStdio] Project path: ${projectPath}`); +console.error(`[McpServerStdio] IPC channel: ${ipcChannel}`); diff --git a/app/electron/services/model-provider.js b/app/electron/services/model-provider.js new file mode 100644 index 00000000..d5a31850 --- /dev/null +++ b/app/electron/services/model-provider.js @@ -0,0 +1,477 @@ +/** + * Model Provider Abstraction Layer + * + * This module provides an abstract interface for model providers (Claude, Codex, etc.) + * allowing the application to use different AI models through a unified API. + */ + +/** + * Base class for model providers + * Concrete implementations should extend this class + */ +class ModelProvider { + constructor(config = {}) { + this.config = config; + this.name = 'base'; + } + + /** + * Get provider name + * @returns {string} Provider name + */ + getName() { + return this.name; + } + + /** + * Execute a query with the model provider + * @param {Object} options Query options + * @param {string} options.prompt The prompt to send + * @param {string} options.model The model to use + * @param {string} options.systemPrompt System prompt + * @param {string} options.cwd Working directory + * @param {number} options.maxTurns Maximum turns + * @param {string[]} options.allowedTools Allowed tools + * @param {Object} options.mcpServers MCP servers configuration + * @param {AbortController} options.abortController Abort controller + * @param {Object} options.thinking Thinking configuration + * @returns {AsyncGenerator} Async generator yielding messages + */ + async *executeQuery(options) { + throw new Error('executeQuery must be implemented by subclass'); + } + + /** + * Detect if this provider's CLI/SDK is installed + * @returns {Promise} 
Installation status + */ + async detectInstallation() { + throw new Error('detectInstallation must be implemented by subclass'); + } + + /** + * Get list of available models for this provider + * @returns {Array} Array of model definitions + */ + getAvailableModels() { + throw new Error('getAvailableModels must be implemented by subclass'); + } + + /** + * Validate provider configuration + * @returns {Object} Validation result { valid: boolean, errors: string[] } + */ + validateConfig() { + throw new Error('validateConfig must be implemented by subclass'); + } + + /** + * Get the full model string for a model key + * @param {string} modelKey Short model key (e.g., 'opus', 'gpt-5.1-codex') + * @returns {string} Full model string + */ + getModelString(modelKey) { + throw new Error('getModelString must be implemented by subclass'); + } + + /** + * Check if provider supports a specific feature + * @param {string} feature Feature name (e.g., 'thinking', 'tools', 'streaming') + * @returns {boolean} Whether the feature is supported + */ + supportsFeature(feature) { + return false; + } +} + +/** + * Claude Provider - Uses Anthropic Claude Agent SDK + */ +class ClaudeProvider extends ModelProvider { + constructor(config = {}) { + super(config); + this.name = 'claude'; + this.sdk = null; + } + + /** + * Try to load a Claude OAuth token from the local CLI config (~/.claude/config.json). + * Returns the token string or null if not found. 
+ */ + loadTokenFromCliConfig() { + try { + const fs = require('fs'); + const path = require('path'); + const configPath = path.join(require('os').homedir(), '.claude', 'config.json'); + if (!fs.existsSync(configPath)) { + return null; + } + const raw = fs.readFileSync(configPath, 'utf-8'); + const parsed = JSON.parse(raw); + // CLI config stores token as oauth_token (newer) or token (older) + return parsed.oauth_token || parsed.token || null; + } catch (err) { + console.warn('[ClaudeProvider] Failed to read CLI config token:', err?.message); + return null; + } + } + + ensureAuthEnv() { + // If API key or token already present, keep as-is. + if (process.env.ANTHROPIC_API_KEY || process.env.CLAUDE_CODE_OAUTH_TOKEN) { + console.log('[ClaudeProvider] Auth already present in environment'); + return true; + } + // Try to hydrate from CLI login config + const token = this.loadTokenFromCliConfig(); + if (token) { + process.env.CLAUDE_CODE_OAUTH_TOKEN = token; + console.log('[ClaudeProvider] Loaded CLAUDE_CODE_OAUTH_TOKEN from ~/.claude/config.json'); + return true; + } + + // Check if CLI is installed but not logged in + try { + const claudeCliDetector = require('./claude-cli-detector'); + const detection = claudeCliDetector.detectClaudeInstallation(); + if (detection.installed && detection.method === 'cli') { + console.error('[ClaudeProvider] Claude CLI is installed but not logged in. Run `claude login` to authenticate.'); + } else { + console.error('[ClaudeProvider] No Anthropic auth found (env empty, ~/.claude/config.json missing token)'); + } + } catch (err) { + console.error('[ClaudeProvider] No Anthropic auth found (env empty, ~/.claude/config.json missing token)'); + } + return false; + } + + /** + * Lazily load the Claude SDK + */ + loadSdk() { + if (!this.sdk) { + this.sdk = require('@anthropic-ai/claude-agent-sdk'); + } + return this.sdk; + } + + async *executeQuery(options) { + // Ensure we have auth; fall back to CLI login token if available. 
+ if (!this.ensureAuthEnv()) { + // Check if CLI is installed to provide better error message + let msg = 'Missing Anthropic auth. Set ANTHROPIC_API_KEY or CLAUDE_CODE_OAUTH_TOKEN environment variable.'; + try { + const claudeCliDetector = require('./claude-cli-detector'); + const detection = claudeCliDetector.detectClaudeInstallation(); + if (detection.installed && detection.method === 'cli') { + msg = 'Claude CLI is installed but not authenticated. Run `claude login` to authenticate, or set ANTHROPIC_API_KEY or CLAUDE_CODE_OAUTH_TOKEN environment variable.'; + } else { + msg = 'Missing Anthropic auth. Set ANTHROPIC_API_KEY or CLAUDE_CODE_OAUTH_TOKEN, or install Claude CLI and run `claude login`.'; + } + } catch (err) { + // Fallback to default message + } + console.error(`[ClaudeProvider] ${msg}`); + yield { type: 'error', error: msg }; + return; + } + + const { query } = this.loadSdk(); + + const sdkOptions = { + model: options.model, + systemPrompt: options.systemPrompt, + maxTurns: options.maxTurns || 1000, + cwd: options.cwd, + mcpServers: options.mcpServers, + allowedTools: options.allowedTools, + permissionMode: options.permissionMode || 'acceptEdits', + sandbox: options.sandbox, + abortController: options.abortController, + }; + + // Add thinking configuration if enabled + if (options.thinking) { + sdkOptions.thinking = options.thinking; + } + + const currentQuery = query({ prompt: options.prompt, options: sdkOptions }); + + for await (const msg of currentQuery) { + yield msg; + } + } + + async detectInstallation() { + const claudeCliDetector = require('./claude-cli-detector'); + return claudeCliDetector.getInstallationInfo(); + } + + getAvailableModels() { + return [ + { + id: 'haiku', + name: 'Claude Haiku', + modelString: 'claude-haiku-4-5', + provider: 'claude', + description: 'Fast and efficient for simple tasks', + tier: 'basic' + }, + { + id: 'sonnet', + name: 'Claude Sonnet', + modelString: 'claude-sonnet-4-20250514', + provider: 'claude', + 
description: 'Balanced performance and capabilities', + tier: 'standard' + }, + { + id: 'opus', + name: 'Claude Opus 4.5', + modelString: 'claude-opus-4-5-20251101', + provider: 'claude', + description: 'Most capable model for complex tasks', + tier: 'premium' + } + ]; + } + + validateConfig() { + const errors = []; + + // Ensure auth is available (try to auto-load from CLI config) + this.ensureAuthEnv(); + + if (!process.env.CLAUDE_CODE_OAUTH_TOKEN && !process.env.ANTHROPIC_API_KEY) { + errors.push('No Claude authentication found. Set CLAUDE_CODE_OAUTH_TOKEN or ANTHROPIC_API_KEY, or run `claude login` to populate ~/.claude/config.json.'); + } + + return { + valid: errors.length === 0, + errors + }; + } + + getModelString(modelKey) { + const modelMap = { + haiku: 'claude-haiku-4-5', + sonnet: 'claude-sonnet-4-20250514', + opus: 'claude-opus-4-5-20251101' + }; + return modelMap[modelKey] || modelMap.opus; + } + + supportsFeature(feature) { + const supportedFeatures = ['thinking', 'tools', 'streaming', 'mcp']; + return supportedFeatures.includes(feature); + } +} + +/** + * Codex Provider - Uses OpenAI Codex CLI + */ +class CodexProvider extends ModelProvider { + constructor(config = {}) { + super(config); + this.name = 'codex'; + } + + async *executeQuery(options) { + const codexExecutor = require('./codex-executor'); + + // Validate that we're not receiving a Claude model string + if (options.model && options.model.startsWith('claude-')) { + const errorMsg = `Codex provider cannot use Claude model '${options.model}'. 
Codex only supports OpenAI models (gpt-5.1-codex-max, gpt-5.1-codex, gpt-5.1-codex-mini, gpt-5.1).`; + console.error(`[CodexProvider] ${errorMsg}`); + yield { + type: 'error', + error: errorMsg + }; + return; + } + + const executeOptions = { + prompt: options.prompt, + model: options.model, + cwd: options.cwd, + systemPrompt: options.systemPrompt, + maxTurns: options.maxTurns || 20, + allowedTools: options.allowedTools, + mcpServers: options.mcpServers, // Pass MCP servers config to executor + env: { + ...process.env, + OPENAI_API_KEY: process.env.OPENAI_API_KEY + } + }; + + // Execute and yield results + const generator = codexExecutor.execute(executeOptions); + for await (const msg of generator) { + yield msg; + } + } + + async detectInstallation() { + const codexCliDetector = require('./codex-cli-detector'); + return codexCliDetector.getInstallationInfo(); + } + + getAvailableModels() { + return [ + { + id: 'gpt-5.1-codex-max', + name: 'GPT-5.1 Codex Max', + modelString: 'gpt-5.1-codex-max', + provider: 'codex', + description: 'Latest flagship - deep and fast reasoning for coding', + tier: 'premium', + default: true + }, + { + id: 'gpt-5.1-codex', + name: 'GPT-5.1 Codex', + modelString: 'gpt-5.1-codex', + provider: 'codex', + description: 'Optimized for code generation', + tier: 'standard' + }, + { + id: 'gpt-5.1-codex-mini', + name: 'GPT-5.1 Codex Mini', + modelString: 'gpt-5.1-codex-mini', + provider: 'codex', + description: 'Faster and cheaper option', + tier: 'basic' + }, + { + id: 'gpt-5.1', + name: 'GPT-5.1', + modelString: 'gpt-5.1', + provider: 'codex', + description: 'Broad world knowledge with strong reasoning', + tier: 'standard' + } + ]; + } + + validateConfig() { + const errors = []; + const codexCliDetector = require('./codex-cli-detector'); + const installation = codexCliDetector.detectCodexInstallation(); + + if (!installation.installed && !process.env.OPENAI_API_KEY) { + errors.push('Codex CLI not installed and no OPENAI_API_KEY found.'); + } + 
+ return { + valid: errors.length === 0, + errors + }; + } + + getModelString(modelKey) { + // Codex models use the key directly as the model string + const modelMap = { + 'gpt-5.1-codex-max': 'gpt-5.1-codex-max', + 'gpt-5.1-codex': 'gpt-5.1-codex', + 'gpt-5.1-codex-mini': 'gpt-5.1-codex-mini', + 'gpt-5.1': 'gpt-5.1' + }; + return modelMap[modelKey] || 'gpt-5.1-codex-max'; + } + + supportsFeature(feature) { + const supportedFeatures = ['tools', 'streaming']; + return supportedFeatures.includes(feature); + } +} + +/** + * Model Provider Factory + * Creates the appropriate provider based on model or provider name + */ +class ModelProviderFactory { + static providers = { + claude: ClaudeProvider, + codex: CodexProvider + }; + + /** + * Get provider for a specific model + * @param {string} modelId Model ID (e.g., 'opus', 'gpt-5.1-codex') + * @returns {ModelProvider} Provider instance + */ + static getProviderForModel(modelId) { + // Check if it's a Claude model + const claudeModels = ['haiku', 'sonnet', 'opus']; + if (claudeModels.includes(modelId)) { + return new ClaudeProvider(); + } + + // Check if it's a Codex/OpenAI model + const codexModels = [ + 'gpt-5.1-codex-max', 'gpt-5.1-codex', 'gpt-5.1-codex-mini', 'gpt-5.1' + ]; + if (codexModels.includes(modelId)) { + return new CodexProvider(); + } + + // Default to Claude + return new ClaudeProvider(); + } + + /** + * Get provider by name + * @param {string} providerName Provider name ('claude' or 'codex') + * @returns {ModelProvider} Provider instance + */ + static getProvider(providerName) { + const ProviderClass = this.providers[providerName]; + if (!ProviderClass) { + throw new Error(`Unknown provider: ${providerName}`); + } + return new ProviderClass(); + } + + /** + * Get all available providers + * @returns {string[]} List of provider names + */ + static getAvailableProviders() { + return Object.keys(this.providers); + } + + /** + * Get all available models across all providers + * @returns {Array} All available 
models + */ + static getAllModels() { + const allModels = []; + for (const providerName of this.getAvailableProviders()) { + const provider = this.getProvider(providerName); + const models = provider.getAvailableModels(); + allModels.push(...models); + } + return allModels; + } + + /** + * Check installation status for all providers + * @returns {Promise} Installation status for each provider + */ + static async checkAllProviders() { + const status = {}; + for (const providerName of this.getAvailableProviders()) { + const provider = this.getProvider(providerName); + status[providerName] = await provider.detectInstallation(); + } + return status; + } +} + +module.exports = { + ModelProvider, + ClaudeProvider, + CodexProvider, + ModelProviderFactory +}; diff --git a/app/electron/services/model-registry.js b/app/electron/services/model-registry.js new file mode 100644 index 00000000..41d1118c --- /dev/null +++ b/app/electron/services/model-registry.js @@ -0,0 +1,320 @@ +/** + * Model Registry - Centralized model definitions and metadata + * + * This module provides a central registry of all available models + * across different providers (Claude, Codex/OpenAI). 
+ */ + +/** + * Model Categories + */ +const MODEL_CATEGORIES = { + CLAUDE: 'claude', + OPENAI: 'openai', + CODEX: 'codex' +}; + +/** + * Model Tiers (capability levels) + */ +const MODEL_TIERS = { + BASIC: 'basic', // Fast, cheap, simple tasks + STANDARD: 'standard', // Balanced performance + PREMIUM: 'premium' // Most capable, complex tasks +}; + +const CODEX_MODEL_IDS = [ + 'gpt-5.1-codex-max', + 'gpt-5.1-codex', + 'gpt-5.1-codex-mini', + 'gpt-5.1' +]; + +/** + * All available models with full metadata + */ +const MODELS = { + // Claude Models + haiku: { + id: 'haiku', + name: 'Claude Haiku', + modelString: 'claude-haiku-4-5', + provider: 'claude', + category: MODEL_CATEGORIES.CLAUDE, + tier: MODEL_TIERS.BASIC, + description: 'Fast and efficient for simple tasks', + capabilities: ['code', 'text', 'tools'], + maxTokens: 8192, + contextWindow: 200000, + supportsThinking: true, + requiresAuth: 'CLAUDE_CODE_OAUTH_TOKEN' + }, + sonnet: { + id: 'sonnet', + name: 'Claude Sonnet', + modelString: 'claude-sonnet-4-20250514', + provider: 'claude', + category: MODEL_CATEGORIES.CLAUDE, + tier: MODEL_TIERS.STANDARD, + description: 'Balanced performance and capabilities', + capabilities: ['code', 'text', 'tools', 'analysis'], + maxTokens: 8192, + contextWindow: 200000, + supportsThinking: true, + requiresAuth: 'CLAUDE_CODE_OAUTH_TOKEN' + }, + opus: { + id: 'opus', + name: 'Claude Opus 4.5', + modelString: 'claude-opus-4-5-20251101', + provider: 'claude', + category: MODEL_CATEGORIES.CLAUDE, + tier: MODEL_TIERS.PREMIUM, + description: 'Most capable model for complex tasks', + capabilities: ['code', 'text', 'tools', 'analysis', 'reasoning'], + maxTokens: 8192, + contextWindow: 200000, + supportsThinking: true, + requiresAuth: 'CLAUDE_CODE_OAUTH_TOKEN', + default: true + }, + + // OpenAI GPT-5.1 Codex Models + 'gpt-5.1-codex-max': { + id: 'gpt-5.1-codex-max', + name: 'GPT-5.1 Codex Max', + modelString: 'gpt-5.1-codex-max', + provider: 'codex', + category: MODEL_CATEGORIES.OPENAI, 
+ tier: MODEL_TIERS.PREMIUM, + description: 'Latest flagship - deep and fast reasoning for coding', + capabilities: ['code', 'text', 'tools', 'reasoning'], + maxTokens: 32768, + contextWindow: 128000, + supportsThinking: false, + requiresAuth: 'OPENAI_API_KEY', + codexDefault: true + }, + 'gpt-5.1-codex': { + id: 'gpt-5.1-codex', + name: 'GPT-5.1 Codex', + modelString: 'gpt-5.1-codex', + provider: 'codex', + category: MODEL_CATEGORIES.OPENAI, + tier: MODEL_TIERS.STANDARD, + description: 'Optimized for code generation', + capabilities: ['code', 'text', 'tools'], + maxTokens: 32768, + contextWindow: 128000, + supportsThinking: false, + requiresAuth: 'OPENAI_API_KEY' + }, + 'gpt-5.1-codex-mini': { + id: 'gpt-5.1-codex-mini', + name: 'GPT-5.1 Codex Mini', + modelString: 'gpt-5.1-codex-mini', + provider: 'codex', + category: MODEL_CATEGORIES.OPENAI, + tier: MODEL_TIERS.BASIC, + description: 'Faster and cheaper option', + capabilities: ['code', 'text'], + maxTokens: 16384, + contextWindow: 128000, + supportsThinking: false, + requiresAuth: 'OPENAI_API_KEY' + }, + 'gpt-5.1': { + id: 'gpt-5.1', + name: 'GPT-5.1', + modelString: 'gpt-5.1', + provider: 'codex', + category: MODEL_CATEGORIES.OPENAI, + tier: MODEL_TIERS.STANDARD, + description: 'Broad world knowledge with strong reasoning', + capabilities: ['code', 'text', 'reasoning'], + maxTokens: 32768, + contextWindow: 128000, + supportsThinking: false, + requiresAuth: 'OPENAI_API_KEY' + } +}; + +/** + * Model Registry class for querying and managing models + */ +class ModelRegistry { + /** + * Get all registered models + * @returns {Object} All models + */ + static getAllModels() { + return MODELS; + } + + /** + * Get model by ID + * @param {string} modelId Model ID + * @returns {Object|null} Model definition or null + */ + static getModel(modelId) { + return MODELS[modelId] || null; + } + + /** + * Get models by provider + * @param {string} provider Provider name ('claude' or 'codex') + * @returns {Object[]} Array of 
models for the provider + */ + static getModelsByProvider(provider) { + return Object.values(MODELS).filter(m => m.provider === provider); + } + + /** + * Get models by category + * @param {string} category Category name + * @returns {Object[]} Array of models in the category + */ + static getModelsByCategory(category) { + return Object.values(MODELS).filter(m => m.category === category); + } + + /** + * Get models by tier + * @param {string} tier Tier name + * @returns {Object[]} Array of models in the tier + */ + static getModelsByTier(tier) { + return Object.values(MODELS).filter(m => m.tier === tier); + } + + /** + * Get default model for a provider + * @param {string} provider Provider name + * @returns {Object|null} Default model or null + */ + static getDefaultModel(provider = 'claude') { + const models = this.getModelsByProvider(provider); + if (provider === 'claude') { + return models.find(m => m.default) || models[0]; + } + if (provider === 'codex') { + return models.find(m => m.codexDefault) || models[0]; + } + return models[0]; + } + + /** + * Get model string (full model name) for a model ID + * @param {string} modelId Model ID + * @returns {string} Full model string + */ + static getModelString(modelId) { + const model = this.getModel(modelId); + return model ? 
model.modelString : modelId; + } + + /** + * Determine provider for a model ID + * @param {string} modelId Model ID + * @returns {string} Provider name ('claude' or 'codex') + */ + static getProviderForModel(modelId) { + const model = this.getModel(modelId); + if (model) { + return model.provider; + } + + // Fallback detection for models not explicitly registered (keeps legacy Codex IDs working) + if (CODEX_MODEL_IDS.includes(modelId)) { + return 'codex'; + } + + return 'claude'; + } + + /** + * Check if a model is a Claude model + * @param {string} modelId Model ID + * @returns {boolean} Whether it's a Claude model + */ + static isClaudeModel(modelId) { + return this.getProviderForModel(modelId) === 'claude'; + } + + /** + * Check if a model is a Codex/OpenAI model + * @param {string} modelId Model ID + * @returns {boolean} Whether it's a Codex model + */ + static isCodexModel(modelId) { + return this.getProviderForModel(modelId) === 'codex'; + } + + /** + * Get models grouped by provider for UI display + * @returns {Object} Models grouped by provider + */ + static getModelsGroupedByProvider() { + return { + claude: this.getModelsByProvider('claude'), + codex: this.getModelsByProvider('codex') + }; + } + + /** + * Get all model IDs as an array + * @returns {string[]} Array of model IDs + */ + static getAllModelIds() { + return Object.keys(MODELS); + } + + /** + * Check if model supports a specific capability + * @param {string} modelId Model ID + * @param {string} capability Capability name + * @returns {boolean} Whether the model supports the capability + */ + static modelSupportsCapability(modelId, capability) { + const model = this.getModel(modelId); + return model ? 
model.capabilities.includes(capability) : false; + } + + /** + * Check if model supports extended thinking + * @param {string} modelId Model ID + * @returns {boolean} Whether the model supports thinking + */ + static modelSupportsThinking(modelId) { + const model = this.getModel(modelId); + return model ? model.supportsThinking : false; + } + + /** + * Get required authentication for a model + * @param {string} modelId Model ID + * @returns {string|null} Required auth env variable name + */ + static getRequiredAuth(modelId) { + const model = this.getModel(modelId); + return model ? model.requiresAuth : null; + } + + /** + * Check if authentication is available for a model + * @param {string} modelId Model ID + * @returns {boolean} Whether auth is available + */ + static hasAuthForModel(modelId) { + const authVar = this.getRequiredAuth(modelId); + if (!authVar) return false; + return !!process.env[authVar]; + } +} + +module.exports = { + MODEL_CATEGORIES, + MODEL_TIERS, + MODELS, + ModelRegistry +}; diff --git a/app/electron/services/worktree-manager.js b/app/electron/services/worktree-manager.js new file mode 100644 index 00000000..1f7fef3c --- /dev/null +++ b/app/electron/services/worktree-manager.js @@ -0,0 +1,576 @@ +const path = require("path"); +const fs = require("fs/promises"); +const { exec, spawn } = require("child_process"); +const { promisify } = require("util"); + +const execAsync = promisify(exec); + +/** + * Worktree Manager - Handles git worktrees for feature isolation + * + * This service creates isolated git worktrees for each feature, allowing: + * - Features to be worked on in isolation without affecting the main branch + * - Easy rollback/revert by simply deleting the worktree + * - Checkpointing - user can see changes in the worktree before merging + */ +class WorktreeManager { + constructor() { + // Cache for worktree info + this.worktreeCache = new Map(); + } + + /** + * Get the base worktree directory path + */ + 
getWorktreeBasePath(projectPath) { + return path.join(projectPath, ".automaker", "worktrees"); + } + + /** + * Generate a safe branch name from feature description + */ + generateBranchName(feature) { + // Create a slug from the description + const slug = feature.description + .toLowerCase() + .replace(/[^a-z0-9\s-]/g, "") // Remove special chars + .replace(/\s+/g, "-") // Replace spaces with hyphens + .substring(0, 40); // Limit length + + // Add feature ID for uniqueness + const shortId = feature.id.replace("feature-", "").substring(0, 12); + return `feature/${shortId}-${slug}`; + } + + /** + * Check if the project is a git repository + */ + async isGitRepo(projectPath) { + try { + await execAsync("git rev-parse --is-inside-work-tree", { cwd: projectPath }); + return true; + } catch { + return false; + } + } + + /** + * Get the current branch name + */ + async getCurrentBranch(projectPath) { + try { + const { stdout } = await execAsync("git rev-parse --abbrev-ref HEAD", { cwd: projectPath }); + return stdout.trim(); + } catch (error) { + console.error("[WorktreeManager] Failed to get current branch:", error); + return null; + } + } + + /** + * Check if a branch exists (local or remote) + */ + async branchExists(projectPath, branchName) { + try { + await execAsync(`git rev-parse --verify ${branchName}`, { cwd: projectPath }); + return true; + } catch { + return false; + } + } + + /** + * List all existing worktrees + */ + async listWorktrees(projectPath) { + try { + const { stdout } = await execAsync("git worktree list --porcelain", { cwd: projectPath }); + const worktrees = []; + const lines = stdout.split("\n"); + + let currentWorktree = null; + for (const line of lines) { + if (line.startsWith("worktree ")) { + if (currentWorktree) { + worktrees.push(currentWorktree); + } + currentWorktree = { path: line.replace("worktree ", "") }; + } else if (line.startsWith("branch ") && currentWorktree) { + currentWorktree.branch = line.replace("branch refs/heads/", ""); + 
} else if (line.startsWith("HEAD ") && currentWorktree) { + currentWorktree.head = line.replace("HEAD ", ""); + } + } + if (currentWorktree) { + worktrees.push(currentWorktree); + } + + return worktrees; + } catch (error) { + console.error("[WorktreeManager] Failed to list worktrees:", error); + return []; + } + } + + /** + * Create a worktree for a feature + * @param {string} projectPath - Path to the main project + * @param {object} feature - Feature object with id and description + * @returns {object} - { success, worktreePath, branchName, error } + */ + async createWorktree(projectPath, feature) { + console.log(`[WorktreeManager] Creating worktree for feature: ${feature.id}`); + + // Check if project is a git repo + if (!await this.isGitRepo(projectPath)) { + return { success: false, error: "Project is not a git repository" }; + } + + const branchName = this.generateBranchName(feature); + const worktreeBasePath = this.getWorktreeBasePath(projectPath); + const worktreePath = path.join(worktreeBasePath, branchName.replace("feature/", "")); + + try { + // Ensure worktree directory exists + await fs.mkdir(worktreeBasePath, { recursive: true }); + + // Check if worktree already exists + const worktrees = await this.listWorktrees(projectPath); + const existingWorktree = worktrees.find( + w => w.path === worktreePath || w.branch === branchName + ); + + if (existingWorktree) { + console.log(`[WorktreeManager] Worktree already exists for feature: ${feature.id}`); + return { + success: true, + worktreePath: existingWorktree.path, + branchName: existingWorktree.branch, + existed: true, + }; + } + + // Get current branch to base the new branch on + const baseBranch = await this.getCurrentBranch(projectPath); + if (!baseBranch) { + return { success: false, error: "Could not determine current branch" }; + } + + // Check if branch already exists + const branchExists = await this.branchExists(projectPath, branchName); + + if (branchExists) { + // Use existing branch + 
console.log(`[WorktreeManager] Using existing branch: ${branchName}`); + await execAsync(`git worktree add "${worktreePath}" ${branchName}`, { cwd: projectPath }); + } else { + // Create new worktree with new branch + console.log(`[WorktreeManager] Creating new branch: ${branchName} based on ${baseBranch}`); + await execAsync(`git worktree add -b ${branchName} "${worktreePath}" ${baseBranch}`, { cwd: projectPath }); + } + + // Copy .automaker directory to worktree (except worktrees directory itself to avoid recursion) + const automakerSrc = path.join(projectPath, ".automaker"); + const automakerDst = path.join(worktreePath, ".automaker"); + + try { + await fs.mkdir(automakerDst, { recursive: true }); + + // Copy feature_list.json + const featureListSrc = path.join(automakerSrc, "feature_list.json"); + const featureListDst = path.join(automakerDst, "feature_list.json"); + try { + const content = await fs.readFile(featureListSrc, "utf-8"); + await fs.writeFile(featureListDst, content, "utf-8"); + } catch { + // Feature list might not exist yet + } + + // Copy app_spec.txt if it exists + const appSpecSrc = path.join(automakerSrc, "app_spec.txt"); + const appSpecDst = path.join(automakerDst, "app_spec.txt"); + try { + const content = await fs.readFile(appSpecSrc, "utf-8"); + await fs.writeFile(appSpecDst, content, "utf-8"); + } catch { + // App spec might not exist yet + } + + // Copy categories.json if it exists + const categoriesSrc = path.join(automakerSrc, "categories.json"); + const categoriesDst = path.join(automakerDst, "categories.json"); + try { + const content = await fs.readFile(categoriesSrc, "utf-8"); + await fs.writeFile(categoriesDst, content, "utf-8"); + } catch { + // Categories might not exist yet + } + } catch (error) { + console.warn("[WorktreeManager] Failed to copy .automaker directory:", error); + } + + // Store worktree info in cache + this.worktreeCache.set(feature.id, { + worktreePath, + branchName, + createdAt: new Date().toISOString(), + 
baseBranch, + }); + + console.log(`[WorktreeManager] Worktree created at: ${worktreePath}`); + return { + success: true, + worktreePath, + branchName, + baseBranch, + existed: false, + }; + } catch (error) { + console.error("[WorktreeManager] Failed to create worktree:", error); + return { success: false, error: error.message }; + } + } + + /** + * Get worktree info for a feature + */ + async getWorktreeInfo(projectPath, featureId) { + // Check cache first + if (this.worktreeCache.has(featureId)) { + return { success: true, ...this.worktreeCache.get(featureId) }; + } + + // Scan worktrees to find matching one + const worktrees = await this.listWorktrees(projectPath); + const worktreeBasePath = this.getWorktreeBasePath(projectPath); + + for (const worktree of worktrees) { + // Check if this worktree is in our worktree directory + if (worktree.path.startsWith(worktreeBasePath)) { + // Check if the feature ID is in the branch name + const shortId = featureId.replace("feature-", "").substring(0, 12); + if (worktree.branch && worktree.branch.includes(shortId)) { + const info = { + worktreePath: worktree.path, + branchName: worktree.branch, + head: worktree.head, + }; + this.worktreeCache.set(featureId, info); + return { success: true, ...info }; + } + } + } + + return { success: false, error: "Worktree not found" }; + } + + /** + * Remove a worktree for a feature + * This effectively reverts all changes made by the agent + */ + async removeWorktree(projectPath, featureId, deleteBranch = false) { + console.log(`[WorktreeManager] Removing worktree for feature: ${featureId}`); + + const worktreeInfo = await this.getWorktreeInfo(projectPath, featureId); + if (!worktreeInfo.success) { + console.log(`[WorktreeManager] No worktree found for feature: ${featureId}`); + return { success: true, message: "No worktree to remove" }; + } + + const { worktreePath, branchName } = worktreeInfo; + + try { + // Remove the worktree + await execAsync(`git worktree remove "${worktreePath}" 
--force`, { cwd: projectPath }); + console.log(`[WorktreeManager] Worktree removed: ${worktreePath}`); + + // Optionally delete the branch too + if (deleteBranch && branchName) { + try { + await execAsync(`git branch -D ${branchName}`, { cwd: projectPath }); + console.log(`[WorktreeManager] Branch deleted: ${branchName}`); + } catch (error) { + console.warn(`[WorktreeManager] Could not delete branch ${branchName}:`, error.message); + } + } + + // Remove from cache + this.worktreeCache.delete(featureId); + + return { success: true, removedPath: worktreePath, removedBranch: deleteBranch ? branchName : null }; + } catch (error) { + console.error("[WorktreeManager] Failed to remove worktree:", error); + return { success: false, error: error.message }; + } + } + + /** + * Get status of changes in a worktree + */ + async getWorktreeStatus(worktreePath) { + try { + const { stdout: statusOutput } = await execAsync("git status --porcelain", { cwd: worktreePath }); + const { stdout: diffStat } = await execAsync("git diff --stat", { cwd: worktreePath }); + const { stdout: commitLog } = await execAsync("git log --oneline -10", { cwd: worktreePath }); + + const files = statusOutput.trim().split("\n").filter(Boolean); + const commits = commitLog.trim().split("\n").filter(Boolean); + + return { + success: true, + modifiedFiles: files.length, + files: files.slice(0, 20), // Limit to 20 files + diffStat: diffStat.trim(), + recentCommits: commits.slice(0, 5), // Last 5 commits + }; + } catch (error) { + console.error("[WorktreeManager] Failed to get worktree status:", error); + return { success: false, error: error.message }; + } + } + + /** + * Get detailed file diff content for a worktree + * Returns unified diff format for all changes + */ + async getFileDiffs(worktreePath) { + try { + // Get both staged and unstaged diffs + const { stdout: unstagedDiff } = await execAsync("git diff --no-color", { + cwd: worktreePath, + maxBuffer: 10 * 1024 * 1024 // 10MB buffer for large diffs + 
}); + const { stdout: stagedDiff } = await execAsync("git diff --cached --no-color", { + cwd: worktreePath, + maxBuffer: 10 * 1024 * 1024 + }); + + // Get list of files with their status + const { stdout: statusOutput } = await execAsync("git status --porcelain", { cwd: worktreePath }); + const files = statusOutput.trim().split("\n").filter(Boolean); + + // Parse file statuses + const fileStatuses = files.map(line => { + const status = line.substring(0, 2); + const filePath = line.substring(3); + return { + status: status.trim() || 'M', + path: filePath, + statusText: this.getStatusText(status) + }; + }); + + // Combine diffs + const combinedDiff = [stagedDiff, unstagedDiff].filter(Boolean).join("\n"); + + return { + success: true, + diff: combinedDiff, + files: fileStatuses, + hasChanges: files.length > 0 + }; + } catch (error) { + console.error("[WorktreeManager] Failed to get file diffs:", error); + return { success: false, error: error.message }; + } + } + + /** + * Get human-readable status text from git status code + */ + getStatusText(status) { + const statusMap = { + 'M': 'Modified', + 'A': 'Added', + 'D': 'Deleted', + 'R': 'Renamed', + 'C': 'Copied', + 'U': 'Updated', + '?': 'Untracked', + '!': 'Ignored' + }; + const firstChar = status.charAt(0); + const secondChar = status.charAt(1); + return statusMap[firstChar] || statusMap[secondChar] || 'Changed'; + } + + /** + * Get diff for a specific file in a worktree + */ + async getFileDiff(worktreePath, filePath) { + try { + // Try to get unstaged diff first, then staged if no unstaged changes + let diff = ''; + try { + const { stdout } = await execAsync(`git diff --no-color -- "${filePath}"`, { + cwd: worktreePath, + maxBuffer: 5 * 1024 * 1024 + }); + diff = stdout; + } catch { + // File might be staged + } + + if (!diff) { + try { + const { stdout } = await execAsync(`git diff --cached --no-color -- "${filePath}"`, { + cwd: worktreePath, + maxBuffer: 5 * 1024 * 1024 + }); + diff = stdout; + } catch { + // 
File might be untracked, show the content + } + } + + // If still no diff, might be an untracked file - show the content + if (!diff) { + try { + const fullPath = path.join(worktreePath, filePath); + const content = await fs.readFile(fullPath, 'utf-8'); + diff = `+++ ${filePath} (new file)\n${content.split('\n').map(l => '+' + l).join('\n')}`; + } catch { + diff = '(Unable to read file content)'; + } + } + + return { + success: true, + diff, + filePath + }; + } catch (error) { + console.error(`[WorktreeManager] Failed to get diff for ${filePath}:`, error); + return { success: false, error: error.message }; + } + } + + /** + * Merge worktree changes back to the main branch + */ + async mergeWorktree(projectPath, featureId, options = {}) { + console.log(`[WorktreeManager] Merging worktree for feature: ${featureId}`); + + const worktreeInfo = await this.getWorktreeInfo(projectPath, featureId); + if (!worktreeInfo.success) { + return { success: false, error: "Worktree not found" }; + } + + const { branchName, worktreePath } = worktreeInfo; + const baseBranch = await this.getCurrentBranch(projectPath); + + try { + // First commit any uncommitted changes in the worktree + const { stdout: status } = await execAsync("git status --porcelain", { cwd: worktreePath }); + if (status.trim()) { + // There are uncommitted changes - commit them + await execAsync("git add -A", { cwd: worktreePath }); + const commitMsg = options.commitMessage || `feat: complete ${featureId}`; + await execAsync(`git commit -m "${commitMsg}"`, { cwd: worktreePath }); + } + + // Merge the feature branch into the current branch in the main repo + if (options.squash) { + await execAsync(`git merge --squash ${branchName}`, { cwd: projectPath }); + const squashMsg = options.squashMessage || `feat: ${featureId} - squashed merge`; + await execAsync(`git commit -m "${squashMsg}"`, { cwd: projectPath }); + } else { + await execAsync(`git merge ${branchName} --no-ff -m "Merge ${branchName}"`, { cwd: projectPath 
}); + } + + console.log(`[WorktreeManager] Successfully merged ${branchName} into ${baseBranch}`); + + // Optionally cleanup worktree after merge + if (options.cleanup) { + await this.removeWorktree(projectPath, featureId, true); + } + + return { + success: true, + mergedBranch: branchName, + intoBranch: baseBranch, + }; + } catch (error) { + console.error("[WorktreeManager] Failed to merge worktree:", error); + return { success: false, error: error.message }; + } + } + + /** + * Sync changes from main branch to worktree (rebase or merge) + */ + async syncWorktree(projectPath, featureId, method = "rebase") { + console.log(`[WorktreeManager] Syncing worktree for feature: ${featureId}`); + + const worktreeInfo = await this.getWorktreeInfo(projectPath, featureId); + if (!worktreeInfo.success) { + return { success: false, error: "Worktree not found" }; + } + + const { worktreePath, baseBranch } = worktreeInfo; + + try { + if (method === "rebase") { + await execAsync(`git rebase ${baseBranch}`, { cwd: worktreePath }); + } else { + await execAsync(`git merge ${baseBranch}`, { cwd: worktreePath }); + } + + return { success: true, method }; + } catch (error) { + console.error("[WorktreeManager] Failed to sync worktree:", error); + return { success: false, error: error.message }; + } + } + + /** + * Get list of all feature worktrees + */ + async getAllFeatureWorktrees(projectPath) { + const worktrees = await this.listWorktrees(projectPath); + const worktreeBasePath = this.getWorktreeBasePath(projectPath); + + return worktrees.filter(w => + w.path.startsWith(worktreeBasePath) && + w.branch && + w.branch.startsWith("feature/") + ); + } + + /** + * Cleanup orphaned worktrees (worktrees without matching features) + */ + async cleanupOrphanedWorktrees(projectPath, activeFeatureIds) { + console.log("[WorktreeManager] Cleaning up orphaned worktrees..."); + + const worktrees = await this.getAllFeatureWorktrees(projectPath); + const cleaned = []; + + for (const worktree of 
worktrees) { + // Extract feature ID from branch name + const branchParts = worktree.branch.replace("feature/", "").split("-"); + const shortId = branchParts[0]; + + // Check if any active feature has this short ID + const hasMatchingFeature = activeFeatureIds.some(id => { + const featureShortId = id.replace("feature-", "").substring(0, 12); + return featureShortId === shortId; + }); + + if (!hasMatchingFeature) { + console.log(`[WorktreeManager] Removing orphaned worktree: ${worktree.path}`); + try { + await execAsync(`git worktree remove "${worktree.path}" --force`, { cwd: projectPath }); + await execAsync(`git branch -D ${worktree.branch}`, { cwd: projectPath }); + cleaned.push(worktree.path); + } catch (error) { + console.warn(`[WorktreeManager] Failed to cleanup worktree ${worktree.path}:`, error.message); + } + } + } + + return { success: true, cleaned }; + } +} + +module.exports = new WorktreeManager(); diff --git a/app/example/AppSidebar.tsx b/app/example/AppSidebar.tsx deleted file mode 100644 index 1bf707da..00000000 --- a/app/example/AppSidebar.tsx +++ /dev/null @@ -1,417 +0,0 @@ -"use client"; - -import { useState, useEffect, useRef } from "react"; -import Link from "next/link"; -import { usePathname } from "next/navigation"; -import { - Sparkles, - Wand2, - LayoutGrid, - Layers, - FolderOpen, - FileText, - List, - Cpu, - Search, - Share2, - Trash2, - BarChart3, - Settings, - PanelLeftClose, - PanelLeft, - Home, - LogOut, - User, - CreditCard, -} from "lucide-react"; - -interface AppSidebarProps { - user: any; - creditsBalance: number | null; -} - -interface NavItem { - href: string; - icon: any; - label: string; -} - -interface NavSection { - label?: string; - items: NavItem[]; -} - -export function AppSidebar({ user, creditsBalance }: AppSidebarProps) { - const pathname = usePathname(); - const [sidebarCollapsed, setSidebarCollapsed] = useState(false); - const [userMenuOpen, setUserMenuOpen] = useState(false); - const userMenuRef = useRef(null); - - 
// Close dropdown when clicking outside - useEffect(() => { - function handleClickOutside(event: MouseEvent) { - if ( - userMenuRef.current && - !userMenuRef.current.contains(event.target as Node) - ) { - setUserMenuOpen(false); - } - } - - if (userMenuOpen) { - document.addEventListener("mousedown", handleClickOutside); - return () => { - document.removeEventListener("mousedown", handleClickOutside); - }; - } - }, [userMenuOpen]); - - const navSections: NavSection[] = [ - { - items: [ - { href: "/generate", icon: Home, label: "Overview" }, - { href: "/generate/canvas", icon: Wand2, label: "Canvas" }, - ], - }, - { - label: "Content", - items: [ - { href: "/generate/gallery", icon: LayoutGrid, label: "Gallery" }, - { href: "/generate/collections", icon: Layers, label: "Collections" }, - { href: "/generate/projects", icon: FolderOpen, label: "Projects" }, - { href: "/generate/prompts", icon: FileText, label: "Prompts" }, - ], - }, - { - label: "Tools", - items: [ - { href: "/generate/batch", icon: List, label: "Batch" }, - { href: "/generate/models", icon: Cpu, label: "Models" }, - ], - }, - { - label: "Manage", - items: [ - { href: "/generate/shared", icon: Share2, label: "Shared" }, - { href: "/generate/trash", icon: Trash2, label: "Trash" }, - ], - }, - ]; - - const isActiveRoute = (href: string) => { - if (href === "/generate") { - return pathname === "/generate"; - } - return pathname?.startsWith(href); - }; - - return ( - - ); -} diff --git a/app/example/page.tsx b/app/example/page.tsx deleted file mode 100644 index fa379fd9..00000000 --- a/app/example/page.tsx +++ /dev/null @@ -1,2590 +0,0 @@ -"use client"; - -import { useState, useEffect, Suspense, useRef } from "react"; -import { useRouter, useSearchParams } from "next/navigation"; -import Link from "next/link"; -import { toast } from "sonner"; -import { Button } from "@/components/ui/button"; -import { Input } from "@/components/ui/input"; -import { Label } from "@/components/ui/label"; -import { Textarea 
} from "@/components/ui/textarea"; -import { Select } from "@/components/ui/select"; -import { Slider } from "@/components/ui/slider"; -import { FileInput } from "@/components/ui/file-input"; -import { - Dialog, - DialogContent, - DialogHeader, - DialogBody, - DialogFooter, -} from "@/components/ui/dialog"; -import { replaceVariables } from "@/lib/prompt-template"; -import { - Loader2, - Sparkles, - Wand2, - LayoutGrid, - Layers, - History, - Settings, - Bell, - HelpCircle, - Clock, - List, - Maximize2, - Copy, - Download, - SlidersHorizontal, - RotateCcw, - X, - Wand, - Dices, - ChevronRight, - ChevronDown, - ChevronLeft, - Plus, - ImagePlus, - Save, - Heart, - FolderOpen, - FileText, - PanelLeftClose, - PanelLeft, -} from "lucide-react"; -import Image from "next/image"; -import { ParameterTooltip } from "@/components/ui/tooltip"; -import { EmptyState } from "@/components/ui/empty-state"; -import { ImageLightbox } from "@/components/ImageLightbox"; -import { MediaRenderer } from "@/components/MediaRenderer"; -import { useSession } from "@/hooks/use-auth"; -import { useSettings } from "@/hooks/use-settings"; -import { usePresets, useCreatePreset } from "@/hooks/use-presets"; -import { usePrompts, useCreatePrompt } from "@/hooks/use-prompts"; -import { useImages, useImage, useToggleFavorite, useCreateVariation } from "@/hooks/use-images"; -import { useSubmitJob, useJobStatus } from "@/hooks/use-jobs"; -import { useUpload } from "@/hooks/use-upload"; -import { useQueryClient } from "@tanstack/react-query"; - -// Parameter tooltips content -const PARAMETER_TOOLTIPS = { - aspectRatio: - "The width-to-height ratio of the generated image. Square (1:1) works well for icons, while widescreen (16:9) is great for landscapes.", - imageCount: - "The number of images to generate in one batch. More images give you more options to choose from.", - guidance: - "Controls how closely the AI follows your prompt. 
Higher values (10-20) follow the prompt more strictly, while lower values (1-5) give more creative freedom.", - steps: - "The number of refinement iterations. More steps (50-150) produce higher quality but take longer. 20-30 steps is usually sufficient.", - seed: "A number that determines the random starting point. Using the same seed with the same prompt produces identical results, useful for variations.", - model: - "The AI model to use for generation. Different models have different strengths, speeds, and styles.", - negativePrompt: - "Things you don't want to appear in the image. For example: 'blurry, low quality, distorted'.", - styleModifiers: - "Quick-add keywords that enhance your prompt with common quality and style improvements.", - cameraModifiers: - "Add camera types, lenses, focal lengths, and apertures to achieve specific photographic looks and effects.", - depthAngleModifiers: - "Control camera angles, shot distances, and perspectives to create compelling compositions and viewpoints.", -}; - -interface GeneratedImage { - id: string; - fileUrl: string; - width: number; - height: number; - prompt: string; - modelId: string; - format?: string | null; - isFavorite?: boolean; - rating?: number | null; - parameters?: any; - createdAt?: string; - negativePrompt?: string; -} - -interface GenerationJob { - id: string; - status: "pending" | "processing" | "completed" | "failed"; - prompt: string; - modelId: string; - parameters: any; - errorMessage?: string; - createdAt: Date; - startedAt?: Date; - completedAt?: Date; - images?: GeneratedImage[]; -} - -const MODELS = [ - { - id: "flux-pro", - name: "Flux Pro", - description: "Highest quality", - supportsTextToImage: true, - supportsImageToImage: true, - supportsImageToVideo: false, - }, - { - id: "flux-dev", - name: "Flux Dev", - description: "Balanced speed/quality", - supportsTextToImage: true, - supportsImageToImage: true, - supportsImageToVideo: false, - }, - { - id: "flux-schnell", - name: "Flux Schnell", 
- description: "Fast generation", - supportsTextToImage: true, - supportsImageToImage: true, - supportsImageToVideo: false, - }, - { - id: "sdxl", - name: "Stable Diffusion XL", - description: "Versatile", - supportsTextToImage: true, - supportsImageToImage: true, - supportsImageToVideo: false, - }, - { - id: "wan-25", - name: "WAN 2.5", - description: "Image to Video", - supportsTextToImage: false, - supportsImageToImage: false, - supportsImageToVideo: true, - }, -]; - -// Helper function to get available models for a generation mode -const getAvailableModels = (mode: "text-to-image" | "image-to-image" | "image-to-video") => { - return MODELS.filter((model) => { - switch (mode) { - case "text-to-image": - return model.supportsTextToImage; - case "image-to-image": - return model.supportsImageToImage; - case "image-to-video": - return model.supportsImageToVideo; - default: - return false; - } - }); -}; - -const ASPECT_RATIOS = [ - { id: "square", label: "1:1", w: 5, h: 5 }, - { id: "portrait_4_3", label: "3:4", w: 3, h: 4 }, - { id: "landscape_4_3", label: "4:3", w: 4, h: 3 }, - { id: "landscape_16_9", label: "16:9", w: 7, h: 4 }, -]; - -const STYLE_MODIFIERS = [ - "4K", - "8K", - "Detailed", - "Cinematic", - "Octane Render", - "Ray Tracing", - "Ultra realistic", - "High quality", - "Award winning", - "Professional", -]; - -const CAMERA_MODIFIERS = [ - "DSLR", - "Mirrorless camera", - "Medium format", - "Large format", - "Film camera", - "14mm lens", - "24mm lens", - "35mm lens", - "50mm lens", - "85mm lens", - "135mm lens", - "200mm lens", - "Wide angle lens", - "Telephoto lens", - "Macro lens", - "Fisheye lens", - "Prime lens", - "Zoom lens", - "f/1.2", - "f/1.4", - "f/1.8", - "f/2.8", - "f/4", - "f/5.6", - "Shallow depth of field", - "Deep depth of field", - "Bokeh", - "Tilt-shift", - "Anamorphic", -]; - -const DEPTH_ANGLE_MODIFIERS = [ - "Extreme close-up", - "Close-up shot", - "Medium close-up", - "Medium shot", - "Medium long shot", - "Long shot", - "Extreme 
long shot", - "Full body shot", - "Cowboy shot", - "Eye level angle", - "High angle", - "Low angle", - "Bird's eye view", - "Worm's eye view", - "Dutch angle", - "Overhead shot", - "Aerial view", - "Ground level", - "Over-the-shoulder", - "Point of view shot", - "First-person view", - "Third-person view", - "Side profile", - "Three-quarter view", - "Front view", - "Back view", - "Isometric view", - "Forced perspective", - "Macro photography", - "Micro lens shot", - "Tracking shot", - "Establishing shot", - "Two-shot", -]; - -function GeneratePageContent() { - const router = useRouter(); - const searchParams = useSearchParams(); - const queryClient = useQueryClient(); - - // TanStack Query hooks - const { data: session, isPending: sessionLoading } = useSession(); - const { data: settingsData } = useSettings(); - const { data: presetsData } = usePresets(!!session); - const { data: promptsData } = usePrompts(!!session); - const { data: historyData } = useImages({ limit: 20 }); - const createPresetMutation = useCreatePreset(); - const createPromptMutation = useCreatePrompt(); - const toggleFavoriteMutation = useToggleFavorite(); - const createVariationMutation = useCreateVariation(); - const submitJobMutation = useSubmitJob(); - const uploadMutation = useUpload(); - - const [prompt, setPrompt] = useState(""); - const [negativePrompt, setNegativePrompt] = useState(""); - const [model, setModel] = useState("flux-pro"); - const [aspectRatio, setAspectRatio] = useState("landscape_16_9"); - const [numImages, setNumImages] = useState(1); - const [steps, setSteps] = useState(28); - const [guidance, setGuidance] = useState(3.5); - const [seed, setSeed] = useState(""); - const [loading, setLoading] = useState(false); - const [error, setError] = useState(""); - const [generatedImages, setGeneratedImages] = useState([]); - const [generationTime, setGenerationTime] = useState(null); - - // Job-based generation state - const [jobs, setJobs] = useState([]); - const processedJobsRef 
= useRef>(new Set()); - const autoStartTriggeredRef = useRef(false); - - // Job status polling - const pendingJobIds = jobs - .filter((j) => j.status === "pending" || j.status === "processing") - .map((j) => j.id); - const { data: jobStatusData } = useJobStatus(pendingJobIds, { - enabled: pendingJobIds.length > 0, - }); - - // UI States - const [showNegativePrompt, setShowNegativePrompt] = useState(false); - const [mobileSidebarOpen, setMobileSidebarOpen] = useState(false); - const [showAdvanced, setShowAdvanced] = useState(false); - - // Image-to-image mode - const [generationMode, setGenerationMode] = useState< - "text-to-image" | "image-to-image" | "image-to-video" - >("text-to-image"); - const [sourceImage, setSourceImage] = useState(null); // URL or data URL - const [sourceImageFile, setSourceImageFile] = useState(null); - const [strength, setStrength] = useState(0.75); // 0-1, how much to transform - const [isDragging, setIsDragging] = useState(false); - - // Video generation state (for WAN 2.5) - const [resolution, setResolution] = useState<"480p" | "720p" | "1080p">("1080p"); - const [duration, setDuration] = useState<5 | 10>(5); - - // Preset state - const [showSavePresetModal, setShowSavePresetModal] = useState(false); - const [savePresetData, setSavePresetData] = useState({ - name: "", - description: "", - }); - - // Save Prompt state - const [showSaveModal, setShowSaveModal] = useState(false); - const [savePromptData, setSavePromptData] = useState({ - title: "", - category: "", - }); - - // Template/Prompt loading state - const [showLoadPromptModal, setShowLoadPromptModal] = useState(false); - const [showTemplateVariablesModal, setShowTemplateVariablesModal] = - useState(false); - const [selectedTemplate, setSelectedTemplate] = useState(null); - const [templateVariableValues, setTemplateVariableValues] = useState< - Record - >({}); - - // Style modifiers state - const [activeStyles, setActiveStyles] = useState([]); - - // Camera modifiers state - const 
[activeCameras, setActiveCameras] = useState([]); - - // Depth/Angle modifiers state - const [activeDepthAngles, setActiveDepthAngles] = useState([]); - - // View mode state - const [viewMode, setViewMode] = useState<"grid" | "list">("grid"); - - // Lightbox state - const [lightboxOpen, setLightboxOpen] = useState(false); - const [selectedImageIndex, setSelectedImageIndex] = useState(0); - - // Derived state from queries - const presets = presetsData?.presets || []; - const savedPrompts = promptsData?.prompts || []; - const historyImages = historyData?.images || []; - - const handleAddStyleModifier = (style: string) => { - // Add the style to active styles if not already there - if (!activeStyles.includes(style)) { - setActiveStyles([...activeStyles, style]); - // Add the style to the prompt if it's not already there - const styleText = style.toLowerCase(); - if (!prompt.toLowerCase().includes(styleText)) { - setPrompt((prev) => (prev ? `${prev}, ${style}` : style)); - } - } - }; - - const handleRemoveStyleModifier = (style: string) => { - setActiveStyles(activeStyles.filter((s) => s !== style)); - // Remove the style from the prompt - const styleRegex = new RegExp( - `(,?\\s*${style}\\s*,?|${style}\\s*,|,\\s*${style})`, - "gi" - ); - const updatedPrompt = prompt - .replace(styleRegex, ",") - .replace(/,\s*,/g, ",") - .replace(/^\s*,\s*/, "") - .replace(/\s*,\s*$/, "") - .trim(); - setPrompt(updatedPrompt); - }; - - const handleAddCameraModifier = (camera: string) => { - if (!activeCameras.includes(camera)) { - setActiveCameras([...activeCameras, camera]); - const cameraText = camera.toLowerCase(); - if (!prompt.toLowerCase().includes(cameraText)) { - setPrompt((prev) => (prev ? 
`${prev}, ${camera}` : camera)); - } - } - }; - - const handleRemoveCameraModifier = (camera: string) => { - setActiveCameras(activeCameras.filter((c) => c !== camera)); - const cameraRegex = new RegExp( - `(,?\\s*${camera}\\s*,?|${camera}\\s*,|,\\s*${camera})`, - "gi" - ); - const updatedPrompt = prompt - .replace(cameraRegex, ",") - .replace(/,\s*,/g, ",") - .replace(/^\s*,\s*/, "") - .replace(/\s*,\s*$/, "") - .trim(); - setPrompt(updatedPrompt); - }; - - const handleAddDepthAngleModifier = (modifier: string) => { - if (!activeDepthAngles.includes(modifier)) { - setActiveDepthAngles([...activeDepthAngles, modifier]); - const modifierText = modifier.toLowerCase(); - if (!prompt.toLowerCase().includes(modifierText)) { - setPrompt((prev) => (prev ? `${prev}, ${modifier}` : modifier)); - } - } - }; - - const handleRemoveDepthAngleModifier = (modifier: string) => { - setActiveDepthAngles(activeDepthAngles.filter((m) => m !== modifier)); - const modifierRegex = new RegExp( - `(,?\\s*${modifier}\\s*,?|${modifier}\\s*,|,\\s*${modifier})`, - "gi" - ); - const updatedPrompt = prompt - .replace(modifierRegex, ",") - .replace(/,\s*,/g, ",") - .replace(/^\s*,\s*/, "") - .replace(/\s*,\s*$/, "") - .trim(); - setPrompt(updatedPrompt); - }; - - const handleSourceImageUpload = async ( - e: React.ChangeEvent - ) => { - const file = e.target.files?.[0]; - if (!file) return; - - // Preview the image - const reader = new FileReader(); - reader.onload = (event) => { - setSourceImage(event.target?.result as string); - setSourceImageFile(file); - }; - reader.readAsDataURL(file); - }; - - const handleDragEnter = (e: React.DragEvent) => { - e.preventDefault(); - e.stopPropagation(); - setIsDragging(true); - }; - - const handleDragLeave = (e: React.DragEvent) => { - e.preventDefault(); - e.stopPropagation(); - setIsDragging(false); - }; - - const handleDragOver = (e: React.DragEvent) => { - e.preventDefault(); - e.stopPropagation(); - }; - - const handleImageDragStart = (e: 
React.DragEvent, imageUrl: string) => { - e.dataTransfer.setData("image/url", imageUrl); - e.dataTransfer.effectAllowed = "copy"; - }; - - const handleDrop = (e: React.DragEvent) => { - e.preventDefault(); - e.stopPropagation(); - setIsDragging(false); - - // Check if dragging from gallery first - const imageUrl = e.dataTransfer.getData("image/url"); - if (imageUrl) { - setSourceImage(imageUrl); - setSourceImageFile(null); // Clear file if using existing image - return; - } - - // Otherwise handle file drop - const files = e.dataTransfer.files; - if (files && files.length > 0) { - const file = files[0]; - - // Check if it's an image - if (file.type.startsWith("image/")) { - const reader = new FileReader(); - reader.onload = (event) => { - setSourceImage(event.target?.result as string); - setSourceImageFile(file); - }; - reader.readAsDataURL(file); - } else { - toast.error("Invalid file type", { - description: "Please upload an image file", - }); - } - } - }; - - const handleSelectExistingImage = (imageUrl: string) => { - setSourceImage(imageUrl); - setSourceImageFile(null); // Clear file if using existing image - }; - - const handleDownload = async (image: GeneratedImage) => { - try { - const response = await fetch(image.fileUrl); - const blob = await response.blob(); - const url = window.URL.createObjectURL(blob); - const link = document.createElement("a"); - link.href = url; - link.download = `${image.id}.png`; - document.body.appendChild(link); - link.click(); - document.body.removeChild(link); - window.URL.revokeObjectURL(url); - toast.success("Image downloaded!", { - description: "Image saved to your downloads folder", - }); - } catch (error) { - console.error("Failed to download image:", error); - setError("Failed to download image. Please try again."); - toast.error("Download failed", { - description: "Failed to download image. 
Please try again.", - }); - } - }; - - const handleToggleFavorite = async (imageId: string) => { - toggleFavoriteMutation.mutate(imageId, { - onSuccess: () => { - setGeneratedImages((prev) => - prev.map((img) => - img.id === imageId ? { ...img, isFavorite: true } : img - ) - ); - toast.success("Added to favorites!"); - }, - onError: (error) => { - console.error("Failed to toggle favorite:", error); - toast.error("Failed to add to favorites"); - }, - }); - }; - - const handleGenerateVariation = async (image: GeneratedImage) => { - setError(""); - - createVariationMutation.mutate( - { imageId: image.id }, - { - onSuccess: (data) => { - setGeneratedImages((prev) => [data.image, ...prev]); - setGenerationTime(data.generationTime || null); - toast.success("Variation created!", { - description: "New variation added to your gallery", - }); - }, - onError: (err) => { - console.error("Failed to generate variation:", err); - const errorMessage = - err instanceof Error - ? err.message - : "Failed to generate variation. 
Please try again."; - setError(errorMessage); - toast.error("Variation failed", { - description: errorMessage, - }); - }, - } - ); - }; - - // Load prompt from URL parameters and handle variation/remix/upscale - useEffect(() => { - const urlPrompt = searchParams.get("prompt"); - const urlNegativePrompt = searchParams.get("negativePrompt"); - const urlModel = searchParams.get("model"); - const variationFrom = searchParams.get("variationFrom"); - const remixFrom = searchParams.get("remixFrom"); - const upscaleFrom = searchParams.get("upscaleFrom"); - - if (urlPrompt) setPrompt(urlPrompt); - if (urlNegativePrompt) setNegativePrompt(urlNegativePrompt); - if (urlModel) { - // Validate that it's a valid model ID - const validModels = MODELS.map((m) => m.id); - if (validModels.includes(urlModel)) { - setModel(urlModel); - } - } - - // Handle variation from existing image - if (variationFrom) { - loadImageForVariation(variationFrom); - } - - // Handle remix from existing image - if (remixFrom) { - loadImageForRemix(remixFrom); - } - - // Handle upscale from existing image - if (upscaleFrom) { - loadImageForUpscale(upscaleFrom); - } - }, [searchParams]); - - // Auto-start generation when variation is loaded - useEffect(() => { - const autoStart = searchParams.get("autoStart"); - - if (autoStart === "true" && prompt && !autoStartTriggeredRef.current) { - autoStartTriggeredRef.current = true; - // Small delay to ensure all state is set - setTimeout(() => { - handleGenerate(); - // Clean up URL to remove query parameters after starting the job - router.replace("/generate/canvas", { scroll: false }); - }, 100); - } - }, [prompt, searchParams, router]); - - const loadImageForVariation = async (imageId: string) => { - try { - const response = await fetch(`/api/images/${imageId}`); - if (response.ok) { - const data = await response.json(); - const image = data.image; - - // Populate form with parent image settings - setPrompt(image.prompt || ""); - 
setNegativePrompt(image.negativePrompt || ""); - setModel(image.modelId || "flux-pro"); - if (image.parameters) { - if (image.parameters.aspectRatio) - setAspectRatio(image.parameters.aspectRatio); - if (image.parameters.steps) setSteps(image.parameters.steps); - if (image.parameters.guidance) setGuidance(image.parameters.guidance); - // Generate a new seed for variation (slightly different from parent) - if (image.parameters.seed) { - const parentSeed = parseInt(image.parameters.seed) || 0; - const newSeed = parentSeed + Math.floor(Math.random() * 1000) + 1; - setSeed(newSeed.toString()); - } - } - toast.success("Variation settings loaded!", { - description: "Seed has been adjusted. Click Generate to create variation.", - }); - } - } catch (error) { - console.error("Failed to load image for variation:", error); - setError("Failed to load parent image"); - } - }; - - const loadImageForRemix = async (imageId: string) => { - try { - const response = await fetch(`/api/images/${imageId}`); - if (response.ok) { - const data = await response.json(); - const image = data.image; - - // Populate form with parent image settings but clear prompt for remix - setPrompt(""); // User will enter new prompt - setNegativePrompt(image.negativePrompt || ""); - setModel(image.modelId || "flux-pro"); - if (image.parameters) { - if (image.parameters.aspectRatio) - setAspectRatio(image.parameters.aspectRatio); - if (image.parameters.steps) setSteps(image.parameters.steps); - if (image.parameters.guidance) setGuidance(image.parameters.guidance); - if (image.parameters.seed) setSeed(image.parameters.seed); - } - - // Set to image-to-image mode and use the original image as the source - setGenerationMode("image-to-image"); - setSourceImage(image.fileUrl); - setSourceImageFile(null); - - toast.success("Remix settings loaded!", { - description: "Enter a new prompt and click Generate to remix with the original image.", - }); - } - } catch (error) { - console.error("Failed to load image for 
remix:", error); - setError("Failed to load parent image"); - } - }; - - const loadImageForUpscale = async (imageId: string) => { - try { - const response = await fetch(`/api/images/${imageId}`); - if (response.ok) { - const data = await response.json(); - toast.info("Upscaling feature coming soon!", { - description: "For now, you can download and use external upscaling tools.", - }); - } - } catch (error) { - console.error("Failed to load image for upscale:", error); - setError("Failed to load parent image"); - } - }; - - // Process job status updates from the hook - useEffect(() => { - if (!jobStatusData?.jobs) { - return; - } - - const updatedJobs = jobStatusData.jobs; - - setJobs((prevJobs) => - prevJobs.map((job) => { - const update = updatedJobs.find((u: any) => u.id === job.id); - if (update) { - // If job just completed, show toast notification - if ( - job.status !== "completed" && - update.status === "completed" && - !processedJobsRef.current.has(job.id) - ) { - // Mark this job as processed - processedJobsRef.current.add(job.id); - - toast.success( - `Generation completed for "${update.prompt.substring( - 0, - 30 - )}..."`, - { - description: `Generated ${ - update.images?.length || 0 - } image${update.images?.length !== 1 ? 
"s" : ""}`, - } - ); - - // Add completed images to generatedImages - if (update.images && update.images.length > 0) { - setGeneratedImages((prev) => [...update.images, ...prev]); - } - - // Refresh history and billing - queryClient.invalidateQueries({ queryKey: ["images"] }); - queryClient.invalidateQueries({ queryKey: ["billing"] }); - } else if ( - job.status !== "failed" && - update.status === "failed" && - !processedJobsRef.current.has(job.id) - ) { - // Mark this job as processed - processedJobsRef.current.add(job.id); - - toast.error( - `Generation failed for "${update.prompt.substring( - 0, - 30 - )}..."`, - { - description: - update.errorMessage || "Unknown error occurred", - } - ); - } - - return { - ...job, - ...update, - }; - } - return job; - }) - ); - }, [jobStatusData]); - - - const handleSavePreset = async (e: React.FormEvent) => { - e.preventDefault(); - - createPresetMutation.mutate( - { - name: savePresetData.name, - description: savePresetData.description || null, - modelId: model, - parameters: { - model, - width: 0, - height: 0, - steps, - guidanceScale: guidance, - aspectRatio, - numImages, - seed: seed || null, - }, - }, - { - onSuccess: () => { - setShowSavePresetModal(false); - setSavePresetData({ name: "", description: "" }); - setError(""); - toast.success("Preset saved!", { - description: `"${savePresetData.name}" is ready to use`, - }); - }, - onError: (err) => { - const errorMessage = - err instanceof Error ? 
err.message : "Failed to save preset"; - setError(errorMessage); - toast.error("Failed to save preset", { - description: errorMessage, - }); - }, - } - ); - }; - - const handleLoadPreset = (presetId: string) => { - const preset = presets.find((p) => p.id === presetId); - if (preset) { - setModel(preset.modelId); - const params = preset.parameters || {}; - setAspectRatio(params.aspectRatio || "landscape_16_9"); - setNumImages(params.numImages || 1); - setSteps(params.steps || 28); - setGuidance(params.guidance || 3.5); - setSeed(params.seed || ""); - } - }; - - const handleSavePrompt = async (e: React.FormEvent) => { - e.preventDefault(); - - createPromptMutation.mutate( - { - text: prompt, - name: savePromptData.title, - category: savePromptData.category || undefined, - tags: [], - }, - { - onSuccess: () => { - setShowSaveModal(false); - setSavePromptData({ title: "", category: "" }); - setError(""); - toast.success("Prompt saved!", { - description: `"${savePromptData.title}" has been saved to your library`, - }); - }, - onError: (err) => { - const errorMessage = - err instanceof Error ? 
err.message : "Failed to save prompt"; - setError(errorMessage); - toast.error("Failed to save prompt", { - description: errorMessage, - }); - }, - } - ); - }; - - const handleLoadPrompt = (promptItem: any) => { - // Check if this is a template with variables - if (promptItem.isTemplate && promptItem.templateVariables?.length > 0) { - // Open modal to fill in variables - setSelectedTemplate(promptItem); - // Initialize empty values for all variables - const initialValues: Record = {}; - promptItem.templateVariables.forEach((varName: string) => { - initialValues[varName] = ""; - }); - setTemplateVariableValues(initialValues); - setShowLoadPromptModal(false); - setShowTemplateVariablesModal(true); - } else { - // Regular prompt, just load it - setPrompt(promptItem.promptText); - setNegativePrompt(promptItem.negativePrompt || ""); - setShowLoadPromptModal(false); - } - }; - - const handleApplyTemplate = () => { - if (!selectedTemplate) return; - - // Replace variables in the template - const filledPrompt = replaceVariables( - selectedTemplate.promptText, - templateVariableValues - ); - setPrompt(filledPrompt); - - if (selectedTemplate.negativePrompt) { - const filledNegativePrompt = replaceVariables( - selectedTemplate.negativePrompt, - templateVariableValues - ); - setNegativePrompt(filledNegativePrompt); - } - - // Close modal and reset - setShowTemplateVariablesModal(false); - setSelectedTemplate(null); - setTemplateVariableValues({}); - }; - - const handleHistoryItemClick = (image: GeneratedImage) => { - router.push(`/generate/images/${image.id}?returnTo=/generate/canvas`); - }; - - const handleGenerate = async () => { - if (!session?.user) { - setError("Please sign in to generate images"); - return; - } - if (!prompt) return; - - // Check for source image in img2img mode - if (generationMode === "image-to-image" && !sourceImage) { - setError("Please select or upload a source image"); - return; - } - - // Check for source image in image-to-video mode - if 
(generationMode === "image-to-video" && !sourceImage) { - setError("Please select or upload a source image for video generation"); - return; - } - - setError(""); - - // Create optimistic job ID - const optimisticJobId = `optimistic-${Date.now()}`; - - // Create optimistic job immediately for instant UI feedback - const optimisticJob: GenerationJob = { - id: optimisticJobId, - status: "pending", - prompt, - modelId: model, - parameters: { - prompt, - negativePrompt: negativePrompt || undefined, - model, - aspectRatio, - numImages, - steps: steps || undefined, - guidance: guidance || undefined, - seed: seed ? parseInt(seed) : undefined, - generationMode, - }, - createdAt: new Date(), - }; - - // Add optimistic job to the queue immediately - setJobs((prev) => [optimisticJob, ...prev]); - - // Show toast immediately - toast.info(`Generation started for "${prompt.substring(0, 30)}..."`, { - description: `Job queued. You can continue working while it processes.`, - }); - - try { - let imageUrl = sourceImage; - - // If using uploaded file, first upload it - if ((generationMode === "image-to-image" || generationMode === "image-to-video") && sourceImageFile) { - const uploadData = await uploadMutation.mutateAsync(sourceImageFile); - imageUrl = uploadData.url; - } - - const requestBody: any = { - prompt, - negativePrompt: negativePrompt || undefined, - model, - aspectRatio, - numImages, - steps: steps || undefined, - guidance: guidance || undefined, - seed: seed ? 
parseInt(seed) : undefined, - generationMode, - }; - - // Add img2img specific parameters - if (generationMode === "image-to-image") { - requestBody.imageUrl = imageUrl; - requestBody.strength = strength; - } - - // Add video specific parameters for image-to-video mode - if (generationMode === "image-to-video") { - requestBody.imageUrl = imageUrl; - requestBody.resolution = resolution; - requestBody.duration = duration; - } - - // Submit job using mutation - const data = await submitJobMutation.mutateAsync(requestBody); - - // Replace optimistic job with real job data - setJobs((prev) => - prev.map((job) => - job.id === optimisticJobId - ? { - ...job, - id: data.jobId, - status: data.status, - parameters: requestBody, - } - : job - ) - ); - } catch (err: any) { - // Remove optimistic job on error - setJobs((prev) => prev.filter((job) => job.id !== optimisticJobId)); - - const errorMessage = err.message || "Failed to start generation"; - setError(errorMessage); - toast.error("Generation failed to start", { - description: errorMessage, - }); - } - }; - - return ( - <> - - -
- {/* MAIN CONTENT (Canvas/Gallery) */} -
- {/* Page Header */} -
-
-

- Canvas -

-
- -
- {/* Mobile Toggle for Right Sidebar */} - -
-
- - {/* Scrollable Area */} - -
- - {/* 3. RIGHT SIDEBAR (Controls) */} - {/* Mobile Overlay */} - {mobileSidebarOpen && ( -
setMobileSidebarOpen(false)} - >
- )} - -