Merge branch 'main' of github.com:webdevcody/automaker
119
app/electron/services/claude-cli-detector.js
Normal file
@@ -0,0 +1,119 @@
const { execSync } = require('child_process');
const fs = require('fs');
const path = require('path');
const os = require('os');

class ClaudeCliDetector {
  /**
   * Check if Claude Code CLI is installed and accessible
   * @returns {Object} { installed: boolean, path: string|null, version: string|null, method: 'cli'|'cli-local'|'sdk'|'none' }
   */
  static detectClaudeInstallation() {
    try {
      // Method 1: Check if 'claude' command is in PATH
      try {
        const claudePath = execSync('which claude', { encoding: 'utf-8' }).trim();
        const version = execSync('claude --version', { encoding: 'utf-8' }).trim();
        return {
          installed: true,
          path: claudePath,
          version: version,
          method: 'cli'
        };
      } catch (error) {
        // CLI not in PATH, check local installation
      }

      // Method 2: Check for local installation
      const localClaudePath = path.join(os.homedir(), '.claude', 'local', 'claude');
      if (fs.existsSync(localClaudePath)) {
        try {
          const version = execSync(`${localClaudePath} --version`, { encoding: 'utf-8' }).trim();
          return {
            installed: true,
            path: localClaudePath,
            version: version,
            method: 'cli-local'
          };
        } catch (error) {
          // Local CLI exists but may not be executable
        }
      }

      // Method 3: Check Windows path
      if (process.platform === 'win32') {
        try {
          const claudePath = execSync('where claude', { encoding: 'utf-8' }).trim();
          const version = execSync('claude --version', { encoding: 'utf-8' }).trim();
          return {
            installed: true,
            path: claudePath,
            version: version,
            method: 'cli'
          };
        } catch (error) {
          // Not found
        }
      }

      // Method 4: SDK mode (using OAuth token)
      if (process.env.CLAUDE_CODE_OAUTH_TOKEN) {
        return {
          installed: true,
          path: null,
          version: 'SDK Mode',
          method: 'sdk'
        };
      }

      return {
        installed: false,
        path: null,
        version: null,
        method: 'none'
      };
    } catch (error) {
      console.error('[ClaudeCliDetector] Error detecting Claude installation:', error);
      return {
        installed: false,
        path: null,
        version: null,
        method: 'none',
        error: error.message
      };
    }
  }

  /**
   * Get installation recommendations
   */
  static getInstallationInfo() {
    const detection = this.detectClaudeInstallation();

    if (detection.installed) {
      return {
        status: 'installed',
        method: detection.method,
        version: detection.version,
        path: detection.path,
        recommendation: detection.method === 'cli'
          ? 'Using Claude Code CLI - optimal for long-running tasks'
          : 'Using SDK mode - works well but CLI may provide better performance'
      };
    }

    return {
      status: 'not_installed',
      recommendation: 'Consider installing Claude Code CLI for better performance with ultrathink',
      installCommands: {
        macos: 'curl -fsSL claude.ai/install.sh | bash',
        windows: 'irm https://claude.ai/install.ps1 | iex',
        linux: 'curl -fsSL claude.ai/install.sh | bash',
        npm: 'npm install -g @anthropic-ai/claude-code'
      }
    };
  }
}

module.exports = ClaudeCliDetector;
229
app/electron/services/codex-cli-detector.js
Normal file
@@ -0,0 +1,229 @@
const { execSync } = require('child_process');
const fs = require('fs');
const path = require('path');
const os = require('os');

/**
 * Codex CLI Detector - Checks if OpenAI Codex CLI is installed
 *
 * Codex CLI is OpenAI's agent CLI tool that allows users to use
 * GPT-5.1 Codex models (gpt-5.1-codex-max, gpt-5.1-codex, etc.)
 * for code generation and agentic tasks.
 */
class CodexCliDetector {
  /**
   * Check if Codex CLI is installed and accessible
   * @returns {Object} { installed: boolean, path: string|null, version: string|null, method: 'cli'|'npm'|'brew'|'api-key-only'|'none' }
   */
  static detectCodexInstallation() {
    try {
      // Method 1: Check if 'codex' command is in PATH
      try {
        const codexPath = execSync('which codex 2>/dev/null', { encoding: 'utf-8' }).trim();
        if (codexPath) {
          const version = this.getCodexVersion(codexPath);
          return {
            installed: true,
            path: codexPath,
            version: version,
            method: 'cli'
          };
        }
      } catch (error) {
        // CLI not in PATH, continue checking other methods
      }

      // Method 2: Check for npm global installation
      try {
        const npmListOutput = execSync('npm list -g @openai/codex --depth=0 2>/dev/null', { encoding: 'utf-8' });
        if (npmListOutput && npmListOutput.includes('@openai/codex')) {
          // Get the path from npm bin
          const npmBinPath = execSync('npm bin -g', { encoding: 'utf-8' }).trim();
          const codexPath = path.join(npmBinPath, 'codex');
          const version = this.getCodexVersion(codexPath);
          return {
            installed: true,
            path: codexPath,
            version: version,
            method: 'npm'
          };
        }
      } catch (error) {
        // npm global not found
      }

      // Method 3: Check for Homebrew installation on macOS
      if (process.platform === 'darwin') {
        try {
          const brewList = execSync('brew list --formula 2>/dev/null', { encoding: 'utf-8' });
          if (brewList.includes('codex')) {
            const brewPrefixOutput = execSync('brew --prefix codex 2>/dev/null', { encoding: 'utf-8' }).trim();
            const codexPath = path.join(brewPrefixOutput, 'bin', 'codex');
            const version = this.getCodexVersion(codexPath);
            return {
              installed: true,
              path: codexPath,
              version: version,
              method: 'brew'
            };
          }
        } catch (error) {
          // Homebrew not found or codex not installed via brew
        }
      }

      // Method 4: Check Windows path
      if (process.platform === 'win32') {
        try {
          const codexPath = execSync('where codex 2>nul', { encoding: 'utf-8' }).trim().split('\n')[0];
          if (codexPath) {
            const version = this.getCodexVersion(codexPath);
            return {
              installed: true,
              path: codexPath,
              version: version,
              method: 'cli'
            };
          }
        } catch (error) {
          // Not found on Windows
        }
      }

      // Method 5: Check common installation paths
      const commonPaths = [
        path.join(os.homedir(), '.local', 'bin', 'codex'),
        path.join(os.homedir(), '.npm-global', 'bin', 'codex'),
        '/usr/local/bin/codex',
        '/opt/homebrew/bin/codex',
      ];

      for (const checkPath of commonPaths) {
        if (fs.existsSync(checkPath)) {
          const version = this.getCodexVersion(checkPath);
          return {
            installed: true,
            path: checkPath,
            version: version,
            method: 'cli'
          };
        }
      }

      // Method 6: Check if OPENAI_API_KEY is set (can use Codex API directly)
      if (process.env.OPENAI_API_KEY) {
        return {
          installed: false,
          path: null,
          version: null,
          method: 'api-key-only',
          hasApiKey: true
        };
      }

      return {
        installed: false,
        path: null,
        version: null,
        method: 'none'
      };
    } catch (error) {
      console.error('[CodexCliDetector] Error detecting Codex installation:', error);
      return {
        installed: false,
        path: null,
        version: null,
        method: 'none',
        error: error.message
      };
    }
  }

  /**
   * Get Codex CLI version from executable path
   * @param {string} codexPath Path to codex executable
   * @returns {string|null} Version string or null
   */
  static getCodexVersion(codexPath) {
    try {
      const version = execSync(`"${codexPath}" --version 2>/dev/null`, { encoding: 'utf-8' }).trim();
      return version || null;
    } catch (error) {
      return null;
    }
  }

  /**
   * Get installation info and recommendations
   * @returns {Object} Installation status and recommendations
   */
  static getInstallationInfo() {
    const detection = this.detectCodexInstallation();

    if (detection.installed) {
      return {
        status: 'installed',
        method: detection.method,
        version: detection.version,
        path: detection.path,
        recommendation: detection.method === 'cli'
          ? 'Using Codex CLI - ready for GPT-5.1 Codex models'
          : `Using Codex CLI via ${detection.method} - ready for GPT-5.1 Codex models`
      };
    }

    // Not installed but has API key
    if (detection.method === 'api-key-only') {
      return {
        status: 'api_key_only',
        method: 'api-key-only',
        recommendation: 'OPENAI_API_KEY detected but Codex CLI not installed. Install Codex CLI for full agentic capabilities.',
        installCommands: this.getInstallCommands()
      };
    }

    return {
      status: 'not_installed',
      recommendation: 'Install OpenAI Codex CLI to use GPT-5.1 Codex models for agentic tasks',
      installCommands: this.getInstallCommands()
    };
  }

  /**
   * Get installation commands for different platforms
   * @returns {Object} Installation commands by platform
   */
  static getInstallCommands() {
    return {
      npm: 'npm install -g @openai/codex@latest',
      macos: 'brew install codex',
      linux: 'npm install -g @openai/codex@latest',
      windows: 'npm install -g @openai/codex@latest'
    };
  }

  /**
   * Check if Codex CLI supports a specific model
   * @param {string} model Model name to check
   * @returns {boolean} Whether the model is supported
   */
  static isModelSupported(model) {
    const supportedModels = [
      'gpt-5.1-codex-max',
      'gpt-5.1-codex',
      'gpt-5.1-codex-mini',
      'gpt-5.1'
    ];
    return supportedModels.includes(model);
  }

  /**
   * Get default model for Codex CLI
   * @returns {string} Default model name
   */
  static getDefaultModel() {
    return 'gpt-5.1-codex-max';
  }
}

module.exports = CodexCliDetector;
351
app/electron/services/codex-config-manager.js
Normal file
@@ -0,0 +1,351 @@
/**
 * Codex TOML Configuration Manager
 *
 * Manages Codex CLI's TOML configuration file to add/update MCP server settings.
 * Codex CLI looks for config at:
 * - ~/.codex/config.toml (user-level)
 * - .codex/config.toml (project-level, takes precedence)
 */
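// Illustrative example of the config written by configureMcpServer() below
// (paths are placeholders; actual values depend on the project):
//
//   experimental_use_rmcp_client = true
//
//   [mcp_servers.automaker-tools]
//   command = "node"
//   args = ["/path/to/app/electron/services/mcp-server-stdio.js"]
//   startup_timeout_sec = 10
//   tool_timeout_sec = 60
//   enabled_tools = ["UpdateFeatureStatus"]
//
//   [mcp_servers.automaker-tools.env]
//   AUTOMAKER_PROJECT_PATH = "/path/to/project"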

const fs = require('fs/promises');
const path = require('path');
const os = require('os');

class CodexConfigManager {
  constructor() {
    this.userConfigPath = path.join(os.homedir(), '.codex', 'config.toml');
    this.projectConfigPath = null; // Will be set per project
  }

  /**
   * Set the project path for project-level config
   */
  setProjectPath(projectPath) {
    this.projectConfigPath = path.join(projectPath, '.codex', 'config.toml');
  }

  /**
   * Get the effective config path (project-level if exists, otherwise user-level)
   */
  async getConfigPath() {
    if (this.projectConfigPath) {
      try {
        await fs.access(this.projectConfigPath);
        return this.projectConfigPath;
      } catch (e) {
        // Project config doesn't exist, fall back to user config
      }
    }

    // Ensure user config directory exists
    const userConfigDir = path.dirname(this.userConfigPath);
    try {
      await fs.mkdir(userConfigDir, { recursive: true });
    } catch (e) {
      // Directory might already exist
    }

    return this.userConfigPath;
  }

  /**
   * Read existing TOML config (simple parser for our needs)
   */
  async readConfig(configPath) {
    try {
      const content = await fs.readFile(configPath, 'utf-8');
      return this.parseToml(content);
    } catch (e) {
      if (e.code === 'ENOENT') {
        return {};
      }
      throw e;
    }
  }

  /**
   * Simple TOML parser for our specific use case
   * This is a minimal parser that handles the MCP server config structure
   */
  parseToml(content) {
    const config = {};
    let currentSection = null;
    let currentSubsection = null;

    const lines = content.split('\n');

    for (const line of lines) {
      const trimmed = line.trim();

      // Skip comments and empty lines
      if (!trimmed || trimmed.startsWith('#')) {
        continue;
      }

      // Section header: [section]
      const sectionMatch = trimmed.match(/^\[([^\]]+)\]$/);
      if (sectionMatch) {
        const sectionName = sectionMatch[1];
        const parts = sectionName.split('.');

        if (parts.length === 1) {
          currentSection = parts[0];
          currentSubsection = null;
          if (!config[currentSection]) {
            config[currentSection] = {};
          }
        } else if (parts.length === 2) {
          currentSection = parts[0];
          currentSubsection = parts[1];
          if (!config[currentSection]) {
            config[currentSection] = {};
          }
          if (!config[currentSection][currentSubsection]) {
            config[currentSection][currentSubsection] = {};
          }
        }
        continue;
      }

      // Key-value pair: key = value
      const kvMatch = trimmed.match(/^([^=]+)=(.+)$/);
      if (kvMatch) {
        const key = kvMatch[1].trim();
        let value = kvMatch[2].trim();

        // Remove quotes if present
        if ((value.startsWith('"') && value.endsWith('"')) ||
            (value.startsWith("'") && value.endsWith("'"))) {
          value = value.slice(1, -1);
        }

        // Parse boolean
        if (value === 'true') value = true;
        else if (value === 'false') value = false;
        // Parse number
        else if (/^-?\d+$/.test(value)) value = parseInt(value, 10);
        else if (/^-?\d+\.\d+$/.test(value)) value = parseFloat(value);

        if (currentSubsection) {
          if (!config[currentSection][currentSubsection]) {
            config[currentSection][currentSubsection] = {};
          }
          config[currentSection][currentSubsection][key] = value;
        } else if (currentSection) {
          if (!config[currentSection]) {
            config[currentSection] = {};
          }
          config[currentSection][key] = value;
        } else {
          config[key] = value;
        }
      }
    }

    return config;
  }

  /**
   * Convert config object back to TOML format
   */
  stringifyToml(config, indent = 0) {
    const indentStr = ' '.repeat(indent);
    let result = '';

    for (const [key, value] of Object.entries(config)) {
      if (typeof value === 'object' && value !== null && !Array.isArray(value)) {
        // Section
        result += `${indentStr}[${key}]\n`;
        result += this.stringifyToml(value, indent);
      } else {
        // Key-value
        let valueStr = value;
        if (typeof value === 'string') {
          // Escape quotes and wrap in quotes if needed
          if (value.includes('"') || value.includes("'") || value.includes(' ')) {
            valueStr = `"${value.replace(/"/g, '\\"')}"`;
          }
        } else if (typeof value === 'boolean') {
          valueStr = value.toString();
        }
        result += `${indentStr}${key} = ${valueStr}\n`;
      }
    }

    return result;
  }

  /**
   * Configure the automaker-tools MCP server
   */
  async configureMcpServer(projectPath, mcpServerScriptPath) {
    this.setProjectPath(projectPath);
    const configPath = await this.getConfigPath();

    // Read existing config
    const config = await this.readConfig(configPath);

    // Ensure mcp_servers section exists
    if (!config.mcp_servers) {
      config.mcp_servers = {};
    }

    // Configure automaker-tools server
    config.mcp_servers['automaker-tools'] = {
      command: 'node',
      args: [mcpServerScriptPath],
      env: {
        AUTOMAKER_PROJECT_PATH: projectPath
      },
      startup_timeout_sec: 10,
      tool_timeout_sec: 60,
      enabled_tools: ['UpdateFeatureStatus']
    };

    // Ensure experimental_use_rmcp_client is enabled (if needed)
    if (!config.experimental_use_rmcp_client) {
      config.experimental_use_rmcp_client = true;
    }

    // Write config back
    await this.writeConfig(configPath, config);

    console.log(`[CodexConfigManager] Configured automaker-tools MCP server in ${configPath}`);
    return configPath;
  }

  /**
   * Write config to TOML file
   */
  async writeConfig(configPath, config) {
    let content = '';

    // Write top-level keys first (preserve existing non-MCP config)
    for (const [key, value] of Object.entries(config)) {
      if (key === 'mcp_servers' || key === 'experimental_use_rmcp_client') {
        continue; // Handle these separately
      }
      if (typeof value !== 'object') {
        content += `${key} = ${this.formatValue(value)}\n`;
      }
    }

    // Write experimental flag if enabled
    if (config.experimental_use_rmcp_client) {
      if (content && !content.endsWith('\n\n')) {
        content += '\n';
      }
      content += `experimental_use_rmcp_client = true\n`;
    }

    // Write mcp_servers section
    if (config.mcp_servers && Object.keys(config.mcp_servers).length > 0) {
      if (content && !content.endsWith('\n\n')) {
        content += '\n';
      }

      for (const [serverName, serverConfig] of Object.entries(config.mcp_servers)) {
        content += `\n[mcp_servers.${serverName}]\n`;

        // Write command first
        if (serverConfig.command) {
          content += `command = "${this.escapeTomlString(serverConfig.command)}"\n`;
        }

        // Write args
        if (serverConfig.args && Array.isArray(serverConfig.args)) {
          const argsStr = serverConfig.args.map(a => `"${this.escapeTomlString(a)}"`).join(', ');
          content += `args = [${argsStr}]\n`;
        }

        // Write timeouts (must be before env subsection)
        if (serverConfig.startup_timeout_sec !== undefined) {
          content += `startup_timeout_sec = ${serverConfig.startup_timeout_sec}\n`;
        }

        if (serverConfig.tool_timeout_sec !== undefined) {
          content += `tool_timeout_sec = ${serverConfig.tool_timeout_sec}\n`;
        }

        // Write enabled_tools (must be before env subsection - at server level, not env level)
        if (serverConfig.enabled_tools && Array.isArray(serverConfig.enabled_tools)) {
          const toolsStr = serverConfig.enabled_tools.map(t => `"${this.escapeTomlString(t)}"`).join(', ');
          content += `enabled_tools = [${toolsStr}]\n`;
        }

        // Write env section last (as a separate subsection)
        // IMPORTANT: In TOML, once we start [mcp_servers.server_name.env],
        // everything after belongs to that subsection until a new section starts
        if (serverConfig.env && typeof serverConfig.env === 'object' && Object.keys(serverConfig.env).length > 0) {
          content += `\n[mcp_servers.${serverName}.env]\n`;
          for (const [envKey, envValue] of Object.entries(serverConfig.env)) {
            content += `${envKey} = "${this.escapeTomlString(String(envValue))}"\n`;
          }
        }
      }
    }

    // Ensure directory exists
    const configDir = path.dirname(configPath);
    await fs.mkdir(configDir, { recursive: true });

    // Write file
    await fs.writeFile(configPath, content, 'utf-8');
  }

  /**
   * Escape special characters in TOML strings
   */
  escapeTomlString(str) {
    return str
      .replace(/\\/g, '\\\\')
      .replace(/"/g, '\\"')
      .replace(/\n/g, '\\n')
      .replace(/\r/g, '\\r')
      .replace(/\t/g, '\\t');
  }

  /**
   * Format a value for TOML output
   */
  formatValue(value) {
    if (typeof value === 'string') {
      // Escape quotes
      const escaped = value.replace(/\\/g, '\\\\').replace(/"/g, '\\"');
      return `"${escaped}"`;
    } else if (typeof value === 'boolean') {
      return value.toString();
    } else if (typeof value === 'number') {
      return value.toString();
    }
    return `"${String(value)}"`;
  }

  /**
   * Remove automaker-tools MCP server configuration
   */
  async removeMcpServer(projectPath) {
    this.setProjectPath(projectPath);
    const configPath = await this.getConfigPath();

    try {
      const config = await this.readConfig(configPath);

      if (config.mcp_servers && config.mcp_servers['automaker-tools']) {
        delete config.mcp_servers['automaker-tools'];

        // If no more MCP servers, remove the section
        if (Object.keys(config.mcp_servers).length === 0) {
          delete config.mcp_servers;
        }

        await this.writeConfig(configPath, config);
        console.log(`[CodexConfigManager] Removed automaker-tools MCP server from ${configPath}`);
      }
    } catch (e) {
      console.error(`[CodexConfigManager] Error removing MCP server config:`, e);
    }
  }
}

module.exports = new CodexConfigManager();
610
app/electron/services/codex-executor.js
Normal file
@@ -0,0 +1,610 @@
/**
 * Codex CLI Execution Wrapper
 *
 * This module handles spawning and managing Codex CLI processes
 * for executing OpenAI model queries.
 */

const { spawn } = require('child_process');
const { EventEmitter } = require('events');
const readline = require('readline');
const path = require('path');
const CodexCliDetector = require('./codex-cli-detector');
const codexConfigManager = require('./codex-config-manager');

/**
 * Message types from Codex CLI JSON output
 */
const CODEX_EVENT_TYPES = {
  THREAD_STARTED: 'thread.started',
  ITEM_STARTED: 'item.started',
  ITEM_COMPLETED: 'item.completed',
  THREAD_COMPLETED: 'thread.completed',
  ERROR: 'error'
};
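// Illustrative shape of one JSONL event as consumed by convertToClaudeFormat()
// below; the exact fields emitted by Codex CLI may differ:
//   {"type":"item.completed","thread_id":"...","item":{"type":"agent_message","text":"..."}}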

/**
 * Codex Executor - Manages Codex CLI process execution
 */
class CodexExecutor extends EventEmitter {
  constructor() {
    super();
    this.currentProcess = null;
    this.codexPath = null;
  }

  /**
   * Find and cache the Codex CLI path
   * @returns {string|null} Path to codex executable
   */
  findCodexPath() {
    if (this.codexPath) {
      return this.codexPath;
    }

    const installation = CodexCliDetector.detectCodexInstallation();
    if (installation.installed && installation.path) {
      this.codexPath = installation.path;
      return this.codexPath;
    }

    return null;
  }

  /**
   * Execute a Codex CLI query
   * @param {Object} options Execution options
   * @param {string} options.prompt The prompt to execute
   * @param {string} options.model Model to use (default: gpt-5.1-codex-max)
   * @param {string} options.cwd Working directory
   * @param {string} options.systemPrompt System prompt (optional, will be prepended to prompt)
   * @param {number} options.maxTurns Not used - Codex CLI doesn't support this parameter
   * @param {string[]} options.allowedTools Not used - Codex CLI doesn't support this parameter
   * @param {Object} options.env Environment variables
   * @param {Object} options.mcpServers MCP servers configuration (for configuring Codex TOML)
   * @returns {AsyncGenerator} Generator yielding messages
   */
  async *execute(options) {
    const {
      prompt,
      model = 'gpt-5.1-codex-max',
      cwd = process.cwd(),
      systemPrompt,
      maxTurns, // Not used by Codex CLI
      allowedTools, // Not used by Codex CLI
      env = {},
      mcpServers = null
    } = options;

    const codexPath = this.findCodexPath();
    if (!codexPath) {
      yield {
        type: 'error',
        error: 'Codex CLI not found. Please install it with: npm install -g @openai/codex@latest'
      };
      return;
    }

    // Configure MCP server if provided
    if (mcpServers && mcpServers['automaker-tools']) {
      try {
        // Get the absolute path to the MCP server script
        const mcpServerScriptPath = path.resolve(__dirname, 'mcp-server-stdio.js');

        // Verify the script exists
        const fs = require('fs');
        if (!fs.existsSync(mcpServerScriptPath)) {
          console.warn(`[CodexExecutor] MCP server script not found at ${mcpServerScriptPath}, skipping MCP configuration`);
        } else {
          // Configure Codex TOML to use the MCP server
          await codexConfigManager.configureMcpServer(cwd, mcpServerScriptPath);
          console.log('[CodexExecutor] Configured automaker-tools MCP server for Codex CLI');
        }
      } catch (error) {
        console.error('[CodexExecutor] Failed to configure MCP server:', error);
        // Continue execution even if MCP config fails - Codex will work without MCP tools
      }
    }

    // Combine system prompt with main prompt if provided
    // Codex CLI doesn't support --system-prompt argument, so we prepend it to the prompt
    let combinedPrompt = prompt;
    console.log('[CodexExecutor] Original prompt length:', prompt?.length || 0);
    if (systemPrompt) {
      combinedPrompt = `${systemPrompt}\n\n---\n\n${prompt}`;
      console.log('[CodexExecutor] System prompt prepended to main prompt');
      console.log('[CodexExecutor] System prompt length:', systemPrompt.length);
      console.log('[CodexExecutor] Combined prompt length:', combinedPrompt.length);
    }

    // Build command arguments
    // Note: maxTurns and allowedTools are not supported by Codex CLI
    console.log('[CodexExecutor] Building command arguments...');
    const args = this.buildArgs({
      prompt: combinedPrompt,
      model
    });

    console.log('[CodexExecutor] Executing command:', codexPath);
    console.log('[CodexExecutor] Number of args:', args.length);
    console.log('[CodexExecutor] Args (without prompt):', args.slice(0, -1).join(' '));
    console.log('[CodexExecutor] Prompt length in args:', args[args.length - 1]?.length || 0);
    console.log('[CodexExecutor] Prompt preview (first 200 chars):', args[args.length - 1]?.substring(0, 200));
    console.log('[CodexExecutor] Working directory:', cwd);

    // Spawn the process
    const processEnv = {
      ...process.env,
      ...env,
      // Ensure OPENAI_API_KEY is available
      OPENAI_API_KEY: env.OPENAI_API_KEY || process.env.OPENAI_API_KEY
    };

    // Log API key status (without exposing the key)
    if (processEnv.OPENAI_API_KEY) {
      console.log('[CodexExecutor] OPENAI_API_KEY is set (length:', processEnv.OPENAI_API_KEY.length, ')');
    } else {
      console.warn('[CodexExecutor] WARNING: OPENAI_API_KEY is not set!');
    }

    console.log('[CodexExecutor] Spawning process...');
    const proc = spawn(codexPath, args, {
      cwd,
      env: processEnv,
      stdio: ['pipe', 'pipe', 'pipe']
    });

    this.currentProcess = proc;
    console.log('[CodexExecutor] Process spawned with PID:', proc.pid);

    // Track process events
    proc.on('error', (error) => {
      console.error('[CodexExecutor] Process error:', error);
    });

    proc.on('spawn', () => {
      console.log('[CodexExecutor] Process spawned successfully');
    });

    // Collect stderr output as it comes in
    let stderr = '';
    let hasOutput = false;
    let stdoutChunks = [];
    let stderrChunks = [];

    proc.stderr.on('data', (data) => {
      const errorText = data.toString();
      stderr += errorText;
      stderrChunks.push(errorText);
      hasOutput = true;
      console.error('[CodexExecutor] stderr chunk received (', data.length, 'bytes):', errorText.substring(0, 200));
    });

    proc.stderr.on('end', () => {
      console.log('[CodexExecutor] stderr stream ended. Total chunks:', stderrChunks.length, 'Total length:', stderr.length);
    });

    proc.stdout.on('data', (data) => {
      const text = data.toString();
      stdoutChunks.push(text);
      hasOutput = true;
      console.log('[CodexExecutor] stdout chunk received (', data.length, 'bytes):', text.substring(0, 200));
    });

    proc.stdout.on('end', () => {
      console.log('[CodexExecutor] stdout stream ended. Total chunks:', stdoutChunks.length);
    });

    // Create readline interface for parsing JSONL output
    console.log('[CodexExecutor] Creating readline interface...');
    const rl = readline.createInterface({
      input: proc.stdout,
      crlfDelay: Infinity
    });

    // Track accumulated content for converting to Claude format
    let accumulatedText = '';
    let toolUses = [];
    let lastOutputTime = Date.now();
    const OUTPUT_TIMEOUT = 30000; // 30 seconds timeout for no output
    let lineCount = 0;
    let jsonParseErrors = 0;

    // Set up timeout check
    const checkTimeout = setInterval(() => {
      const timeSinceLastOutput = Date.now() - lastOutputTime;
      if (timeSinceLastOutput > OUTPUT_TIMEOUT && !hasOutput) {
        console.warn('[CodexExecutor] No output received for', timeSinceLastOutput, 'ms. Process still alive:', !proc.killed);
      }
    }, 5000);

    console.log('[CodexExecutor] Starting to read lines from stdout...');

    // Process stdout line by line (JSONL format)
    try {
      for await (const line of rl) {
        hasOutput = true;
        lastOutputTime = Date.now();
        lineCount++;

        console.log('[CodexExecutor] Line', lineCount, 'received (length:', line.length, '):', line.substring(0, 100));

        if (!line.trim()) {
          console.log('[CodexExecutor] Skipping empty line');
          continue;
        }

        try {
          const event = JSON.parse(line);
          console.log('[CodexExecutor] Successfully parsed JSON event. Type:', event.type, 'Keys:', Object.keys(event));

          const convertedMsg = this.convertToClaudeFormat(event);
          console.log('[CodexExecutor] Converted message:', convertedMsg ? { type: convertedMsg.type } : 'null');

          if (convertedMsg) {
            // Accumulate text content
            if (convertedMsg.type === 'assistant' && convertedMsg.message?.content) {
              for (const block of convertedMsg.message.content) {
                if (block.type === 'text') {
                  accumulatedText += block.text;
                  console.log('[CodexExecutor] Accumulated text block (total length:', accumulatedText.length, ')');
                } else if (block.type === 'tool_use') {
                  toolUses.push(block);
                  console.log('[CodexExecutor] Tool use detected:', block.name);
                }
              }
            }
            console.log('[CodexExecutor] Yielding message of type:', convertedMsg.type);
            yield convertedMsg;
          } else {
            console.log('[CodexExecutor] Converted message is null, skipping');
          }
        } catch (parseError) {
          jsonParseErrors++;
          // Non-JSON output, yield as text
          console.log('[CodexExecutor] JSON parse error (', jsonParseErrors, 'total):', parseError.message);
          console.log('[CodexExecutor] Non-JSON line content:', line.substring(0, 200));
          yield {
            type: 'assistant',
            message: {
              content: [{ type: 'text', text: line + '\n' }]
            }
          };
        }
      }

      console.log('[CodexExecutor] Finished reading all lines. Total lines:', lineCount, 'JSON errors:', jsonParseErrors);
    } catch (readError) {
      console.error('[CodexExecutor] Error reading from readline:', readError);
      throw readError;
    } finally {
      clearInterval(checkTimeout);
      console.log('[CodexExecutor] Cleaned up timeout checker');
    }

    // Handle process completion
    console.log('[CodexExecutor] Waiting for process to close...');
    const exitCode = await new Promise((resolve) => {
      proc.on('close', (code, signal) => {
        console.log('[CodexExecutor] Process closed with code:', code, 'signal:', signal);
        resolve(code);
      });
    });

    this.currentProcess = null;
    console.log('[CodexExecutor] Process completed. Exit code:', exitCode, 'Has output:', hasOutput, 'Stderr length:', stderr.length);

    // Wait a bit for any remaining stderr data to be collected
    console.log('[CodexExecutor] Waiting 200ms for any remaining stderr data...');
    await new Promise(resolve => setTimeout(resolve, 200));
    console.log('[CodexExecutor] Final stderr length:', stderr.length, 'Final stdout chunks:', stdoutChunks.length);

    if (exitCode !== 0) {
      const errorMessage = stderr.trim()
        ? `Codex CLI exited with code ${exitCode}.\n\nError output:\n${stderr}`
        : `Codex CLI exited with code ${exitCode}. No error output captured.`;

      console.error('[CodexExecutor] Process failed with exit code', exitCode);
      console.error('[CodexExecutor] Error message:', errorMessage);
      console.error('[CodexExecutor] Stderr chunks:', stderrChunks.length, 'Stdout chunks:', stdoutChunks.length);

      yield {
        type: 'error',
        error: errorMessage
      };
    } else if (!hasOutput && !stderr) {
      // Process exited successfully but produced no output - might be API key issue
      const warningMessage = 'Codex CLI completed but produced no output. This might indicate:\n' +
        '- Missing or invalid OPENAI_API_KEY\n' +
        '- Codex CLI configuration issue\n' +
        '- The process completed without generating any response\n\n' +
        `Debug info: Exit code ${exitCode}, stdout chunks: ${stdoutChunks.length}, stderr chunks: ${stderrChunks.length}, lines read: ${lineCount}`;

      console.warn('[CodexExecutor] No output detected:', warningMessage);
      console.warn('[CodexExecutor] Stdout chunks:', stdoutChunks);
      console.warn('[CodexExecutor] Stderr chunks:', stderrChunks);

      yield {
        type: 'error',
        error: warningMessage
      };
    } else {
      console.log('[CodexExecutor] Process completed successfully. Exit code:', exitCode, 'Lines processed:', lineCount);
    }
  }

  /**
   * Build command arguments for Codex CLI
   * Only includes supported arguments based on Codex CLI help:
   * - --model: Model to use
   * - --json: JSON output format
   * - --full-auto: Non-interactive automatic execution
   *
   * Note: Codex CLI does NOT support:
   * - --system-prompt (system prompt is prepended to main prompt)
   * - --max-turns (not available in CLI)
   * - --tools (not available in CLI)
   *
   * @param {Object} options Options
   * @returns {string[]} Command arguments
   */
  buildArgs(options) {
    const { prompt, model } = options;

    console.log('[CodexExecutor] buildArgs called with model:', model, 'prompt length:', prompt?.length || 0);

    const args = ['exec'];

    // Add model (required for most use cases)
    if (model) {
      args.push('--model', model);
      console.log('[CodexExecutor] Added model argument:', model);
    }

    // Add JSON output flag for structured parsing
    args.push('--json');
    console.log('[CodexExecutor] Added --json flag');

    // Add full-auto mode (non-interactive)
    // This enables automatic execution with workspace-write sandbox
    args.push('--full-auto');
    console.log('[CodexExecutor] Added --full-auto flag');

    // Add the prompt at the end
    args.push(prompt);
    console.log('[CodexExecutor] Added prompt (length:', prompt?.length || 0, ')');

    console.log('[CodexExecutor] Final args count:', args.length);
    return args;
  }
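  // Illustrative invocation assembled by buildArgs() above:
  //   codex exec --model gpt-5.1-codex-max --json --full-auto "<combined prompt>"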

  /**
   * Map Claude tool names to Codex tool names
   * @param {string[]} tools Array of tool names
   * @returns {string[]} Mapped tool names
   */
  mapToolsToCodex(tools) {
    const toolMap = {
      'Read': 'read',
      'Write': 'write',
      'Edit': 'edit',
      'Bash': 'bash',
      'Glob': 'glob',
      'Grep': 'grep',
      'WebSearch': 'web-search',
      'WebFetch': 'web-fetch'
    };

    return tools
      .map(tool => toolMap[tool] || tool.toLowerCase())
      .filter(tool => tool); // Remove undefined
  }

  /**
   * Convert Codex JSONL event to Claude SDK message format
   * @param {Object} event Codex event object
   * @returns {Object|null} Claude-format message or null
   */
  convertToClaudeFormat(event) {
    console.log('[CodexExecutor] Converting event:', JSON.stringify(event).substring(0, 200));
    const { type, data, item, thread_id } = event;

    switch (type) {
      case CODEX_EVENT_TYPES.THREAD_STARTED:
      case 'thread.started':
        // Session initialization
        return {
          type: 'session_start',
          sessionId: thread_id || data?.thread_id || event.thread_id
        };

      case CODEX_EVENT_TYPES.ITEM_COMPLETED:
      case 'item.completed':
        // Codex uses 'item' field, not 'data'
        return this.convertItemCompleted(item || data);

      case CODEX_EVENT_TYPES.ITEM_STARTED:
      case 'item.started':
        // Convert item.started events - these indicate tool/command usage
        const startedItem = item || data;
        if (startedItem?.type === 'command_execution' && startedItem?.command) {
          return {
            type: 'assistant',
            message: {
              content: [{
                type: 'tool_use',
                name: 'bash',
                input: { command: startedItem.command }
              }]
            }
          };
        }
        // For other item.started types, return null (we'll show the completed version)
        return null;

      case CODEX_EVENT_TYPES.THREAD_COMPLETED:
      case 'thread.completed':
        return {
          type: 'complete',
          sessionId: thread_id || data?.thread_id || event.thread_id
        };

      case CODEX_EVENT_TYPES.ERROR:
      case 'error':
        return {
          type: 'error',
          error: data?.message || item?.message || event.message || 'Unknown error from Codex CLI'
        };

      case 'turn.started':
        // Turn started - just a marker, no need to convert
        return null;

      default:
        // Pass through other events
        console.log('[CodexExecutor] Unhandled event type:', type);
        return null;
    }
  }

  /**
   * Convert item.completed event to Claude format
   * @param {Object} item Event item data
   * @returns {Object|null} Claude-format message
   */
  convertItemCompleted(item) {
    if (!item) {
      console.log('[CodexExecutor] convertItemCompleted: item is null/undefined');
      return null;
    }

    const itemType = item.type || item.item_type;
    console.log('[CodexExecutor] convertItemCompleted: itemType =', itemType, 'item keys:', Object.keys(item));

    switch (itemType) {
      case 'reasoning':
        // Thinking/reasoning output - Codex uses 'text' field
        const reasoningText = item.text || item.content || '';
        console.log('[CodexExecutor] Converting reasoning, text length:', reasoningText.length);
        return {
          type: 'assistant',
          message: {
            content: [{
              type: 'thinking',
              thinking: reasoningText
            }]
          }
        };

      case 'agent_message':
      case 'message':
        // Assistant text message
        const messageText = item.content || item.text || '';
        console.log('[CodexExecutor] Converting message, text length:', messageText.length);
        return {
          type: 'assistant',
          message: {
            content: [{
              type: 'text',
              text: messageText
            }]
          }
        };

      case 'command_execution':
        // Command execution - show both the command and its output
        const command = item.command || '';
        const output = item.aggregated_output || item.output || '';
        console.log('[CodexExecutor] Converting command_execution, command:', command.substring(0, 50), 'output length:', output.length);

        // Return as text message showing the command and output
        return {
          type: 'assistant',
          message: {
            content: [{
              type: 'text',
              text: `\`\`\`bash\n${command}\n\`\`\`\n\n${output}`
            }]
          }
        };

      case 'tool_use':
        // Tool use
        return {
          type: 'assistant',
          message: {
            content: [{
              type: 'tool_use',
              name: item.tool || item.command || 'unknown',
              input: item.input || item.args || {}
            }]
          }
        };

      case 'tool_result':
        // Tool result
        return {
          type: 'tool_result',
          tool_use_id: item.tool_use_id,
          content: item.output || item.result
        };

      case 'todo_list':
        // Todo list - convert to text format
        const todos = item.items || [];
        const todoText = todos.map((t, i) => `${i + 1}. ${t.text || t}`).join('\n');
        console.log('[CodexExecutor] Converting todo_list, items:', todos.length);
        return {
          type: 'assistant',
          message: {
            content: [{
              type: 'text',
              text: `**Todo List:**\n${todoText}`
            }]
          }
        };

      default:
        // Generic text output
        const text = item.text || item.content || item.aggregated_output;
        if (text) {
          console.log('[CodexExecutor] Converting default item type, text length:', text.length);
          return {
            type: 'assistant',
            message: {
              content: [{
                type: 'text',
                text: String(text)
              }]
            }
          };
        }
        console.log('[CodexExecutor] convertItemCompleted: No text content found, returning null');
        return null;
    }
  }

  /**
   * Abort current execution
   */
  abort() {
    if (this.currentProcess) {
      console.log('[CodexExecutor] Aborting current process');
      this.currentProcess.kill('SIGTERM');
      this.currentProcess = null;
    }
  }

  /**
   * Check if execution is in progress
   * @returns {boolean} Whether execution is in progress
   */
  isRunning() {
    return this.currentProcess !== null;
  }
}

// Singleton instance
const codexExecutor = new CodexExecutor();

module.exports = codexExecutor;
@@ -3,11 +3,176 @@ const promptBuilder = require("./prompt-builder");
const contextManager = require("./context-manager");
const featureLoader = require("./feature-loader");
const mcpServerFactory = require("./mcp-server-factory");
const { ModelRegistry } = require("./model-registry");
const { ModelProviderFactory } = require("./model-provider");

// Model name mappings for Claude (legacy - kept for backwards compatibility)
const MODEL_MAP = {
  haiku: "claude-haiku-4-5",
  sonnet: "claude-sonnet-4-20250514",
  opus: "claude-opus-4-5-20251101",
};

// Thinking level to budget_tokens mapping
// These values control how much "thinking time" the model gets for extended thinking
const THINKING_BUDGET_MAP = {
  none: null, // No extended thinking
  low: 4096, // Light thinking
  medium: 16384, // Moderate thinking
  high: 65536, // Deep thinking
  ultrathink: 262144, // Ultra-deep thinking (maximum reasoning)
};
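// For example, getThinkingConfig() below turns thinkingLevel "high" into
// { type: "enabled", budget_tokens: 65536 }, and "none" into null.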

/**
 * Feature Executor - Handles feature implementation using Claude Agent SDK
 * Now supports multiple model providers (Claude, Codex/OpenAI)
 */
class FeatureExecutor {
  /**
   * Get the model string based on feature's model setting
   * Supports both Claude and Codex/OpenAI models
   */
  getModelString(feature) {
    const modelKey = feature.model || "opus"; // Default to opus

    // First check if this is a Codex model - they use the model key directly as the string
    if (ModelRegistry.isCodexModel(modelKey)) {
      const model = ModelRegistry.getModel(modelKey);
      if (model && model.modelString) {
        console.log(
          `[FeatureExecutor] getModelString: modelKey=${modelKey}, modelString=${model.modelString} (Codex model)`
        );
        return model.modelString;
      }
      // If model exists in registry but somehow no modelString, use the key itself
      console.log(
        `[FeatureExecutor] getModelString: modelKey=${modelKey}, modelString=${modelKey} (Codex fallback)`
      );
      return modelKey;
    }

    // For Claude models, use the registry lookup
    let modelString = ModelRegistry.getModelString(modelKey);

    // Fallback to MODEL_MAP if registry doesn't have it (legacy support)
    if (!modelString) {
      modelString = MODEL_MAP[modelKey];
    }

    // Final fallback to opus for Claude models only
    if (!modelString) {
      modelString = MODEL_MAP.opus;
    }

    // Validate model string format - ensure it's not incorrectly constructed
    // Prevent incorrect formats like "claude-haiku-4-20250514" (mixing haiku with sonnet date)
    if (modelString.includes("haiku") && modelString.includes("20250514")) {
      console.error(
        `[FeatureExecutor] Invalid model string detected: ${modelString}, using correct format`
      );
      modelString = MODEL_MAP.haiku || "claude-haiku-4-5";
    }

    console.log(
      `[FeatureExecutor] getModelString: modelKey=${modelKey}, modelString=${modelString}`
    );
    return modelString;
  }

  /**
   * Determine if the feature uses a Codex/OpenAI model
   */
  isCodexModel(feature) {
    const modelKey = feature.model || "opus";
    return ModelRegistry.isCodexModel(modelKey);
  }

  /**
   * Get the appropriate provider for the feature's model
   */
  getProvider(feature) {
    const modelKey = feature.model || "opus";
    return ModelProviderFactory.getProviderForModel(modelKey);
  }

  /**
   * Get thinking configuration based on feature's thinkingLevel
   */
  getThinkingConfig(feature) {
    const modelId = feature.model || "opus";
    // Skip thinking config for models that don't support it (e.g., Codex CLI)
    if (!ModelRegistry.modelSupportsThinking(modelId)) {
      return null;
    }

    const level = feature.thinkingLevel || "none";
    const budgetTokens = THINKING_BUDGET_MAP[level];

    if (budgetTokens === null) {
      return null; // No extended thinking
    }

    return {
      type: "enabled",
      budget_tokens: budgetTokens,
    };
  }

  /**
   * Prepare for ultrathink execution - validate and warn
   */
  prepareForUltrathink(feature, thinkingConfig) {
    if (feature.thinkingLevel !== "ultrathink") {
      return { ready: true };
    }

    const warnings = [];
    const recommendations = [];

    // Check CLI installation
    const claudeCliDetector = require("./claude-cli-detector");
    const cliInfo = claudeCliDetector.getInstallationInfo();

    if (cliInfo.status === "not_installed") {
      warnings.push(
        "Claude Code CLI not detected - ultrathink may have timeout issues"
      );
      recommendations.push(
        "Install Claude Code CLI for optimal ultrathink performance"
      );
    }

    // Validate budget tokens
    if (thinkingConfig && thinkingConfig.budget_tokens > 32000) {
      warnings.push(
        `Ultrathink budget (${thinkingConfig.budget_tokens} tokens) exceeds recommended 32K - may cause long-running requests`
      );
      recommendations.push(
        "Consider using batch processing for budgets above 32K"
      );
    }

    // Cost estimate (rough)
    const estimatedCost = ((thinkingConfig?.budget_tokens || 0) / 1000) * 0.015; // Rough estimate
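    // e.g. the ultrathink budget of 262144 tokens estimates to (262144 / 1000) * 0.015 ≈ $3.93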
    if (estimatedCost > 1.0) {
      warnings.push(
        `Estimated cost: ~$${estimatedCost.toFixed(2)} per execution`
      );
    }

    // Time estimate
    warnings.push("Ultrathink tasks typically take 45-180 seconds");

    return {
      ready: true,
      warnings,
      recommendations,
      estimatedCost,
      estimatedTime: "45-180 seconds",
      cliInfo,
    };
  }

  /**
   * Sleep helper
   */
@@ -22,6 +187,11 @@ class FeatureExecutor {
  async implementFeature(feature, projectPath, sendToRenderer, execution) {
    console.log(`[FeatureExecutor] Implementing: ${feature.description}`);

    // Declare variables outside try block so they're available in catch
    let modelString;
    let providerName;
    let isCodex;

    try {
      // ========================================
      // PHASE 1: PLANNING
@@ -52,13 +222,59 @@ class FeatureExecutor {
        projectPath
      );

      // Determine if we're in TDD mode (skipTests=false means TDD mode)
      const isTDD = !feature.skipTests;
      // Ensure feature has a model set (for backward compatibility with old features)
      if (!feature.model) {
        console.warn(
          `[FeatureExecutor] Feature ${feature.id} missing model property, defaulting to 'opus'`
        );
        feature.model = "opus";
      }

      // Get model and thinking configuration from feature settings
      const modelString = this.getModelString(feature);
      const thinkingConfig = this.getThinkingConfig(feature);

      // Prepare for ultrathink if needed
      if (feature.thinkingLevel === "ultrathink") {
        const preparation = this.prepareForUltrathink(feature, thinkingConfig);

        console.log(`[FeatureExecutor] Ultrathink preparation:`, preparation);

        // Log warnings
        if (preparation.warnings && preparation.warnings.length > 0) {
          preparation.warnings.forEach((warning) => {
            console.warn(`[FeatureExecutor] ⚠️ ${warning}`);
          });
        }

        // Send preparation info to renderer
        sendToRenderer({
          type: "auto_mode_ultrathink_preparation",
          featureId: feature.id,
          warnings: preparation.warnings || [],
          recommendations: preparation.recommendations || [],
          estimatedCost: preparation.estimatedCost,
          estimatedTime: preparation.estimatedTime,
        });
      }

      providerName = this.isCodexModel(feature) ? "Codex/OpenAI" : "Claude";
      console.log(
        `[FeatureExecutor] Using provider: ${providerName}, model: ${modelString}, thinking: ${
          feature.thinkingLevel || "none"
        }`
      );

      // Note: Claude Agent SDK handles authentication automatically - it can use:
      // 1. CLAUDE_CODE_OAUTH_TOKEN env var (for SDK mode)
      // 2. Claude CLI's own authentication (if CLI is installed)
      // 3. ANTHROPIC_API_KEY (fallback)
      // We don't need to validate here - let the SDK/CLI handle auth errors

      // Configure options for the SDK query
      const options = {
        model: "claude-opus-4-5-20251101",
        systemPrompt: await promptBuilder.getCodingPrompt(projectPath, isTDD),
        model: modelString,
        systemPrompt: promptBuilder.getCodingPrompt(),
        maxTurns: 1000,
        cwd: projectPath,
        mcpServers: {
@@ -83,6 +299,11 @@ class FeatureExecutor {
        abortController: abortController,
      };

      // Add thinking configuration if enabled
      if (thinkingConfig) {
        options.thinking = thinkingConfig;
      }

      // Build the prompt for this specific feature
      let prompt = await promptBuilder.buildFeaturePrompt(feature, projectPath);
@@ -135,8 +356,18 @@ class FeatureExecutor {
          }
        }

        // Use content blocks instead of plain text
        prompt = contentBlocks;
        // Wrap content blocks in async generator for SDK (required format for multimodal prompts)
        prompt = (async function* () {
          yield {
            type: "user",
            session_id: "",
            message: {
              role: "user",
              content: contentBlocks,
            },
            parent_tool_use_id: null,
          };
        })();
      }

      // Planning: Analyze the codebase and create implementation plan
@@ -168,8 +399,85 @@ class FeatureExecutor {
      });
      console.log(`[FeatureExecutor] Phase: ACTION for ${feature.description}`);

      // Send query
      const currentQuery = query({ prompt, options });
      // Send query - use appropriate provider based on model
      let currentQuery;
      isCodex = this.isCodexModel(feature);

      // Ensure provider auth is available (especially for Claude SDK)
      const provider = this.getProvider(feature);
      if (provider?.ensureAuthEnv && !provider.ensureAuthEnv()) {
        // Check if CLI is installed to provide better error message
        let authMsg =
          "Missing Anthropic auth. Set ANTHROPIC_API_KEY or CLAUDE_CODE_OAUTH_TOKEN environment variable.";
        try {
          const claudeCliDetector = require("./claude-cli-detector");
          const detection = claudeCliDetector.detectClaudeInstallation();
          if (detection.installed && detection.method === "cli") {
            authMsg =
              "Claude CLI is installed but not authenticated. Run `claude login` to authenticate, or set ANTHROPIC_API_KEY or CLAUDE_CODE_OAUTH_TOKEN environment variable.";
          } else {
            authMsg =
              "Missing Anthropic auth. Set ANTHROPIC_API_KEY or CLAUDE_CODE_OAUTH_TOKEN, or install Claude CLI and run `claude login`.";
          }
        } catch (err) {
          // Fallback to default message
        }
        console.error(`[FeatureExecutor] ${authMsg}`);
        throw new Error(authMsg);
      }

      // Validate that model string matches the provider
      if (isCodex) {
        // Ensure model string is actually a Codex model, not a Claude model
        if (modelString.startsWith("claude-")) {
          console.error(
            `[FeatureExecutor] ERROR: Codex provider selected but Claude model string detected: ${modelString}`
          );
          console.error(
            `[FeatureExecutor] Feature model: ${
              feature.model || "not set"
            }, modelString: ${modelString}`
          );
          throw new Error(
            `Invalid model configuration: Codex provider cannot use Claude model '${modelString}'. Please check feature model setting.`
          );
        }

        // Use Codex provider for OpenAI models
        console.log(
          `[FeatureExecutor] Using Codex provider for model: ${modelString}`
        );
        // Pass MCP server config to Codex provider so it can configure Codex CLI TOML
        currentQuery = provider.executeQuery({
          prompt,
          model: modelString,
          cwd: projectPath,
          systemPrompt: promptBuilder.getCodingPrompt(),
          maxTurns: 20, // Codex CLI typically uses fewer turns
          allowedTools: options.allowedTools,
          mcpServers: {
            "automaker-tools": featureToolsServer,
          },
          abortController: abortController,
          env: {
            OPENAI_API_KEY: process.env.OPENAI_API_KEY,
          },
        });
      } else {
        // Ensure model string is actually a Claude model, not a Codex model
        if (
          !modelString.startsWith("claude-") &&
          !modelString.match(/^(gpt-|o\d)/)
        ) {
          console.warn(
            `[FeatureExecutor] WARNING: Claude provider selected but unexpected model string: ${modelString}`
          );
        }

        // Use Claude SDK (original implementation)
        currentQuery = query({ prompt, options });
      }

      execution.query = currentQuery;

      // Stream responses
@@ -179,6 +487,22 @@ class FeatureExecutor {
|
||||
// Check if this specific feature was aborted
|
||||
if (!execution.isActive()) break;
|
||||
|
||||
// Handle error messages
|
||||
if (msg.type === "error") {
|
||||
const errorMsg = `\n❌ Error: ${msg.error}\n`;
|
||||
await contextManager.writeToContextFile(
|
||||
projectPath,
|
||||
feature.id,
|
||||
errorMsg
|
||||
);
|
||||
sendToRenderer({
|
||||
type: "auto_mode_error",
|
||||
featureId: feature.id,
|
||||
error: msg.error,
|
||||
});
|
||||
throw new Error(msg.error);
|
||||
}
|
||||
|
||||
if (msg.type === "assistant" && msg.message?.content) {
|
||||
for (const block of msg.message.content) {
|
||||
if (block.type === "text") {
|
||||
@@ -197,6 +521,22 @@ class FeatureExecutor {
|
||||
featureId: feature.id,
|
||||
content: block.text,
|
||||
});
|
||||
} else if (block.type === "thinking") {
|
||||
// Handle thinking output from Codex O-series models
|
||||
const thinkingMsg = `\n💭 Thinking: ${block.thinking?.substring(
|
||||
0,
|
||||
200
|
||||
)}...\n`;
|
||||
await contextManager.writeToContextFile(
|
||||
projectPath,
|
||||
feature.id,
|
||||
thinkingMsg
|
||||
);
|
||||
sendToRenderer({
|
||||
type: "auto_mode_progress",
|
||||
featureId: feature.id,
|
||||
content: thinkingMsg,
|
||||
});
|
||||
} else if (block.type === "tool_use") {
|
||||
// First tool use indicates we're actively implementing
|
||||
if (!hasStartedToolUse) {
|
||||
@@ -314,6 +654,54 @@ class FeatureExecutor {
|
||||
|
||||
console.error("[FeatureExecutor] Error implementing feature:", error);
|
||||
|
||||
// Safely get model info for error logging (may not be set if error occurred early)
|
||||
const modelInfo = modelString
|
||||
? {
|
||||
message: error.message,
|
||||
stack: error.stack,
|
||||
name: error.name,
|
||||
code: error.code,
|
||||
model: modelString,
|
||||
provider: providerName || "unknown",
|
||||
isCodex: isCodex !== undefined ? isCodex : "unknown",
|
||||
}
|
||||
: {
|
||||
message: error.message,
|
||||
stack: error.stack,
|
||||
name: error.name,
|
||||
code: error.code,
|
||||
model: "not initialized",
|
||||
provider: "unknown",
|
||||
isCodex: "unknown",
|
||||
};
|
||||
|
||||
console.error("[FeatureExecutor] Error details:", modelInfo);
|
||||
|
||||
// Check if this is a Claude CLI process error
|
||||
if (error.message && error.message.includes("process exited with code")) {
|
||||
const modelDisplay = modelString
|
||||
? `Model: ${modelString}`
|
||||
: "Model: not initialized";
|
||||
const errorMsg =
|
||||
`Claude Code CLI failed with exit code 1. This might be due to:\n` +
|
||||
`- Invalid or unsupported model (${modelDisplay})\n` +
|
||||
`- Missing or invalid CLAUDE_CODE_OAUTH_TOKEN\n` +
|
||||
`- Claude CLI configuration issue\n` +
|
||||
`- Model not available in your Claude account\n\n` +
|
||||
`Original error: ${error.message}`;
|
||||
|
||||
await contextManager.writeToContextFile(
|
||||
projectPath,
|
||||
feature.id,
|
||||
`\n❌ ${errorMsg}\n`
|
||||
);
|
||||
sendToRenderer({
|
||||
type: "auto_mode_error",
|
||||
featureId: feature.id,
|
||||
error: errorMsg,
|
||||
});
|
||||
}
|
||||
|
||||
// Clean up
|
||||
if (execution) {
|
||||
execution.abortController = null;
|
||||
@@ -365,9 +753,53 @@ class FeatureExecutor {
|
||||
projectPath
|
||||
);
|
||||
|
||||
// Ensure feature has a model set (for backward compatibility with old features)
|
||||
if (!feature.model) {
|
||||
console.warn(
|
||||
`[FeatureExecutor] Feature ${feature.id} missing model property, defaulting to 'opus'`
|
||||
);
|
||||
feature.model = "opus";
|
||||
}
|
||||
|
||||
// Get model and thinking configuration from feature settings
|
||||
const modelString = this.getModelString(feature);
|
||||
const thinkingConfig = this.getThinkingConfig(feature);
|
||||
|
||||
// Prepare for ultrathink if needed
|
||||
if (feature.thinkingLevel === "ultrathink") {
|
||||
const preparation = this.prepareForUltrathink(feature, thinkingConfig);
|
||||
|
||||
console.log(`[FeatureExecutor] Ultrathink preparation:`, preparation);
|
||||
|
||||
// Log warnings
|
||||
if (preparation.warnings && preparation.warnings.length > 0) {
|
||||
preparation.warnings.forEach((warning) => {
|
||||
console.warn(`[FeatureExecutor] ⚠️ ${warning}`);
|
||||
});
|
||||
}
|
||||
|
||||
// Send preparation info to renderer
|
||||
sendToRenderer({
|
||||
type: "auto_mode_ultrathink_preparation",
|
||||
featureId: feature.id,
|
||||
warnings: preparation.warnings || [],
|
||||
recommendations: preparation.recommendations || [],
|
||||
estimatedCost: preparation.estimatedCost,
|
||||
estimatedTime: preparation.estimatedTime,
|
||||
});
|
||||
}
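The shape returned by getThinkingConfig is not shown in this diff; one plausible shape, following the Anthropic extended-thinking option format, is sketched below (field names and numbers are assumptions, not taken from this code):

// Hypothetical thinkingConfig passed through to options.thinking below.
const thinkingConfig = {
  type: "enabled",
  budget_tokens: 32000, // a larger budget for "ultrathink", smaller for lighter thinking levels
};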
|
||||
|
||||
const isCodex = this.isCodexModel(feature);
|
||||
const providerName = isCodex ? "Codex/OpenAI" : "Claude";
|
||||
console.log(
|
||||
`[FeatureExecutor] Resuming with provider: ${providerName}, model: ${modelString}, thinking: ${
|
||||
feature.thinkingLevel || "none"
|
||||
}`
|
||||
);
|
||||
|
||||
const options = {
model: "claude-opus-4-5-20251101",
systemPrompt: await promptBuilder.getVerificationPrompt(projectPath, isTDD),
model: modelString,
systemPrompt: promptBuilder.getVerificationPrompt(),
maxTurns: 1000,
cwd: projectPath,
mcpServers: {
@@ -392,6 +824,11 @@ class FeatureExecutor {
|
||||
abortController: abortController,
|
||||
};
|
||||
|
||||
// Add thinking configuration if enabled
|
||||
if (thinkingConfig) {
|
||||
options.thinking = thinkingConfig;
|
||||
}
|
||||
|
||||
// Build prompt with previous context
|
||||
let prompt = await promptBuilder.buildResumePrompt(
|
||||
feature,
|
||||
@@ -459,11 +896,53 @@ class FeatureExecutor {
|
||||
}
|
||||
}
|
||||
|
||||
// Use content blocks instead of plain text
|
||||
prompt = contentBlocks;
|
||||
// Wrap content blocks in async generator for SDK (required format for multimodal prompts)
|
||||
prompt = (async function* () {
|
||||
yield {
|
||||
type: "user",
|
||||
session_id: "",
|
||||
message: {
|
||||
role: "user",
|
||||
content: contentBlocks,
|
||||
},
|
||||
parent_tool_use_id: null,
|
||||
};
|
||||
})();
|
||||
}
|
||||
|
||||
const currentQuery = query({ prompt, options });
|
||||
// Use appropriate provider based on model type
|
||||
let currentQuery;
|
||||
if (isCodex) {
|
||||
// Validate that model string is actually a Codex model
|
||||
if (modelString.startsWith("claude-")) {
|
||||
console.error(
|
||||
`[FeatureExecutor] ERROR: Codex provider selected but Claude model string detected: ${modelString}`
|
||||
);
|
||||
throw new Error(
|
||||
`Invalid model configuration: Codex provider cannot use Claude model '${modelString}'. Please check feature model setting.`
|
||||
);
|
||||
}
|
||||
|
||||
console.log(
|
||||
`[FeatureExecutor] Using Codex provider for resume with model: ${modelString}`
|
||||
);
|
||||
const provider = this.getProvider(feature);
|
||||
currentQuery = provider.executeQuery({
|
||||
prompt,
|
||||
model: modelString,
|
||||
cwd: projectPath,
|
||||
systemPrompt: promptBuilder.getVerificationPrompt(),
|
||||
maxTurns: 20,
|
||||
allowedTools: options.allowedTools,
|
||||
abortController: abortController,
|
||||
env: {
|
||||
OPENAI_API_KEY: process.env.OPENAI_API_KEY,
|
||||
},
|
||||
});
|
||||
} else {
|
||||
// Use Claude SDK
|
||||
currentQuery = query({ prompt, options });
|
||||
}
|
||||
execution.query = currentQuery;
|
||||
|
||||
let responseText = "";
|
||||
|
||||
@@ -132,9 +132,22 @@ class FeatureLoader {
|
||||
if (f.summary !== undefined) {
|
||||
featureData.summary = f.summary;
|
||||
}
|
||||
if (f.model !== undefined) {
|
||||
featureData.model = f.model;
|
||||
}
|
||||
if (f.thinkingLevel !== undefined) {
|
||||
featureData.thinkingLevel = f.thinkingLevel;
|
||||
}
|
||||
if (f.error !== undefined) {
|
||||
featureData.error = f.error;
|
||||
}
|
||||
// Preserve worktree info
|
||||
if (f.worktreePath !== undefined) {
|
||||
featureData.worktreePath = f.worktreePath;
|
||||
}
|
||||
if (f.branchName !== undefined) {
|
||||
featureData.branchName = f.branchName;
|
||||
}
|
||||
return featureData;
|
||||
});
|
||||
|
||||
@@ -157,6 +170,69 @@ class FeatureLoader {
|
||||
// Skip verified and waiting_approval (which needs user input)
|
||||
return features.find((f) => f.status !== "verified" && f.status !== "waiting_approval");
|
||||
}
|
||||
|
||||
/**
|
||||
* Update worktree info for a feature
|
||||
* @param {string} featureId - The ID of the feature to update
|
||||
* @param {string} projectPath - Path to the project
|
||||
* @param {string|null} worktreePath - Path to the worktree (null to clear)
|
||||
* @param {string|null} branchName - Name of the feature branch (null to clear)
|
||||
*/
|
||||
async updateFeatureWorktree(featureId, projectPath, worktreePath, branchName) {
|
||||
const featuresPath = path.join(
|
||||
projectPath,
|
||||
".automaker",
|
||||
"feature_list.json"
|
||||
);
|
||||
|
||||
const features = await this.loadFeatures(projectPath);
|
||||
|
||||
if (!Array.isArray(features) || features.length === 0) {
|
||||
console.error("[FeatureLoader] Cannot update worktree: feature list is empty");
|
||||
return;
|
||||
}
|
||||
|
||||
const feature = features.find((f) => f.id === featureId);
|
||||
|
||||
if (!feature) {
|
||||
console.error(`[FeatureLoader] Feature ${featureId} not found`);
|
||||
return;
|
||||
}
|
||||
|
||||
// Update or clear worktree info
|
||||
if (worktreePath) {
|
||||
feature.worktreePath = worktreePath;
|
||||
feature.branchName = branchName;
|
||||
} else {
|
||||
delete feature.worktreePath;
|
||||
delete feature.branchName;
|
||||
}
|
||||
|
||||
// Save back to file (reuse the same mapping logic)
|
||||
const toSave = features.map((f) => {
|
||||
const featureData = {
|
||||
id: f.id,
|
||||
category: f.category,
|
||||
description: f.description,
|
||||
steps: f.steps,
|
||||
status: f.status,
|
||||
};
|
||||
if (f.skipTests !== undefined) featureData.skipTests = f.skipTests;
|
||||
if (f.images !== undefined) featureData.images = f.images;
|
||||
if (f.imagePaths !== undefined) featureData.imagePaths = f.imagePaths;
|
||||
if (f.startedAt !== undefined) featureData.startedAt = f.startedAt;
|
||||
if (f.summary !== undefined) featureData.summary = f.summary;
|
||||
if (f.model !== undefined) featureData.model = f.model;
|
||||
if (f.thinkingLevel !== undefined) featureData.thinkingLevel = f.thinkingLevel;
|
||||
if (f.error !== undefined) featureData.error = f.error;
|
||||
if (f.worktreePath !== undefined) featureData.worktreePath = f.worktreePath;
|
||||
if (f.branchName !== undefined) featureData.branchName = f.branchName;
|
||||
return featureData;
|
||||
});
|
||||
|
||||
await fs.writeFile(featuresPath, JSON.stringify(toSave, null, 2), "utf-8");
|
||||
console.log(`[FeatureLoader] Updated feature ${featureId}: worktreePath=${worktreePath}, branchName=${branchName}`);
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = new FeatureLoader();
|
||||
|
||||
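A minimal usage sketch for the new worktree bookkeeping, assuming the module is required from another Electron service (IDs and paths are illustrative):

// Hypothetical caller: record the worktree created for a feature, then clear it on cleanup.
const featureLoader = require("./feature-loader");

async function trackWorktree(projectPath) {
  await featureLoader.updateFeatureWorktree(
    "feature-1699999999999",                                      // illustrative feature ID
    projectPath,
    `${projectPath}/.automaker/worktrees/169999999999-dark-mode`, // illustrative worktree path
    "feature/169999999999-dark-mode"                              // illustrative branch name
  );

  // Passing null clears both fields again (worktreePath and branchName are deleted from the feature).
  await featureLoader.updateFeatureWorktree("feature-1699999999999", projectPath, null, null);
}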
347
app/electron/services/mcp-server-stdio.js
Normal file
@@ -0,0 +1,347 @@
|
||||
#!/usr/bin/env node
|
||||
/**
|
||||
* Standalone STDIO MCP Server for Automaker Tools
|
||||
*
|
||||
* This script runs as a standalone process and communicates via JSON-RPC 2.0
|
||||
* over stdin/stdout. It implements the MCP protocol to expose the UpdateFeatureStatus
|
||||
* tool to Codex CLI.
|
||||
*
|
||||
* Environment variables:
|
||||
* - AUTOMAKER_PROJECT_PATH: Path to the project directory
|
||||
* - AUTOMAKER_IPC_CHANNEL: IPC channel name for callback communication (optional; defaults to 'mcp:update-feature-status')
|
||||
*/
|
||||
|
||||
const readline = require('readline');
|
||||
const path = require('path');
|
||||
|
||||
// Redirect all console.log output to stderr to avoid polluting MCP stdout
|
||||
const originalConsoleLog = console.log;
|
||||
console.log = (...args) => {
|
||||
console.error(...args);
|
||||
};
|
||||
|
||||
// Set up readline interface for line-by-line JSON-RPC input
|
||||
// IMPORTANT: Use a separate output stream for readline to avoid interfering with JSON-RPC stdout
|
||||
// We'll write JSON-RPC responses directly to stdout, not through readline
|
||||
const rl = readline.createInterface({
|
||||
input: process.stdin,
|
||||
output: null, // Don't use stdout for readline output
|
||||
terminal: false
|
||||
});
|
||||
|
||||
let initialized = false;
|
||||
let projectPath = null;
|
||||
let ipcChannel = null;
|
||||
|
||||
// Get configuration from environment
|
||||
projectPath = process.env.AUTOMAKER_PROJECT_PATH || process.cwd();
|
||||
ipcChannel = process.env.AUTOMAKER_IPC_CHANNEL || 'mcp:update-feature-status';
|
||||
|
||||
// Load dependencies (these will be available in the Electron app context)
|
||||
let featureLoader;
|
||||
let electron;
|
||||
|
||||
// Try to load Electron IPC if available (when running from Electron app)
|
||||
try {
|
||||
// In Electron, we can use IPC directly
|
||||
if (typeof require !== 'undefined') {
|
||||
// Check if we're in Electron context
|
||||
const electronModule = require('electron');
|
||||
if (electronModule && electronModule.ipcMain) {
|
||||
electron = electronModule;
|
||||
}
|
||||
}
|
||||
} catch (e) {
|
||||
// Not in Electron context, will use alternative method
|
||||
}
|
||||
|
||||
// Load feature loader
|
||||
// Try multiple paths since this script might be run from different contexts
|
||||
try {
|
||||
// First try relative path (when run from electron/services/)
|
||||
featureLoader = require('./feature-loader');
|
||||
} catch (e) {
|
||||
try {
|
||||
// Try absolute path resolution
|
||||
const featureLoaderPath = path.resolve(__dirname, 'feature-loader.js');
|
||||
delete require.cache[require.resolve(featureLoaderPath)];
|
||||
featureLoader = require(featureLoaderPath);
|
||||
} catch (e2) {
|
||||
// If still fails, try from parent directory
|
||||
try {
|
||||
featureLoader = require(path.join(__dirname, '..', 'services', 'feature-loader'));
|
||||
} catch (e3) {
|
||||
console.error('[McpServerStdio] Error loading feature-loader:', e3.message);
|
||||
console.error('[McpServerStdio] Tried paths:', [
|
||||
'./feature-loader',
|
||||
path.resolve(__dirname, 'feature-loader.js'),
|
||||
path.join(__dirname, '..', 'services', 'feature-loader')
|
||||
]);
|
||||
process.exit(1);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Send JSON-RPC response
|
||||
* CRITICAL: Must write directly to stdout, not via console.log
|
||||
* MCP protocol requires ONLY JSON-RPC messages on stdout
|
||||
*/
|
||||
function sendResponse(id, result, error = null) {
|
||||
const response = {
|
||||
jsonrpc: '2.0',
|
||||
id
|
||||
};
|
||||
|
||||
if (error) {
|
||||
response.error = error;
|
||||
} else {
|
||||
response.result = result;
|
||||
}
|
||||
|
||||
// Write directly to stdout with newline (MCP uses line-delimited JSON)
|
||||
process.stdout.write(JSON.stringify(response) + '\n');
|
||||
}
|
||||
|
||||
/**
|
||||
* Send JSON-RPC notification
|
||||
* CRITICAL: Must write directly to stdout, not via console.log
|
||||
*/
|
||||
function sendNotification(method, params) {
|
||||
const notification = {
|
||||
jsonrpc: '2.0',
|
||||
method,
|
||||
params
|
||||
};
|
||||
|
||||
// Write directly to stdout with newline (MCP uses line-delimited JSON)
|
||||
process.stdout.write(JSON.stringify(notification) + '\n');
|
||||
}
|
||||
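Concretely, each call writes a single newline-terminated JSON line to stdout, for example (illustrative values):

// sendResponse(1, { tools: [] }) writes:
//   {"jsonrpc":"2.0","id":1,"result":{"tools":[]}}
// sendResponse(2, null, { code: -32601, message: 'Method not found: foo' }) writes:
//   {"jsonrpc":"2.0","id":2,"error":{"code":-32601,"message":"Method not found: foo"}}
// sendNotification('notifications/progress', { done: true }) writes:
//   {"jsonrpc":"2.0","method":"notifications/progress","params":{"done":true}}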
|
||||
/**
|
||||
* Handle MCP initialize request
|
||||
*/
|
||||
async function handleInitialize(params, id) {
|
||||
initialized = true;
|
||||
|
||||
sendResponse(id, {
|
||||
protocolVersion: '2024-11-05',
|
||||
capabilities: {
|
||||
tools: {}
|
||||
},
|
||||
serverInfo: {
|
||||
name: 'automaker-tools',
|
||||
version: '1.0.0'
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Handle tools/list request
|
||||
*/
|
||||
async function handleToolsList(params, id) {
|
||||
sendResponse(id, {
|
||||
tools: [
|
||||
{
|
||||
name: 'UpdateFeatureStatus',
|
||||
description: 'Update the status of a feature in the feature list. Use this tool instead of directly modifying feature_list.json to safely update feature status. IMPORTANT: If the feature has skipTests=true, you should NOT mark it as verified - instead it will automatically go to waiting_approval status for manual review. Always include a summary of what was done.',
|
||||
inputSchema: {
|
||||
type: 'object',
|
||||
properties: {
|
||||
featureId: {
|
||||
type: 'string',
|
||||
description: 'The ID of the feature to update'
|
||||
},
|
||||
status: {
|
||||
type: 'string',
|
||||
enum: ['backlog', 'in_progress', 'verified'],
|
||||
description: 'The new status for the feature. Note: If skipTests=true, verified will be converted to waiting_approval automatically.'
|
||||
},
|
||||
summary: {
|
||||
type: 'string',
|
||||
description: 'A brief summary of what was implemented/changed. This will be displayed on the Kanban card. Example: "Added dark mode toggle. Modified: settings.tsx, theme-provider.tsx"'
|
||||
}
|
||||
},
|
||||
required: ['featureId', 'status']
|
||||
}
|
||||
}
|
||||
]
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Handle tools/call request
|
||||
*/
|
||||
async function handleToolsCall(params, id) {
|
||||
const { name, arguments: args } = params;
|
||||
|
||||
if (name !== 'UpdateFeatureStatus') {
|
||||
sendResponse(id, null, {
|
||||
code: -32601,
|
||||
message: `Unknown tool: ${name}`
|
||||
});
|
||||
return;
|
||||
}
|
||||
|
||||
try {
|
||||
const { featureId, status, summary } = args;
|
||||
|
||||
if (!featureId || !status) {
|
||||
sendResponse(id, null, {
|
||||
code: -32602,
|
||||
message: 'Missing required parameters: featureId and status are required'
|
||||
});
|
||||
return;
|
||||
}
|
||||
|
||||
// Load the feature to check skipTests flag
|
||||
const features = await featureLoader.loadFeatures(projectPath);
|
||||
const feature = features.find((f) => f.id === featureId);
|
||||
|
||||
if (!feature) {
|
||||
sendResponse(id, null, {
|
||||
code: -32602,
|
||||
message: `Feature ${featureId} not found`
|
||||
});
|
||||
return;
|
||||
}
|
||||
|
||||
// If agent tries to mark as verified but feature has skipTests=true, convert to waiting_approval
|
||||
let finalStatus = status;
|
||||
if (status === 'verified' && feature.skipTests === true) {
|
||||
finalStatus = 'waiting_approval';
|
||||
}
|
||||
|
||||
// Call the update callback via IPC or direct call
|
||||
// Since we're in a separate process, we need to use IPC to communicate back
|
||||
// For now, we'll call the feature loader directly since it has the update method
|
||||
await featureLoader.updateFeatureStatus(featureId, finalStatus, projectPath, summary);
|
||||
|
||||
const statusMessage = finalStatus !== status
|
||||
? `Successfully updated feature ${featureId} to status "${finalStatus}" (converted from "${status}" because skipTests=true)${summary ? ` with summary: "${summary}"` : ''}`
|
||||
: `Successfully updated feature ${featureId} to status "${finalStatus}"${summary ? ` with summary: "${summary}"` : ''}`;
|
||||
|
||||
sendResponse(id, {
|
||||
content: [
|
||||
{
|
||||
type: 'text',
|
||||
text: statusMessage
|
||||
}
|
||||
]
|
||||
});
|
||||
} catch (error) {
|
||||
console.error('[McpServerStdio] UpdateFeatureStatus error:', error);
|
||||
sendResponse(id, null, {
|
||||
code: -32603,
|
||||
message: `Failed to update feature status: ${error.message}`
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Handle JSON-RPC request
|
||||
*/
|
||||
async function handleRequest(line) {
|
||||
let request;
|
||||
|
||||
try {
|
||||
request = JSON.parse(line);
|
||||
} catch (e) {
|
||||
sendResponse(null, null, {
|
||||
code: -32700,
|
||||
message: 'Parse error'
|
||||
});
|
||||
return;
|
||||
}
|
||||
|
||||
// Validate JSON-RPC 2.0 structure
|
||||
if (request.jsonrpc !== '2.0') {
|
||||
sendResponse(request.id || null, null, {
|
||||
code: -32600,
|
||||
message: 'Invalid Request'
|
||||
});
|
||||
return;
|
||||
}
|
||||
|
||||
const { method, params, id } = request;
|
||||
|
||||
// Handle notifications (no id)
|
||||
if (id === undefined) {
|
||||
// Handle notifications if needed
|
||||
return;
|
||||
}
|
||||
|
||||
// Handle requests
|
||||
try {
|
||||
switch (method) {
|
||||
case 'initialize':
|
||||
await handleInitialize(params, id);
|
||||
break;
|
||||
|
||||
case 'tools/list':
|
||||
if (!initialized) {
|
||||
sendResponse(id, null, {
|
||||
code: -32002,
|
||||
message: 'Server not initialized'
|
||||
});
|
||||
return;
|
||||
}
|
||||
await handleToolsList(params, id);
|
||||
break;
|
||||
|
||||
case 'tools/call':
|
||||
if (!initialized) {
|
||||
sendResponse(id, null, {
|
||||
code: -32002,
|
||||
message: 'Server not initialized'
|
||||
});
|
||||
return;
|
||||
}
|
||||
await handleToolsCall(params, id);
|
||||
break;
|
||||
|
||||
default:
|
||||
sendResponse(id, null, {
|
||||
code: -32601,
|
||||
message: `Method not found: ${method}`
|
||||
});
|
||||
}
|
||||
} catch (error) {
|
||||
console.error('[McpServerStdio] Error handling request:', error);
|
||||
sendResponse(id, null, {
|
||||
code: -32603,
|
||||
message: `Internal error: ${error.message}`
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
// Process stdin line by line
|
||||
rl.on('line', async (line) => {
|
||||
if (!line.trim()) {
|
||||
return;
|
||||
}
|
||||
|
||||
await handleRequest(line);
|
||||
});
|
||||
|
||||
// Handle errors
|
||||
rl.on('error', (error) => {
|
||||
console.error('[McpServerStdio] Readline error:', error);
|
||||
process.exit(1);
|
||||
});
|
||||
|
||||
// Handle process termination
|
||||
process.on('SIGTERM', () => {
|
||||
rl.close();
|
||||
process.exit(0);
|
||||
});
|
||||
|
||||
process.on('SIGINT', () => {
|
||||
rl.close();
|
||||
process.exit(0);
|
||||
});
|
||||
|
||||
// Log startup
|
||||
console.error('[McpServerStdio] Starting MCP server for automaker-tools');
|
||||
console.error(`[McpServerStdio] Project path: ${projectPath}`);
|
||||
console.error(`[McpServerStdio] IPC channel: ${ipcChannel}`);
|
||||
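A minimal sketch of how a parent process could drive this server over stdio. The environment variable names come from the header comment above; the paths, IDs, and arguments are illustrative.

// Hypothetical client for the stdio MCP server defined above.
const { spawn } = require('child_process');

const server = spawn('node', ['app/electron/services/mcp-server-stdio.js'], {
  env: { ...process.env, AUTOMAKER_PROJECT_PATH: '/path/to/project' },
  stdio: ['pipe', 'pipe', 'inherit'], // JSON-RPC on stdin/stdout, logs stay on stderr
});

server.stdout.on('data', (chunk) => process.stderr.write(`reply: ${chunk}`));

// Requests are newline-delimited JSON-RPC 2.0 messages.
const send = (msg) => server.stdin.write(JSON.stringify(msg) + '\n');
send({ jsonrpc: '2.0', id: 1, method: 'initialize', params: {} });
send({ jsonrpc: '2.0', id: 2, method: 'tools/list', params: {} });
send({
  jsonrpc: '2.0',
  id: 3,
  method: 'tools/call',
  params: {
    name: 'UpdateFeatureStatus',
    arguments: { featureId: 'feature-1699999999999', status: 'in_progress', summary: 'Started work' },
  },
});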
477
app/electron/services/model-provider.js
Normal file
@@ -0,0 +1,477 @@
|
||||
/**
|
||||
* Model Provider Abstraction Layer
|
||||
*
|
||||
* This module provides an abstract interface for model providers (Claude, Codex, etc.)
|
||||
* allowing the application to use different AI models through a unified API.
|
||||
*/
|
||||
|
||||
/**
|
||||
* Base class for model providers
|
||||
* Concrete implementations should extend this class
|
||||
*/
|
||||
class ModelProvider {
|
||||
constructor(config = {}) {
|
||||
this.config = config;
|
||||
this.name = 'base';
|
||||
}
|
||||
|
||||
/**
|
||||
* Get provider name
|
||||
* @returns {string} Provider name
|
||||
*/
|
||||
getName() {
|
||||
return this.name;
|
||||
}
|
||||
|
||||
/**
|
||||
* Execute a query with the model provider
|
||||
* @param {Object} options Query options
|
||||
* @param {string} options.prompt The prompt to send
|
||||
* @param {string} options.model The model to use
|
||||
* @param {string} options.systemPrompt System prompt
|
||||
* @param {string} options.cwd Working directory
|
||||
* @param {number} options.maxTurns Maximum turns
|
||||
* @param {string[]} options.allowedTools Allowed tools
|
||||
* @param {Object} options.mcpServers MCP servers configuration
|
||||
* @param {AbortController} options.abortController Abort controller
|
||||
* @param {Object} options.thinking Thinking configuration
|
||||
* @returns {AsyncGenerator} Async generator yielding messages
|
||||
*/
|
||||
async *executeQuery(options) {
|
||||
throw new Error('executeQuery must be implemented by subclass');
|
||||
}
|
||||
|
||||
/**
|
||||
* Detect if this provider's CLI/SDK is installed
|
||||
* @returns {Promise<Object>} Installation status
|
||||
*/
|
||||
async detectInstallation() {
|
||||
throw new Error('detectInstallation must be implemented by subclass');
|
||||
}
|
||||
|
||||
/**
|
||||
* Get list of available models for this provider
|
||||
* @returns {Array<Object>} Array of model definitions
|
||||
*/
|
||||
getAvailableModels() {
|
||||
throw new Error('getAvailableModels must be implemented by subclass');
|
||||
}
|
||||
|
||||
/**
|
||||
* Validate provider configuration
|
||||
* @returns {Object} Validation result { valid: boolean, errors: string[] }
|
||||
*/
|
||||
validateConfig() {
|
||||
throw new Error('validateConfig must be implemented by subclass');
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the full model string for a model key
|
||||
* @param {string} modelKey Short model key (e.g., 'opus', 'gpt-5.1-codex')
|
||||
* @returns {string} Full model string
|
||||
*/
|
||||
getModelString(modelKey) {
|
||||
throw new Error('getModelString must be implemented by subclass');
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if provider supports a specific feature
|
||||
* @param {string} feature Feature name (e.g., 'thinking', 'tools', 'streaming')
|
||||
* @returns {boolean} Whether the feature is supported
|
||||
*/
|
||||
supportsFeature(feature) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Claude Provider - Uses Anthropic Claude Agent SDK
|
||||
*/
|
||||
class ClaudeProvider extends ModelProvider {
|
||||
constructor(config = {}) {
|
||||
super(config);
|
||||
this.name = 'claude';
|
||||
this.sdk = null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Try to load a Claude OAuth token from the local CLI config (~/.claude/config.json).
|
||||
* Returns the token string or null if not found.
|
||||
*/
|
||||
loadTokenFromCliConfig() {
|
||||
try {
|
||||
const fs = require('fs');
|
||||
const path = require('path');
|
||||
const configPath = path.join(require('os').homedir(), '.claude', 'config.json');
|
||||
if (!fs.existsSync(configPath)) {
|
||||
return null;
|
||||
}
|
||||
const raw = fs.readFileSync(configPath, 'utf-8');
|
||||
const parsed = JSON.parse(raw);
|
||||
// CLI config stores token as oauth_token (newer) or token (older)
|
||||
return parsed.oauth_token || parsed.token || null;
|
||||
} catch (err) {
|
||||
console.warn('[ClaudeProvider] Failed to read CLI config token:', err?.message);
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
ensureAuthEnv() {
|
||||
// If API key or token already present, keep as-is.
|
||||
if (process.env.ANTHROPIC_API_KEY || process.env.CLAUDE_CODE_OAUTH_TOKEN) {
|
||||
console.log('[ClaudeProvider] Auth already present in environment');
|
||||
return true;
|
||||
}
|
||||
// Try to hydrate from CLI login config
|
||||
const token = this.loadTokenFromCliConfig();
|
||||
if (token) {
|
||||
process.env.CLAUDE_CODE_OAUTH_TOKEN = token;
|
||||
console.log('[ClaudeProvider] Loaded CLAUDE_CODE_OAUTH_TOKEN from ~/.claude/config.json');
|
||||
return true;
|
||||
}
|
||||
|
||||
// Check if CLI is installed but not logged in
|
||||
try {
|
||||
const claudeCliDetector = require('./claude-cli-detector');
|
||||
const detection = claudeCliDetector.detectClaudeInstallation();
|
||||
if (detection.installed && detection.method === 'cli') {
|
||||
console.error('[ClaudeProvider] Claude CLI is installed but not logged in. Run `claude login` to authenticate.');
|
||||
} else {
|
||||
console.error('[ClaudeProvider] No Anthropic auth found (env empty, ~/.claude/config.json missing token)');
|
||||
}
|
||||
} catch (err) {
|
||||
console.error('[ClaudeProvider] No Anthropic auth found (env empty, ~/.claude/config.json missing token)');
|
||||
}
|
||||
return false;
|
||||
}
|
||||
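In effect, the auth resolution order is: existing environment variables, then the CLI login token on disk, then failure with a login hint. A condensed sketch of how a caller might rely on that (illustrative, not part of this file):

// Hypothetical caller relying on ClaudeProvider auth hydration.
const provider = new ClaudeProvider();
if (!provider.ensureAuthEnv()) {
  // Neither ANTHROPIC_API_KEY / CLAUDE_CODE_OAUTH_TOKEN nor ~/.claude/config.json provided a token.
  throw new Error('Run `claude login` or export ANTHROPIC_API_KEY before starting auto mode.');
}
// If the token came from disk, CLAUDE_CODE_OAUTH_TOKEN is now set in process.env for the SDK to use.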
|
||||
/**
|
||||
* Lazily load the Claude SDK
|
||||
*/
|
||||
loadSdk() {
|
||||
if (!this.sdk) {
|
||||
this.sdk = require('@anthropic-ai/claude-agent-sdk');
|
||||
}
|
||||
return this.sdk;
|
||||
}
|
||||
|
||||
async *executeQuery(options) {
|
||||
// Ensure we have auth; fall back to CLI login token if available.
|
||||
if (!this.ensureAuthEnv()) {
|
||||
// Check if CLI is installed to provide better error message
|
||||
let msg = 'Missing Anthropic auth. Set ANTHROPIC_API_KEY or CLAUDE_CODE_OAUTH_TOKEN environment variable.';
|
||||
try {
|
||||
const claudeCliDetector = require('./claude-cli-detector');
|
||||
const detection = claudeCliDetector.detectClaudeInstallation();
|
||||
if (detection.installed && detection.method === 'cli') {
|
||||
msg = 'Claude CLI is installed but not authenticated. Run `claude login` to authenticate, or set ANTHROPIC_API_KEY or CLAUDE_CODE_OAUTH_TOKEN environment variable.';
|
||||
} else {
|
||||
msg = 'Missing Anthropic auth. Set ANTHROPIC_API_KEY or CLAUDE_CODE_OAUTH_TOKEN, or install Claude CLI and run `claude login`.';
|
||||
}
|
||||
} catch (err) {
|
||||
// Fallback to default message
|
||||
}
|
||||
console.error(`[ClaudeProvider] ${msg}`);
|
||||
yield { type: 'error', error: msg };
|
||||
return;
|
||||
}
|
||||
|
||||
const { query } = this.loadSdk();
|
||||
|
||||
const sdkOptions = {
|
||||
model: options.model,
|
||||
systemPrompt: options.systemPrompt,
|
||||
maxTurns: options.maxTurns || 1000,
|
||||
cwd: options.cwd,
|
||||
mcpServers: options.mcpServers,
|
||||
allowedTools: options.allowedTools,
|
||||
permissionMode: options.permissionMode || 'acceptEdits',
|
||||
sandbox: options.sandbox,
|
||||
abortController: options.abortController,
|
||||
};
|
||||
|
||||
// Add thinking configuration if enabled
|
||||
if (options.thinking) {
|
||||
sdkOptions.thinking = options.thinking;
|
||||
}
|
||||
|
||||
const currentQuery = query({ prompt: options.prompt, options: sdkOptions });
|
||||
|
||||
for await (const msg of currentQuery) {
|
||||
yield msg;
|
||||
}
|
||||
}
|
||||
|
||||
async detectInstallation() {
|
||||
const claudeCliDetector = require('./claude-cli-detector');
|
||||
return claudeCliDetector.getInstallationInfo();
|
||||
}
|
||||
|
||||
getAvailableModels() {
|
||||
return [
|
||||
{
|
||||
id: 'haiku',
|
||||
name: 'Claude Haiku',
|
||||
modelString: 'claude-haiku-4-5',
|
||||
provider: 'claude',
|
||||
description: 'Fast and efficient for simple tasks',
|
||||
tier: 'basic'
|
||||
},
|
||||
{
|
||||
id: 'sonnet',
|
||||
name: 'Claude Sonnet',
|
||||
modelString: 'claude-sonnet-4-20250514',
|
||||
provider: 'claude',
|
||||
description: 'Balanced performance and capabilities',
|
||||
tier: 'standard'
|
||||
},
|
||||
{
|
||||
id: 'opus',
|
||||
name: 'Claude Opus 4.5',
|
||||
modelString: 'claude-opus-4-5-20251101',
|
||||
provider: 'claude',
|
||||
description: 'Most capable model for complex tasks',
|
||||
tier: 'premium'
|
||||
}
|
||||
];
|
||||
}
|
||||
|
||||
validateConfig() {
|
||||
const errors = [];
|
||||
|
||||
// Ensure auth is available (try to auto-load from CLI config)
|
||||
this.ensureAuthEnv();
|
||||
|
||||
if (!process.env.CLAUDE_CODE_OAUTH_TOKEN && !process.env.ANTHROPIC_API_KEY) {
|
||||
errors.push('No Claude authentication found. Set CLAUDE_CODE_OAUTH_TOKEN or ANTHROPIC_API_KEY, or run `claude login` to populate ~/.claude/config.json.');
|
||||
}
|
||||
|
||||
return {
|
||||
valid: errors.length === 0,
|
||||
errors
|
||||
};
|
||||
}
|
||||
|
||||
getModelString(modelKey) {
|
||||
const modelMap = {
|
||||
haiku: 'claude-haiku-4-5',
|
||||
sonnet: 'claude-sonnet-4-20250514',
|
||||
opus: 'claude-opus-4-5-20251101'
|
||||
};
|
||||
return modelMap[modelKey] || modelMap.opus;
|
||||
}
|
||||
|
||||
supportsFeature(feature) {
|
||||
const supportedFeatures = ['thinking', 'tools', 'streaming', 'mcp'];
|
||||
return supportedFeatures.includes(feature);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Codex Provider - Uses OpenAI Codex CLI
|
||||
*/
|
||||
class CodexProvider extends ModelProvider {
|
||||
constructor(config = {}) {
|
||||
super(config);
|
||||
this.name = 'codex';
|
||||
}
|
||||
|
||||
async *executeQuery(options) {
|
||||
const codexExecutor = require('./codex-executor');
|
||||
|
||||
// Validate that we're not receiving a Claude model string
|
||||
if (options.model && options.model.startsWith('claude-')) {
|
||||
const errorMsg = `Codex provider cannot use Claude model '${options.model}'. Codex only supports OpenAI models (gpt-5.1-codex-max, gpt-5.1-codex, gpt-5.1-codex-mini, gpt-5.1).`;
|
||||
console.error(`[CodexProvider] ${errorMsg}`);
|
||||
yield {
|
||||
type: 'error',
|
||||
error: errorMsg
|
||||
};
|
||||
return;
|
||||
}
|
||||
|
||||
const executeOptions = {
|
||||
prompt: options.prompt,
|
||||
model: options.model,
|
||||
cwd: options.cwd,
|
||||
systemPrompt: options.systemPrompt,
|
||||
maxTurns: options.maxTurns || 20,
|
||||
allowedTools: options.allowedTools,
|
||||
mcpServers: options.mcpServers, // Pass MCP servers config to executor
|
||||
env: {
|
||||
...process.env,
|
||||
OPENAI_API_KEY: process.env.OPENAI_API_KEY
|
||||
}
|
||||
};
|
||||
|
||||
// Execute and yield results
|
||||
const generator = codexExecutor.execute(executeOptions);
|
||||
for await (const msg of generator) {
|
||||
yield msg;
|
||||
}
|
||||
}
|
||||
|
||||
async detectInstallation() {
|
||||
const codexCliDetector = require('./codex-cli-detector');
|
||||
return codexCliDetector.getInstallationInfo();
|
||||
}
|
||||
|
||||
getAvailableModels() {
|
||||
return [
|
||||
{
|
||||
id: 'gpt-5.1-codex-max',
|
||||
name: 'GPT-5.1 Codex Max',
|
||||
modelString: 'gpt-5.1-codex-max',
|
||||
provider: 'codex',
|
||||
description: 'Latest flagship - deep and fast reasoning for coding',
|
||||
tier: 'premium',
|
||||
default: true
|
||||
},
|
||||
{
|
||||
id: 'gpt-5.1-codex',
|
||||
name: 'GPT-5.1 Codex',
|
||||
modelString: 'gpt-5.1-codex',
|
||||
provider: 'codex',
|
||||
description: 'Optimized for code generation',
|
||||
tier: 'standard'
|
||||
},
|
||||
{
|
||||
id: 'gpt-5.1-codex-mini',
|
||||
name: 'GPT-5.1 Codex Mini',
|
||||
modelString: 'gpt-5.1-codex-mini',
|
||||
provider: 'codex',
|
||||
description: 'Faster and cheaper option',
|
||||
tier: 'basic'
|
||||
},
|
||||
{
|
||||
id: 'gpt-5.1',
|
||||
name: 'GPT-5.1',
|
||||
modelString: 'gpt-5.1',
|
||||
provider: 'codex',
|
||||
description: 'Broad world knowledge with strong reasoning',
|
||||
tier: 'standard'
|
||||
}
|
||||
];
|
||||
}
|
||||
|
||||
validateConfig() {
|
||||
const errors = [];
|
||||
const codexCliDetector = require('./codex-cli-detector');
|
||||
const installation = codexCliDetector.detectCodexInstallation();
|
||||
|
||||
if (!installation.installed && !process.env.OPENAI_API_KEY) {
|
||||
errors.push('Codex CLI not installed and no OPENAI_API_KEY found.');
|
||||
}
|
||||
|
||||
return {
|
||||
valid: errors.length === 0,
|
||||
errors
|
||||
};
|
||||
}
|
||||
|
||||
getModelString(modelKey) {
|
||||
// Codex models use the key directly as the model string
|
||||
const modelMap = {
|
||||
'gpt-5.1-codex-max': 'gpt-5.1-codex-max',
|
||||
'gpt-5.1-codex': 'gpt-5.1-codex',
|
||||
'gpt-5.1-codex-mini': 'gpt-5.1-codex-mini',
|
||||
'gpt-5.1': 'gpt-5.1'
|
||||
};
|
||||
return modelMap[modelKey] || 'gpt-5.1-codex-max';
|
||||
}
|
||||
|
||||
supportsFeature(feature) {
|
||||
const supportedFeatures = ['tools', 'streaming'];
|
||||
return supportedFeatures.includes(feature);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Model Provider Factory
|
||||
* Creates the appropriate provider based on model or provider name
|
||||
*/
|
||||
class ModelProviderFactory {
|
||||
static providers = {
|
||||
claude: ClaudeProvider,
|
||||
codex: CodexProvider
|
||||
};
|
||||
|
||||
/**
|
||||
* Get provider for a specific model
|
||||
* @param {string} modelId Model ID (e.g., 'opus', 'gpt-5.1-codex')
|
||||
* @returns {ModelProvider} Provider instance
|
||||
*/
|
||||
static getProviderForModel(modelId) {
|
||||
// Check if it's a Claude model
|
||||
const claudeModels = ['haiku', 'sonnet', 'opus'];
|
||||
if (claudeModels.includes(modelId)) {
|
||||
return new ClaudeProvider();
|
||||
}
|
||||
|
||||
// Check if it's a Codex/OpenAI model
|
||||
const codexModels = [
|
||||
'gpt-5.1-codex-max', 'gpt-5.1-codex', 'gpt-5.1-codex-mini', 'gpt-5.1'
|
||||
];
|
||||
if (codexModels.includes(modelId)) {
|
||||
return new CodexProvider();
|
||||
}
|
||||
|
||||
// Default to Claude
|
||||
return new ClaudeProvider();
|
||||
}
|
||||
|
||||
/**
|
||||
* Get provider by name
|
||||
* @param {string} providerName Provider name ('claude' or 'codex')
|
||||
* @returns {ModelProvider} Provider instance
|
||||
*/
|
||||
static getProvider(providerName) {
|
||||
const ProviderClass = this.providers[providerName];
|
||||
if (!ProviderClass) {
|
||||
throw new Error(`Unknown provider: ${providerName}`);
|
||||
}
|
||||
return new ProviderClass();
|
||||
}
|
||||
|
||||
/**
|
||||
* Get all available providers
|
||||
* @returns {string[]} List of provider names
|
||||
*/
|
||||
static getAvailableProviders() {
|
||||
return Object.keys(this.providers);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get all available models across all providers
|
||||
* @returns {Array<Object>} All available models
|
||||
*/
|
||||
static getAllModels() {
|
||||
const allModels = [];
|
||||
for (const providerName of this.getAvailableProviders()) {
|
||||
const provider = this.getProvider(providerName);
|
||||
const models = provider.getAvailableModels();
|
||||
allModels.push(...models);
|
||||
}
|
||||
return allModels;
|
||||
}
|
||||
|
||||
/**
|
||||
* Check installation status for all providers
|
||||
* @returns {Promise<Object>} Installation status for each provider
|
||||
*/
|
||||
static async checkAllProviders() {
|
||||
const status = {};
|
||||
for (const providerName of this.getAvailableProviders()) {
|
||||
const provider = this.getProvider(providerName);
|
||||
status[providerName] = await provider.detectInstallation();
|
||||
}
|
||||
return status;
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = {
|
||||
ModelProvider,
|
||||
ClaudeProvider,
|
||||
CodexProvider,
|
||||
ModelProviderFactory
|
||||
};
|
||||
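A short usage sketch for the factory (the model IDs match the provider definitions above; the executeQuery arguments are abbreviated and illustrative):

// Hypothetical caller using the provider abstraction.
const { ModelProviderFactory } = require('./model-provider');

async function runQuery() {
  // 'opus' / 'sonnet' / 'haiku' resolve to ClaudeProvider; the gpt-5.1* IDs resolve to CodexProvider.
  const provider = ModelProviderFactory.getProviderForModel('opus');

  const check = provider.validateConfig();
  if (!check.valid) {
    throw new Error(check.errors.join('; '));
  }

  for await (const msg of provider.executeQuery({
    prompt: 'List the files in this project.', // illustrative prompt
    model: provider.getModelString('opus'),
    cwd: process.cwd(),
    systemPrompt: 'You are a coding agent.',   // illustrative system prompt
    maxTurns: 5,
    abortController: new AbortController(),
  })) {
    console.log(msg.type);
  }
}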
320
app/electron/services/model-registry.js
Normal file
@@ -0,0 +1,320 @@
|
||||
/**
|
||||
* Model Registry - Centralized model definitions and metadata
|
||||
*
|
||||
* This module provides a central registry of all available models
|
||||
* across different providers (Claude, Codex/OpenAI).
|
||||
*/
|
||||
|
||||
/**
|
||||
* Model Categories
|
||||
*/
|
||||
const MODEL_CATEGORIES = {
|
||||
CLAUDE: 'claude',
|
||||
OPENAI: 'openai',
|
||||
CODEX: 'codex'
|
||||
};
|
||||
|
||||
/**
|
||||
* Model Tiers (capability levels)
|
||||
*/
|
||||
const MODEL_TIERS = {
|
||||
BASIC: 'basic', // Fast, cheap, simple tasks
|
||||
STANDARD: 'standard', // Balanced performance
|
||||
PREMIUM: 'premium' // Most capable, complex tasks
|
||||
};
|
||||
|
||||
const CODEX_MODEL_IDS = [
|
||||
'gpt-5.1-codex-max',
|
||||
'gpt-5.1-codex',
|
||||
'gpt-5.1-codex-mini',
|
||||
'gpt-5.1'
|
||||
];
|
||||
|
||||
/**
|
||||
* All available models with full metadata
|
||||
*/
|
||||
const MODELS = {
|
||||
// Claude Models
|
||||
haiku: {
|
||||
id: 'haiku',
|
||||
name: 'Claude Haiku',
|
||||
modelString: 'claude-haiku-4-5',
|
||||
provider: 'claude',
|
||||
category: MODEL_CATEGORIES.CLAUDE,
|
||||
tier: MODEL_TIERS.BASIC,
|
||||
description: 'Fast and efficient for simple tasks',
|
||||
capabilities: ['code', 'text', 'tools'],
|
||||
maxTokens: 8192,
|
||||
contextWindow: 200000,
|
||||
supportsThinking: true,
|
||||
requiresAuth: 'CLAUDE_CODE_OAUTH_TOKEN'
|
||||
},
|
||||
sonnet: {
|
||||
id: 'sonnet',
|
||||
name: 'Claude Sonnet',
|
||||
modelString: 'claude-sonnet-4-20250514',
|
||||
provider: 'claude',
|
||||
category: MODEL_CATEGORIES.CLAUDE,
|
||||
tier: MODEL_TIERS.STANDARD,
|
||||
description: 'Balanced performance and capabilities',
|
||||
capabilities: ['code', 'text', 'tools', 'analysis'],
|
||||
maxTokens: 8192,
|
||||
contextWindow: 200000,
|
||||
supportsThinking: true,
|
||||
requiresAuth: 'CLAUDE_CODE_OAUTH_TOKEN'
|
||||
},
|
||||
opus: {
|
||||
id: 'opus',
|
||||
name: 'Claude Opus 4.5',
|
||||
modelString: 'claude-opus-4-5-20251101',
|
||||
provider: 'claude',
|
||||
category: MODEL_CATEGORIES.CLAUDE,
|
||||
tier: MODEL_TIERS.PREMIUM,
|
||||
description: 'Most capable model for complex tasks',
|
||||
capabilities: ['code', 'text', 'tools', 'analysis', 'reasoning'],
|
||||
maxTokens: 8192,
|
||||
contextWindow: 200000,
|
||||
supportsThinking: true,
|
||||
requiresAuth: 'CLAUDE_CODE_OAUTH_TOKEN',
|
||||
default: true
|
||||
},
|
||||
|
||||
// OpenAI GPT-5.1 Codex Models
|
||||
'gpt-5.1-codex-max': {
|
||||
id: 'gpt-5.1-codex-max',
|
||||
name: 'GPT-5.1 Codex Max',
|
||||
modelString: 'gpt-5.1-codex-max',
|
||||
provider: 'codex',
|
||||
category: MODEL_CATEGORIES.OPENAI,
|
||||
tier: MODEL_TIERS.PREMIUM,
|
||||
description: 'Latest flagship - deep and fast reasoning for coding',
|
||||
capabilities: ['code', 'text', 'tools', 'reasoning'],
|
||||
maxTokens: 32768,
|
||||
contextWindow: 128000,
|
||||
supportsThinking: false,
|
||||
requiresAuth: 'OPENAI_API_KEY',
|
||||
codexDefault: true
|
||||
},
|
||||
'gpt-5.1-codex': {
|
||||
id: 'gpt-5.1-codex',
|
||||
name: 'GPT-5.1 Codex',
|
||||
modelString: 'gpt-5.1-codex',
|
||||
provider: 'codex',
|
||||
category: MODEL_CATEGORIES.OPENAI,
|
||||
tier: MODEL_TIERS.STANDARD,
|
||||
description: 'Optimized for code generation',
|
||||
capabilities: ['code', 'text', 'tools'],
|
||||
maxTokens: 32768,
|
||||
contextWindow: 128000,
|
||||
supportsThinking: false,
|
||||
requiresAuth: 'OPENAI_API_KEY'
|
||||
},
|
||||
'gpt-5.1-codex-mini': {
|
||||
id: 'gpt-5.1-codex-mini',
|
||||
name: 'GPT-5.1 Codex Mini',
|
||||
modelString: 'gpt-5.1-codex-mini',
|
||||
provider: 'codex',
|
||||
category: MODEL_CATEGORIES.OPENAI,
|
||||
tier: MODEL_TIERS.BASIC,
|
||||
description: 'Faster and cheaper option',
|
||||
capabilities: ['code', 'text'],
|
||||
maxTokens: 16384,
|
||||
contextWindow: 128000,
|
||||
supportsThinking: false,
|
||||
requiresAuth: 'OPENAI_API_KEY'
|
||||
},
|
||||
'gpt-5.1': {
|
||||
id: 'gpt-5.1',
|
||||
name: 'GPT-5.1',
|
||||
modelString: 'gpt-5.1',
|
||||
provider: 'codex',
|
||||
category: MODEL_CATEGORIES.OPENAI,
|
||||
tier: MODEL_TIERS.STANDARD,
|
||||
description: 'Broad world knowledge with strong reasoning',
|
||||
capabilities: ['code', 'text', 'reasoning'],
|
||||
maxTokens: 32768,
|
||||
contextWindow: 128000,
|
||||
supportsThinking: false,
|
||||
requiresAuth: 'OPENAI_API_KEY'
|
||||
}
|
||||
};
|
||||
|
||||
/**
|
||||
* Model Registry class for querying and managing models
|
||||
*/
|
||||
class ModelRegistry {
|
||||
/**
|
||||
* Get all registered models
|
||||
* @returns {Object} All models
|
||||
*/
|
||||
static getAllModels() {
|
||||
return MODELS;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get model by ID
|
||||
* @param {string} modelId Model ID
|
||||
* @returns {Object|null} Model definition or null
|
||||
*/
|
||||
static getModel(modelId) {
|
||||
return MODELS[modelId] || null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get models by provider
|
||||
* @param {string} provider Provider name ('claude' or 'codex')
|
||||
* @returns {Object[]} Array of models for the provider
|
||||
*/
|
||||
static getModelsByProvider(provider) {
|
||||
return Object.values(MODELS).filter(m => m.provider === provider);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get models by category
|
||||
* @param {string} category Category name
|
||||
* @returns {Object[]} Array of models in the category
|
||||
*/
|
||||
static getModelsByCategory(category) {
|
||||
return Object.values(MODELS).filter(m => m.category === category);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get models by tier
|
||||
* @param {string} tier Tier name
|
||||
* @returns {Object[]} Array of models in the tier
|
||||
*/
|
||||
static getModelsByTier(tier) {
|
||||
return Object.values(MODELS).filter(m => m.tier === tier);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get default model for a provider
|
||||
* @param {string} provider Provider name
|
||||
* @returns {Object|null} Default model or null
|
||||
*/
|
||||
static getDefaultModel(provider = 'claude') {
|
||||
const models = this.getModelsByProvider(provider);
|
||||
if (provider === 'claude') {
|
||||
return models.find(m => m.default) || models[0];
|
||||
}
|
||||
if (provider === 'codex') {
|
||||
return models.find(m => m.codexDefault) || models[0];
|
||||
}
|
||||
return models[0];
|
||||
}
|
||||
|
||||
/**
|
||||
* Get model string (full model name) for a model ID
|
||||
* @param {string} modelId Model ID
|
||||
* @returns {string} Full model string
|
||||
*/
|
||||
static getModelString(modelId) {
|
||||
const model = this.getModel(modelId);
|
||||
return model ? model.modelString : modelId;
|
||||
}
|
||||
|
||||
/**
|
||||
* Determine provider for a model ID
|
||||
* @param {string} modelId Model ID
|
||||
* @returns {string} Provider name ('claude' or 'codex')
|
||||
*/
|
||||
static getProviderForModel(modelId) {
|
||||
const model = this.getModel(modelId);
|
||||
if (model) {
|
||||
return model.provider;
|
||||
}
|
||||
|
||||
// Fallback detection for models not explicitly registered (keeps legacy Codex IDs working)
|
||||
if (CODEX_MODEL_IDS.includes(modelId)) {
|
||||
return 'codex';
|
||||
}
|
||||
|
||||
return 'claude';
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if a model is a Claude model
|
||||
* @param {string} modelId Model ID
|
||||
* @returns {boolean} Whether it's a Claude model
|
||||
*/
|
||||
static isClaudeModel(modelId) {
|
||||
return this.getProviderForModel(modelId) === 'claude';
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if a model is a Codex/OpenAI model
|
||||
* @param {string} modelId Model ID
|
||||
* @returns {boolean} Whether it's a Codex model
|
||||
*/
|
||||
static isCodexModel(modelId) {
|
||||
return this.getProviderForModel(modelId) === 'codex';
|
||||
}
|
||||
|
||||
/**
|
||||
* Get models grouped by provider for UI display
|
||||
* @returns {Object} Models grouped by provider
|
||||
*/
|
||||
static getModelsGroupedByProvider() {
|
||||
return {
|
||||
claude: this.getModelsByProvider('claude'),
|
||||
codex: this.getModelsByProvider('codex')
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Get all model IDs as an array
|
||||
* @returns {string[]} Array of model IDs
|
||||
*/
|
||||
static getAllModelIds() {
|
||||
return Object.keys(MODELS);
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if model supports a specific capability
|
||||
* @param {string} modelId Model ID
|
||||
* @param {string} capability Capability name
|
||||
* @returns {boolean} Whether the model supports the capability
|
||||
*/
|
||||
static modelSupportsCapability(modelId, capability) {
|
||||
const model = this.getModel(modelId);
|
||||
return model ? model.capabilities.includes(capability) : false;
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if model supports extended thinking
|
||||
* @param {string} modelId Model ID
|
||||
* @returns {boolean} Whether the model supports thinking
|
||||
*/
|
||||
static modelSupportsThinking(modelId) {
|
||||
const model = this.getModel(modelId);
|
||||
return model ? model.supportsThinking : false;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get required authentication for a model
|
||||
* @param {string} modelId Model ID
|
||||
* @returns {string|null} Required auth env variable name
|
||||
*/
|
||||
static getRequiredAuth(modelId) {
|
||||
const model = this.getModel(modelId);
|
||||
return model ? model.requiresAuth : null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if authentication is available for a model
|
||||
* @param {string} modelId Model ID
|
||||
* @returns {boolean} Whether auth is available
|
||||
*/
|
||||
static hasAuthForModel(modelId) {
|
||||
const authVar = this.getRequiredAuth(modelId);
|
||||
if (!authVar) return false;
|
||||
return !!process.env[authVar];
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = {
|
||||
MODEL_CATEGORIES,
|
||||
MODEL_TIERS,
|
||||
MODELS,
|
||||
ModelRegistry
|
||||
};
|
||||
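For example (return values follow the registry definitions above):

// Illustrative lookups against the registry.
const { ModelRegistry } = require('./model-registry');

ModelRegistry.getModelString('opus');               // "claude-opus-4-5-20251101"
ModelRegistry.getProviderForModel('gpt-5.1');       // "codex"
ModelRegistry.isCodexModel('sonnet');               // false
ModelRegistry.modelSupportsThinking('haiku');       // true
ModelRegistry.getRequiredAuth('gpt-5.1-codex-max'); // "OPENAI_API_KEY"
ModelRegistry.hasAuthForModel('opus');              // true only if CLAUDE_CODE_OAUTH_TOKEN is set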
576
app/electron/services/worktree-manager.js
Normal file
@@ -0,0 +1,576 @@
|
||||
const path = require("path");
|
||||
const fs = require("fs/promises");
|
||||
const { exec, spawn } = require("child_process");
|
||||
const { promisify } = require("util");
|
||||
|
||||
const execAsync = promisify(exec);
|
||||
|
||||
/**
|
||||
* Worktree Manager - Handles git worktrees for feature isolation
|
||||
*
|
||||
* This service creates isolated git worktrees for each feature, allowing:
|
||||
* - Features to be worked on in isolation without affecting the main branch
|
||||
* - Easy rollback/revert by simply deleting the worktree
|
||||
* - Checkpointing - user can see changes in the worktree before merging
|
||||
*/
|
||||
class WorktreeManager {
|
||||
constructor() {
|
||||
// Cache for worktree info
|
||||
this.worktreeCache = new Map();
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the base worktree directory path
|
||||
*/
|
||||
getWorktreeBasePath(projectPath) {
|
||||
return path.join(projectPath, ".automaker", "worktrees");
|
||||
}
|
||||
|
||||
/**
|
||||
* Generate a safe branch name from feature description
|
||||
*/
|
||||
generateBranchName(feature) {
|
||||
// Create a slug from the description
|
||||
const slug = feature.description
|
||||
.toLowerCase()
|
||||
.replace(/[^a-z0-9\s-]/g, "") // Remove special chars
|
||||
.replace(/\s+/g, "-") // Replace spaces with hyphens
|
||||
.substring(0, 40); // Limit length
|
||||
|
||||
// Add feature ID for uniqueness
|
||||
const shortId = feature.id.replace("feature-", "").substring(0, 12);
|
||||
return `feature/${shortId}-${slug}`;
|
||||
}
|
||||
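For instance, given an illustrative feature object, the slugging above yields:

// Illustrative input/output for generateBranchName (not taken from real data).
const feature = {
  id: "feature-1699999999999",
  description: "Add dark mode toggle to settings!",
};
// slug    -> "add-dark-mode-toggle-to-settings"
// shortId -> "169999999999" (first 12 characters after "feature-")
// result  -> "feature/169999999999-add-dark-mode-toggle-to-settings"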
|
||||
/**
|
||||
* Check if the project is a git repository
|
||||
*/
|
||||
async isGitRepo(projectPath) {
|
||||
try {
|
||||
await execAsync("git rev-parse --is-inside-work-tree", { cwd: projectPath });
|
||||
return true;
|
||||
} catch {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the current branch name
|
||||
*/
|
||||
async getCurrentBranch(projectPath) {
|
||||
try {
|
||||
const { stdout } = await execAsync("git rev-parse --abbrev-ref HEAD", { cwd: projectPath });
|
||||
return stdout.trim();
|
||||
} catch (error) {
|
||||
console.error("[WorktreeManager] Failed to get current branch:", error);
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if a branch exists (local or remote)
|
||||
*/
|
||||
async branchExists(projectPath, branchName) {
|
||||
try {
|
||||
await execAsync(`git rev-parse --verify ${branchName}`, { cwd: projectPath });
|
||||
return true;
|
||||
} catch {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* List all existing worktrees
|
||||
*/
|
||||
async listWorktrees(projectPath) {
|
||||
try {
|
||||
const { stdout } = await execAsync("git worktree list --porcelain", { cwd: projectPath });
|
||||
const worktrees = [];
|
||||
const lines = stdout.split("\n");
|
||||
|
||||
let currentWorktree = null;
|
||||
for (const line of lines) {
|
||||
if (line.startsWith("worktree ")) {
|
||||
if (currentWorktree) {
|
||||
worktrees.push(currentWorktree);
|
||||
}
|
||||
currentWorktree = { path: line.replace("worktree ", "") };
|
||||
} else if (line.startsWith("branch ") && currentWorktree) {
|
||||
currentWorktree.branch = line.replace("branch refs/heads/", "");
|
||||
} else if (line.startsWith("HEAD ") && currentWorktree) {
|
||||
currentWorktree.head = line.replace("HEAD ", "");
|
||||
}
|
||||
}
|
||||
if (currentWorktree) {
|
||||
worktrees.push(currentWorktree);
|
||||
}
|
||||
|
||||
return worktrees;
|
||||
} catch (error) {
|
||||
console.error("[WorktreeManager] Failed to list worktrees:", error);
|
||||
return [];
|
||||
}
|
||||
}
|
||||
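The porcelain output parsed above consists of one block per worktree, separated by blank lines, for example (illustrative paths and hashes):

// Example `git worktree list --porcelain` output:
//   worktree /path/to/project
//   HEAD 1a2b3c4d5e6f7a8b9c0d1e2f3a4b5c6d7e8f9a0b
//   branch refs/heads/main
//
//   worktree /path/to/project/.automaker/worktrees/169999999999-add-dark-mode
//   HEAD 0b9a8f7e6d5c4b3a2f1e0d1c2b3a4f5e6d7c8b9a
//   branch refs/heads/feature/169999999999-add-dark-mode
//
// Parsed result:
//   [
//     { path: "/path/to/project", head: "1a2b3c...", branch: "main" },
//     { path: "/path/to/project/.automaker/worktrees/169999999999-add-dark-mode",
//       head: "0b9a8f...", branch: "feature/169999999999-add-dark-mode" },
//   ]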
|
||||
/**
|
||||
* Create a worktree for a feature
|
||||
* @param {string} projectPath - Path to the main project
|
||||
* @param {object} feature - Feature object with id and description
|
||||
* @returns {object} - { success, worktreePath, branchName, error }
|
||||
*/
|
||||
async createWorktree(projectPath, feature) {
|
||||
console.log(`[WorktreeManager] Creating worktree for feature: ${feature.id}`);
|
||||
|
||||
// Check if project is a git repo
|
||||
if (!await this.isGitRepo(projectPath)) {
|
||||
return { success: false, error: "Project is not a git repository" };
|
||||
}
|
||||
|
||||
const branchName = this.generateBranchName(feature);
|
||||
const worktreeBasePath = this.getWorktreeBasePath(projectPath);
|
||||
    const worktreePath = path.join(worktreeBasePath, branchName.replace("feature/", ""));

    try {
      // Ensure worktree directory exists
      await fs.mkdir(worktreeBasePath, { recursive: true });

      // Check if worktree already exists
      const worktrees = await this.listWorktrees(projectPath);
      const existingWorktree = worktrees.find(
        w => w.path === worktreePath || w.branch === branchName
      );

      if (existingWorktree) {
        console.log(`[WorktreeManager] Worktree already exists for feature: ${feature.id}`);
        return {
          success: true,
          worktreePath: existingWorktree.path,
          branchName: existingWorktree.branch,
          existed: true,
        };
      }

      // Get current branch to base the new branch on
      const baseBranch = await this.getCurrentBranch(projectPath);
      if (!baseBranch) {
        return { success: false, error: "Could not determine current branch" };
      }

      // Check if branch already exists
      const branchExists = await this.branchExists(projectPath, branchName);

      if (branchExists) {
        // Use existing branch
        console.log(`[WorktreeManager] Using existing branch: ${branchName}`);
        await execAsync(`git worktree add "${worktreePath}" ${branchName}`, { cwd: projectPath });
      } else {
        // Create new worktree with new branch
        console.log(`[WorktreeManager] Creating new branch: ${branchName} based on ${baseBranch}`);
        await execAsync(`git worktree add -b ${branchName} "${worktreePath}" ${baseBranch}`, { cwd: projectPath });
      }

      // Copy .automaker directory to worktree (except worktrees directory itself to avoid recursion)
      const automakerSrc = path.join(projectPath, ".automaker");
      const automakerDst = path.join(worktreePath, ".automaker");

      try {
        await fs.mkdir(automakerDst, { recursive: true });

        // Copy feature_list.json
        const featureListSrc = path.join(automakerSrc, "feature_list.json");
        const featureListDst = path.join(automakerDst, "feature_list.json");
        try {
          const content = await fs.readFile(featureListSrc, "utf-8");
          await fs.writeFile(featureListDst, content, "utf-8");
        } catch {
          // Feature list might not exist yet
        }

        // Copy app_spec.txt if it exists
        const appSpecSrc = path.join(automakerSrc, "app_spec.txt");
        const appSpecDst = path.join(automakerDst, "app_spec.txt");
        try {
          const content = await fs.readFile(appSpecSrc, "utf-8");
          await fs.writeFile(appSpecDst, content, "utf-8");
        } catch {
          // App spec might not exist yet
        }

        // Copy categories.json if it exists
        const categoriesSrc = path.join(automakerSrc, "categories.json");
        const categoriesDst = path.join(automakerDst, "categories.json");
        try {
          const content = await fs.readFile(categoriesSrc, "utf-8");
          await fs.writeFile(categoriesDst, content, "utf-8");
        } catch {
          // Categories might not exist yet
        }
      } catch (error) {
        console.warn("[WorktreeManager] Failed to copy .automaker directory:", error);
      }

      // Store worktree info in cache
      this.worktreeCache.set(feature.id, {
        worktreePath,
        branchName,
        createdAt: new Date().toISOString(),
        baseBranch,
      });

      console.log(`[WorktreeManager] Worktree created at: ${worktreePath}`);
      return {
        success: true,
        worktreePath,
        branchName,
        baseBranch,
        existed: false,
      };
    } catch (error) {
      console.error("[WorktreeManager] Failed to create worktree:", error);
      return { success: false, error: error.message };
    }
  }
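
  // For reference, a successful result from createWorktree has this shape
  // (the path and branch values below are made-up examples, not real output):
  //
  //   {
  //     success: true,
  //     worktreePath: "<worktreeBasePath>/my-feature",
  //     branchName: "feature/my-feature",
  //     baseBranch: "main",
  //     existed: false, // true only when an already-existing worktree was reused
  //   }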

  /**
   * Get worktree info for a feature
   */
  async getWorktreeInfo(projectPath, featureId) {
    // Check cache first
    if (this.worktreeCache.has(featureId)) {
      return { success: true, ...this.worktreeCache.get(featureId) };
    }

    // Scan worktrees to find matching one
    const worktrees = await this.listWorktrees(projectPath);
    const worktreeBasePath = this.getWorktreeBasePath(projectPath);

    for (const worktree of worktrees) {
      // Check if this worktree is in our worktree directory
      if (worktree.path.startsWith(worktreeBasePath)) {
        // Check if the feature ID is in the branch name
        const shortId = featureId.replace("feature-", "").substring(0, 12);
        if (worktree.branch && worktree.branch.includes(shortId)) {
          const info = {
            worktreePath: worktree.path,
            branchName: worktree.branch,
            head: worktree.head,
          };
          this.worktreeCache.set(featureId, info);
          return { success: true, ...info };
        }
      }
    }

    return { success: false, error: "Worktree not found" };
  }

  /**
   * Remove a worktree for a feature
   * This effectively reverts all changes made by the agent
   */
  async removeWorktree(projectPath, featureId, deleteBranch = false) {
    console.log(`[WorktreeManager] Removing worktree for feature: ${featureId}`);

    const worktreeInfo = await this.getWorktreeInfo(projectPath, featureId);
    if (!worktreeInfo.success) {
      console.log(`[WorktreeManager] No worktree found for feature: ${featureId}`);
      return { success: true, message: "No worktree to remove" };
    }

    const { worktreePath, branchName } = worktreeInfo;

    try {
      // Remove the worktree
      await execAsync(`git worktree remove "${worktreePath}" --force`, { cwd: projectPath });
      console.log(`[WorktreeManager] Worktree removed: ${worktreePath}`);

      // Optionally delete the branch too
      if (deleteBranch && branchName) {
        try {
          await execAsync(`git branch -D ${branchName}`, { cwd: projectPath });
          console.log(`[WorktreeManager] Branch deleted: ${branchName}`);
        } catch (error) {
          console.warn(`[WorktreeManager] Could not delete branch ${branchName}:`, error.message);
        }
      }

      // Remove from cache
      this.worktreeCache.delete(featureId);

      return { success: true, removedPath: worktreePath, removedBranch: deleteBranch ? branchName : null };
    } catch (error) {
      console.error("[WorktreeManager] Failed to remove worktree:", error);
      return { success: false, error: error.message };
    }
  }

  /**
   * Get status of changes in a worktree
   */
  async getWorktreeStatus(worktreePath) {
    try {
      const { stdout: statusOutput } = await execAsync("git status --porcelain", { cwd: worktreePath });
      const { stdout: diffStat } = await execAsync("git diff --stat", { cwd: worktreePath });
      const { stdout: commitLog } = await execAsync("git log --oneline -10", { cwd: worktreePath });

      const files = statusOutput.trim().split("\n").filter(Boolean);
      const commits = commitLog.trim().split("\n").filter(Boolean);

      return {
        success: true,
        modifiedFiles: files.length,
        files: files.slice(0, 20), // Limit to 20 files
        diffStat: diffStat.trim(),
        recentCommits: commits.slice(0, 5), // Last 5 commits
      };
    } catch (error) {
      console.error("[WorktreeManager] Failed to get worktree status:", error);
      return { success: false, error: error.message };
    }
  }

  /**
   * Get detailed file diff content for a worktree
   * Returns unified diff format for all changes
   */
  async getFileDiffs(worktreePath) {
    try {
      // Get both staged and unstaged diffs
      const { stdout: unstagedDiff } = await execAsync("git diff --no-color", {
        cwd: worktreePath,
        maxBuffer: 10 * 1024 * 1024 // 10MB buffer for large diffs
      });
      const { stdout: stagedDiff } = await execAsync("git diff --cached --no-color", {
        cwd: worktreePath,
        maxBuffer: 10 * 1024 * 1024
      });

      // Get list of files with their status
      const { stdout: statusOutput } = await execAsync("git status --porcelain", { cwd: worktreePath });
      const files = statusOutput.trim().split("\n").filter(Boolean);

      // Parse file statuses
      const fileStatuses = files.map(line => {
        const status = line.substring(0, 2);
        const filePath = line.substring(3);
        return {
          status: status.trim() || 'M',
          path: filePath,
          statusText: this.getStatusText(status)
        };
      });

      // Combine diffs
      const combinedDiff = [stagedDiff, unstagedDiff].filter(Boolean).join("\n");

      return {
        success: true,
        diff: combinedDiff,
        files: fileStatuses,
        hasChanges: files.length > 0
      };
    } catch (error) {
      console.error("[WorktreeManager] Failed to get file diffs:", error);
      return { success: false, error: error.message };
    }
  }

  /**
   * Get human-readable status text from git status code
   */
  getStatusText(status) {
    const statusMap = {
      'M': 'Modified',
      'A': 'Added',
      'D': 'Deleted',
      'R': 'Renamed',
      'C': 'Copied',
      'U': 'Updated',
      '?': 'Untracked',
      '!': 'Ignored'
    };
    const firstChar = status.charAt(0);
    const secondChar = status.charAt(1);
    return statusMap[firstChar] || statusMap[secondChar] || 'Changed';
  }
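
  // Worked example (illustrative): `git status --porcelain` prefixes each line
  // with a two-character XY code (staged / unstaged). For a line like
  // "?? notes.txt" the code "??" maps to 'Untracked'; for " M index.js" the
  // code " M" maps to 'Modified'; an unrecognised code falls back to 'Changed'.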

  /**
   * Get diff for a specific file in a worktree
   */
  async getFileDiff(worktreePath, filePath) {
    try {
      // Try to get unstaged diff first, then staged if no unstaged changes
      let diff = '';
      try {
        const { stdout } = await execAsync(`git diff --no-color -- "${filePath}"`, {
          cwd: worktreePath,
          maxBuffer: 5 * 1024 * 1024
        });
        diff = stdout;
      } catch {
        // File might be staged
      }

      if (!diff) {
        try {
          const { stdout } = await execAsync(`git diff --cached --no-color -- "${filePath}"`, {
            cwd: worktreePath,
            maxBuffer: 5 * 1024 * 1024
          });
          diff = stdout;
        } catch {
          // File might be untracked, show the content
        }
      }

      // If still no diff, might be an untracked file - show the content
      if (!diff) {
        try {
          const fullPath = path.join(worktreePath, filePath);
          const content = await fs.readFile(fullPath, 'utf-8');
          diff = `+++ ${filePath} (new file)\n${content.split('\n').map(l => '+' + l).join('\n')}`;
        } catch {
          diff = '(Unable to read file content)';
        }
      }

      return {
        success: true,
        diff,
        filePath
      };
    } catch (error) {
      console.error(`[WorktreeManager] Failed to get diff for ${filePath}:`, error);
      return { success: false, error: error.message };
    }
  }
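
  // Example (illustrative): for an untracked file "docs/note.md" containing two
  // lines, the synthesised pseudo-diff produced above would look like:
  //
  //   +++ docs/note.md (new file)
  //   +First line
  //   +Second line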

  /**
   * Merge worktree changes back to the main branch
   */
  async mergeWorktree(projectPath, featureId, options = {}) {
    console.log(`[WorktreeManager] Merging worktree for feature: ${featureId}`);

    const worktreeInfo = await this.getWorktreeInfo(projectPath, featureId);
    if (!worktreeInfo.success) {
      return { success: false, error: "Worktree not found" };
    }

    const { branchName, worktreePath } = worktreeInfo;
    const baseBranch = await this.getCurrentBranch(projectPath);

    try {
      // First commit any uncommitted changes in the worktree
      const { stdout: status } = await execAsync("git status --porcelain", { cwd: worktreePath });
      if (status.trim()) {
        // There are uncommitted changes - commit them
        await execAsync("git add -A", { cwd: worktreePath });
        const commitMsg = options.commitMessage || `feat: complete ${featureId}`;
        await execAsync(`git commit -m "${commitMsg}"`, { cwd: worktreePath });
      }

      // Merge the feature branch into the current branch in the main repo
      if (options.squash) {
        await execAsync(`git merge --squash ${branchName}`, { cwd: projectPath });
        const squashMsg = options.squashMessage || `feat: ${featureId} - squashed merge`;
        await execAsync(`git commit -m "${squashMsg}"`, { cwd: projectPath });
      } else {
        await execAsync(`git merge ${branchName} --no-ff -m "Merge ${branchName}"`, { cwd: projectPath });
      }

      console.log(`[WorktreeManager] Successfully merged ${branchName} into ${baseBranch}`);

      // Optionally cleanup worktree after merge
      if (options.cleanup) {
        await this.removeWorktree(projectPath, featureId, true);
      }

      return {
        success: true,
        mergedBranch: branchName,
        intoBranch: baseBranch,
      };
    } catch (error) {
      console.error("[WorktreeManager] Failed to merge worktree:", error);
      return { success: false, error: error.message };
    }
  }
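
  // Illustrative call (worktreeManager, projectPath, and the feature ID are
  // caller-side names made up for this example): squash-merge a feature's
  // worktree into the current branch, then remove the worktree and its branch.
  //
  //   await worktreeManager.mergeWorktree(projectPath, "feature-abc123def456", {
  //     squash: true,
  //     squashMessage: "feat: abc123def456 - squashed merge",
  //     cleanup: true,
  //   });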

  /**
   * Sync changes from main branch to worktree (rebase or merge)
   */
  async syncWorktree(projectPath, featureId, method = "rebase") {
    console.log(`[WorktreeManager] Syncing worktree for feature: ${featureId}`);

    const worktreeInfo = await this.getWorktreeInfo(projectPath, featureId);
    if (!worktreeInfo.success) {
      return { success: false, error: "Worktree not found" };
    }

    const { worktreePath } = worktreeInfo;
    // Cached entries include baseBranch, but entries discovered by scanning do
    // not; fall back to the repository's current branch in that case.
    const baseBranch = worktreeInfo.baseBranch || await this.getCurrentBranch(projectPath);

    try {
      if (method === "rebase") {
        await execAsync(`git rebase ${baseBranch}`, { cwd: worktreePath });
      } else {
        await execAsync(`git merge ${baseBranch}`, { cwd: worktreePath });
      }

      return { success: true, method };
    } catch (error) {
      console.error("[WorktreeManager] Failed to sync worktree:", error);
      return { success: false, error: error.message };
    }
  }

  /**
   * Get list of all feature worktrees
   */
  async getAllFeatureWorktrees(projectPath) {
    const worktrees = await this.listWorktrees(projectPath);
    const worktreeBasePath = this.getWorktreeBasePath(projectPath);

    return worktrees.filter(w =>
      w.path.startsWith(worktreeBasePath) &&
      w.branch &&
      w.branch.startsWith("feature/")
    );
  }

  /**
   * Cleanup orphaned worktrees (worktrees without matching features)
   */
  async cleanupOrphanedWorktrees(projectPath, activeFeatureIds) {
    console.log("[WorktreeManager] Cleaning up orphaned worktrees...");

    const worktrees = await this.getAllFeatureWorktrees(projectPath);
    const cleaned = [];

    for (const worktree of worktrees) {
      // Extract feature ID from branch name
      const branchParts = worktree.branch.replace("feature/", "").split("-");
      const shortId = branchParts[0];

      // Check if any active feature has this short ID
      const hasMatchingFeature = activeFeatureIds.some(id => {
        const featureShortId = id.replace("feature-", "").substring(0, 12);
        return featureShortId === shortId;
      });

      if (!hasMatchingFeature) {
        console.log(`[WorktreeManager] Removing orphaned worktree: ${worktree.path}`);
        try {
          await execAsync(`git worktree remove "${worktree.path}" --force`, { cwd: projectPath });
          await execAsync(`git branch -D ${worktree.branch}`, { cwd: projectPath });
          cleaned.push(worktree.path);
        } catch (error) {
          console.warn(`[WorktreeManager] Failed to cleanup worktree ${worktree.path}:`, error.message);
        }
      }
    }

    return { success: true, cleaned };
  }
}

module.exports = new WorktreeManager();
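
// Minimal usage sketch (illustrative; the require path and worktree path below
// are hypothetical): the module exports a singleton instance, so all callers
// share one in-memory worktree cache.
//
//   const worktreeManager = require("./services/worktree-manager");
//   const status = await worktreeManager.getWorktreeStatus("/path/to/worktree");
//   if (status.success) console.log(status.diffStat);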