Merge branch 'v0.9.0rc' into feat/subagents-skills
@@ -53,6 +53,8 @@ import { SettingsService } from './services/settings-service.js';
import { createSpecRegenerationRoutes } from './routes/app-spec/index.js';
import { createClaudeRoutes } from './routes/claude/index.js';
import { ClaudeUsageService } from './services/claude-usage-service.js';
import { createCodexRoutes } from './routes/codex/index.js';
import { CodexUsageService } from './services/codex-usage-service.js';
import { createGitHubRoutes } from './routes/github/index.js';
import { createContextRoutes } from './routes/context/index.js';
import { createBacklogPlanRoutes } from './routes/backlog-plan/index.js';
@@ -166,6 +168,7 @@ const agentService = new AgentService(DATA_DIR, events, settingsService);
const featureLoader = new FeatureLoader();
const autoModeService = new AutoModeService(events, settingsService);
const claudeUsageService = new ClaudeUsageService();
const codexUsageService = new CodexUsageService();
const mcpTestService = new MCPTestService(settingsService);
const ideationService = new IdeationService(events, settingsService, featureLoader);

@@ -188,9 +191,10 @@ setInterval(() => {
// This helps prevent CSRF and content-type confusion attacks
app.use('/api', requireJsonContentType);

// Mount API routes - health and auth are unauthenticated
// Mount API routes - health, auth, and setup are unauthenticated
app.use('/api/health', createHealthRoutes());
app.use('/api/auth', createAuthRoutes());
app.use('/api/setup', createSetupRoutes());

// Apply authentication to all other routes
app.use('/api', authMiddleware);
@@ -206,7 +210,6 @@ app.use('/api/auto-mode', createAutoModeRoutes(autoModeService));
app.use('/api/enhance-prompt', createEnhancePromptRoutes(settingsService));
app.use('/api/worktree', createWorktreeRoutes());
app.use('/api/git', createGitRoutes());
app.use('/api/setup', createSetupRoutes());
app.use('/api/suggestions', createSuggestionsRoutes(events, settingsService));
app.use('/api/models', createModelsRoutes());
app.use('/api/spec-regeneration', createSpecRegenerationRoutes(events, settingsService));
@@ -216,6 +219,7 @@ app.use('/api/templates', createTemplatesRoutes());
app.use('/api/terminal', createTerminalRoutes());
app.use('/api/settings', createSettingsRoutes(settingsService));
app.use('/api/claude', createClaudeRoutes(claudeUsageService));
app.use('/api/codex', createCodexRoutes(codexUsageService));
app.use('/api/github', createGitHubRoutes(events, settingsService));
app.use('/api/context', createContextRoutes(settingsService));
app.use('/api/backlog-plan', createBacklogPlanRoutes(events, settingsService));

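The requireJsonContentType guard applied above rejects non-JSON bodies before any route handler runs. A minimal sketch of what such a middleware might look like, for illustration only (the actual implementation is not shown in this diff):

// Hypothetical content-type guard in the spirit of requireJsonContentType above.
import type { Request, Response, NextFunction } from 'express';

export function exampleRequireJsonContentType(req: Request, res: Response, next: NextFunction): void {
  const hasBody = !['GET', 'HEAD', 'OPTIONS'].includes(req.method);
  if (hasBody && !req.is('application/json')) {
    res.status(415).json({ error: 'Content-Type must be application/json' });
    return;
  }
  next();
}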
263 apps/server/src/lib/auth-utils.ts Normal file
@@ -0,0 +1,263 @@
/**
|
||||
* Secure authentication utilities that avoid environment variable race conditions
|
||||
*/
|
||||
|
||||
import { spawn } from 'child_process';
|
||||
import { createLogger } from '@automaker/utils';
|
||||
|
||||
const logger = createLogger('AuthUtils');
|
||||
|
||||
export interface SecureAuthEnv {
|
||||
[key: string]: string | undefined;
|
||||
}
|
||||
|
||||
export interface AuthValidationResult {
|
||||
isValid: boolean;
|
||||
error?: string;
|
||||
normalizedKey?: string;
|
||||
}
|
||||
|
||||
/**
|
||||
* Validates API key format without modifying process.env
|
||||
*/
|
||||
export function validateApiKey(
|
||||
key: string,
|
||||
provider: 'anthropic' | 'openai' | 'cursor'
|
||||
): AuthValidationResult {
|
||||
if (!key || typeof key !== 'string' || key.trim().length === 0) {
|
||||
return { isValid: false, error: 'API key is required' };
|
||||
}
|
||||
|
||||
const trimmedKey = key.trim();
|
||||
|
||||
switch (provider) {
|
||||
case 'anthropic':
|
||||
if (!trimmedKey.startsWith('sk-ant-')) {
|
||||
return {
|
||||
isValid: false,
|
||||
error: 'Invalid Anthropic API key format. Should start with "sk-ant-"',
|
||||
};
|
||||
}
|
||||
if (trimmedKey.length < 20) {
|
||||
return { isValid: false, error: 'Anthropic API key too short' };
|
||||
}
|
||||
break;
|
||||
|
||||
case 'openai':
|
||||
if (!trimmedKey.startsWith('sk-')) {
|
||||
return { isValid: false, error: 'Invalid OpenAI API key format. Should start with "sk-"' };
|
||||
}
|
||||
if (trimmedKey.length < 20) {
|
||||
return { isValid: false, error: 'OpenAI API key too short' };
|
||||
}
|
||||
break;
|
||||
|
||||
case 'cursor':
|
||||
// Cursor API keys might have different format
|
||||
if (trimmedKey.length < 10) {
|
||||
return { isValid: false, error: 'Cursor API key too short' };
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
return { isValid: true, normalizedKey: trimmedKey };
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates a secure environment object for authentication testing
|
||||
* without modifying the global process.env
|
||||
*/
|
||||
export function createSecureAuthEnv(
|
||||
authMethod: 'cli' | 'api_key',
|
||||
apiKey?: string,
|
||||
provider: 'anthropic' | 'openai' | 'cursor' = 'anthropic'
|
||||
): SecureAuthEnv {
|
||||
const env: SecureAuthEnv = { ...process.env };
|
||||
|
||||
if (authMethod === 'cli') {
|
||||
// For CLI auth, remove the API key to force CLI authentication
|
||||
const envKey = provider === 'openai' ? 'OPENAI_API_KEY' : 'ANTHROPIC_API_KEY';
|
||||
delete env[envKey];
|
||||
} else if (authMethod === 'api_key' && apiKey) {
|
||||
// For API key auth, validate and set the provided key
|
||||
const validation = validateApiKey(apiKey, provider);
|
||||
if (!validation.isValid) {
|
||||
throw new Error(validation.error);
|
||||
}
|
||||
const envKey = provider === 'openai' ? 'OPENAI_API_KEY' : 'ANTHROPIC_API_KEY';
|
||||
env[envKey] = validation.normalizedKey;
|
||||
}
|
||||
|
||||
return env;
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates a temporary environment override for the current process
|
||||
* WARNING: This should only be used in isolated contexts and immediately cleaned up
|
||||
*/
|
||||
export function createTempEnvOverride(authEnv: SecureAuthEnv): () => void {
|
||||
const originalEnv = { ...process.env };
|
||||
|
||||
// Apply the auth environment
|
||||
Object.assign(process.env, authEnv);
|
||||
|
||||
// Return cleanup function
|
||||
return () => {
|
||||
// Restore original environment
|
||||
Object.keys(process.env).forEach((key) => {
|
||||
if (!(key in originalEnv)) {
|
||||
delete process.env[key];
|
||||
}
|
||||
});
|
||||
Object.assign(process.env, originalEnv);
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Spawns a process with secure environment isolation
|
||||
*/
|
||||
export function spawnSecureAuth(
|
||||
command: string,
|
||||
args: string[],
|
||||
authEnv: SecureAuthEnv,
|
||||
options: {
|
||||
cwd?: string;
|
||||
timeout?: number;
|
||||
} = {}
|
||||
): Promise<{ stdout: string; stderr: string; exitCode: number | null }> {
|
||||
return new Promise((resolve, reject) => {
|
||||
const { cwd = process.cwd(), timeout = 30000 } = options;
|
||||
|
||||
logger.debug(`Spawning secure auth process: ${command} ${args.join(' ')}`);
|
||||
|
||||
const child = spawn(command, args, {
|
||||
cwd,
|
||||
env: authEnv,
|
||||
stdio: 'pipe',
|
||||
shell: false,
|
||||
});
|
||||
|
||||
let stdout = '';
|
||||
let stderr = '';
|
||||
let isResolved = false;
|
||||
|
||||
const timeoutId = setTimeout(() => {
|
||||
if (!isResolved) {
|
||||
child.kill('SIGTERM');
|
||||
isResolved = true;
|
||||
reject(new Error('Authentication process timed out'));
|
||||
}
|
||||
}, timeout);
|
||||
|
||||
child.stdout?.on('data', (data) => {
|
||||
stdout += data.toString();
|
||||
});
|
||||
|
||||
child.stderr?.on('data', (data) => {
|
||||
stderr += data.toString();
|
||||
});
|
||||
|
||||
child.on('close', (code) => {
|
||||
clearTimeout(timeoutId);
|
||||
if (!isResolved) {
|
||||
isResolved = true;
|
||||
resolve({ stdout, stderr, exitCode: code });
|
||||
}
|
||||
});
|
||||
|
||||
child.on('error', (error) => {
|
||||
clearTimeout(timeoutId);
|
||||
if (!isResolved) {
|
||||
isResolved = true;
|
||||
reject(error);
|
||||
}
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Safely extracts environment variable without race conditions
|
||||
*/
|
||||
export function safeGetEnv(key: string): string | undefined {
|
||||
return process.env[key];
|
||||
}
|
||||
|
||||
/**
|
||||
* Checks if an environment variable would be modified without actually modifying it
|
||||
*/
|
||||
export function wouldModifyEnv(key: string, newValue: string): boolean {
|
||||
const currentValue = safeGetEnv(key);
|
||||
return currentValue !== newValue;
|
||||
}
|
||||
|
||||
/**
|
||||
* Secure auth session management
|
||||
*/
|
||||
export class AuthSessionManager {
|
||||
private static activeSessions = new Map<string, SecureAuthEnv>();
|
||||
|
||||
static createSession(
|
||||
sessionId: string,
|
||||
authMethod: 'cli' | 'api_key',
|
||||
apiKey?: string,
|
||||
provider: 'anthropic' | 'openai' | 'cursor' = 'anthropic'
|
||||
): SecureAuthEnv {
|
||||
const env = createSecureAuthEnv(authMethod, apiKey, provider);
|
||||
this.activeSessions.set(sessionId, env);
|
||||
return env;
|
||||
}
|
||||
|
||||
static getSession(sessionId: string): SecureAuthEnv | undefined {
|
||||
return this.activeSessions.get(sessionId);
|
||||
}
|
||||
|
||||
static destroySession(sessionId: string): void {
|
||||
this.activeSessions.delete(sessionId);
|
||||
}
|
||||
|
||||
static cleanup(): void {
|
||||
this.activeSessions.clear();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Rate limiting for auth attempts to prevent abuse
|
||||
*/
|
||||
export class AuthRateLimiter {
|
||||
private attempts = new Map<string, { count: number; lastAttempt: number }>();
|
||||
|
||||
constructor(
|
||||
private maxAttempts = 5,
|
||||
private windowMs = 60000
|
||||
) {}
|
||||
|
||||
canAttempt(identifier: string): boolean {
|
||||
const now = Date.now();
|
||||
const record = this.attempts.get(identifier);
|
||||
|
||||
if (!record || now - record.lastAttempt > this.windowMs) {
|
||||
this.attempts.set(identifier, { count: 1, lastAttempt: now });
|
||||
return true;
|
||||
}
|
||||
|
||||
if (record.count >= this.maxAttempts) {
|
||||
return false;
|
||||
}
|
||||
|
||||
record.count++;
|
||||
record.lastAttempt = now;
|
||||
return true;
|
||||
}
|
||||
|
||||
getRemainingAttempts(identifier: string): number {
|
||||
const record = this.attempts.get(identifier);
|
||||
if (!record) return this.maxAttempts;
|
||||
return Math.max(0, this.maxAttempts - record.count);
|
||||
}
|
||||
|
||||
getResetTime(identifier: string): Date | null {
|
||||
const record = this.attempts.get(identifier);
|
||||
if (!record) return null;
|
||||
return new Date(record.lastAttempt + this.windowMs);
|
||||
}
|
||||
}
|
||||
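The helpers above (createSecureAuthEnv, spawnSecureAuth, AuthRateLimiter) are meant to be composed when probing provider credentials without mutating the global environment. A minimal usage sketch, assuming an illustrative import path and a `claude --version` probe command (neither is taken from this commit):

// Illustrative only: the import path and probe command are assumptions.
import { createSecureAuthEnv, spawnSecureAuth, AuthRateLimiter } from './lib/auth-utils.js';

const limiter = new AuthRateLimiter(5, 60_000);

async function testAnthropicKey(sessionId: string, apiKey: string): Promise<boolean> {
  if (!limiter.canAttempt(sessionId)) {
    throw new Error(`Too many auth attempts; retry after ${limiter.getResetTime(sessionId)}`);
  }
  // Build an isolated environment instead of mutating process.env.
  const env = createSecureAuthEnv('api_key', apiKey, 'anthropic');
  const { exitCode } = await spawnSecureAuth('claude', ['--version'], env, { timeout: 10_000 });
  return exitCode === 0;
}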
@@ -262,7 +262,7 @@ export function getSessionCookieOptions(): {
  return {
    httpOnly: true, // JavaScript cannot access this cookie
    secure: process.env.NODE_ENV === 'production', // HTTPS only in production
    sameSite: 'strict', // Only sent for same-site requests (CSRF protection)
    sameSite: 'lax', // Sent for same-site requests and top-level navigations, but not cross-origin fetch/XHR
    maxAge: SESSION_MAX_AGE_MS,
    path: '/',
  };

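Switching sameSite from 'strict' to 'lax' keeps the cookie off cross-origin fetch/XHR requests while still sending it on top-level navigations, so following a link into the app does not drop the session. For context, a consumer of these options might look like the sketch below; the Express route, cookie name, and import path are assumptions, not part of this diff:

// Hypothetical consumer of getSessionCookieOptions(); route and cookie name are illustrative.
import express from 'express';
import { getSessionCookieOptions } from './lib/auth-utils.js'; // assumed location

const app = express();
app.post('/api/auth/login', (_req, res) => {
  const sessionToken = 'opaque-session-token'; // issued by the real auth layer in practice
  res.cookie('session', sessionToken, getSessionCookieOptions());
  res.json({ success: true });
});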
447 apps/server/src/lib/cli-detection.ts Normal file
@@ -0,0 +1,447 @@
/**
|
||||
* Unified CLI Detection Framework
|
||||
*
|
||||
* Provides consistent CLI detection and management across all providers
|
||||
*/
|
||||
|
||||
import { spawn, execSync } from 'child_process';
|
||||
import * as fs from 'fs';
|
||||
import * as path from 'path';
|
||||
import * as os from 'os';
|
||||
import { createLogger } from '@automaker/utils';
|
||||
|
||||
const logger = createLogger('CliDetection');
|
||||
|
||||
export interface CliInfo {
|
||||
name: string;
|
||||
command: string;
|
||||
version?: string;
|
||||
path?: string;
|
||||
installed: boolean;
|
||||
authenticated: boolean;
|
||||
authMethod: 'cli' | 'api_key' | 'none';
|
||||
platform?: string;
|
||||
architectures?: string[];
|
||||
}
|
||||
|
||||
export interface CliDetectionOptions {
|
||||
timeout?: number;
|
||||
includeWsl?: boolean;
|
||||
wslDistribution?: string;
|
||||
}
|
||||
|
||||
export interface CliDetectionResult {
|
||||
cli: CliInfo;
|
||||
detected: boolean;
|
||||
issues: string[];
|
||||
}
|
||||
|
||||
export interface UnifiedCliDetection {
|
||||
claude?: CliDetectionResult;
|
||||
codex?: CliDetectionResult;
|
||||
cursor?: CliDetectionResult;
|
||||
}
|
||||
|
||||
/**
|
||||
* CLI Configuration for different providers
|
||||
*/
|
||||
const CLI_CONFIGS = {
|
||||
claude: {
|
||||
name: 'Claude CLI',
|
||||
commands: ['claude'],
|
||||
versionArgs: ['--version'],
|
||||
installCommands: {
|
||||
darwin: 'brew install anthropics/claude/claude',
|
||||
linux: 'curl -fsSL https://claude.ai/install.sh | sh',
|
||||
win32: 'iwr https://claude.ai/install.ps1 -UseBasicParsing | iex',
|
||||
},
|
||||
},
|
||||
codex: {
|
||||
name: 'Codex CLI',
|
||||
commands: ['codex', 'openai'],
|
||||
versionArgs: ['--version'],
|
||||
installCommands: {
|
||||
darwin: 'npm install -g @openai/codex-cli',
|
||||
linux: 'npm install -g @openai/codex-cli',
|
||||
win32: 'npm install -g @openai/codex-cli',
|
||||
},
|
||||
},
|
||||
cursor: {
|
||||
name: 'Cursor CLI',
|
||||
commands: ['cursor-agent', 'cursor'],
|
||||
versionArgs: ['--version'],
|
||||
installCommands: {
|
||||
darwin: 'brew install cursor/cursor/cursor-agent',
|
||||
linux: 'curl -fsSL https://cursor.sh/install.sh | sh',
|
||||
win32: 'iwr https://cursor.sh/install.ps1 -UseBasicParsing | iex',
|
||||
},
|
||||
},
|
||||
} as const;
|
||||
|
||||
/**
|
||||
* Detect if a CLI is installed and available
|
||||
*/
|
||||
export async function detectCli(
|
||||
provider: keyof typeof CLI_CONFIGS,
|
||||
options: CliDetectionOptions = {}
|
||||
): Promise<CliDetectionResult> {
|
||||
const config = CLI_CONFIGS[provider];
|
||||
const { timeout = 5000, includeWsl = false, wslDistribution } = options;
|
||||
const issues: string[] = [];
|
||||
|
||||
const cliInfo: CliInfo = {
|
||||
name: config.name,
|
||||
command: '',
|
||||
installed: false,
|
||||
authenticated: false,
|
||||
authMethod: 'none',
|
||||
};
|
||||
|
||||
try {
|
||||
// Find the command in PATH
|
||||
const command = await findCommand([...config.commands]);
|
||||
if (command) {
|
||||
cliInfo.command = command;
|
||||
}
|
||||
|
||||
if (!cliInfo.command) {
|
||||
issues.push(`${config.name} not found in PATH`);
|
||||
return { cli: cliInfo, detected: false, issues };
|
||||
}
|
||||
|
||||
cliInfo.path = cliInfo.command;
|
||||
cliInfo.installed = true;
|
||||
|
||||
// Get version
|
||||
try {
|
||||
cliInfo.version = await getCliVersion(cliInfo.command, [...config.versionArgs], timeout);
|
||||
} catch (error) {
|
||||
issues.push(`Failed to get ${config.name} version: ${error}`);
|
||||
}
|
||||
|
||||
// Check authentication
|
||||
cliInfo.authMethod = await checkCliAuth(provider, cliInfo.command);
|
||||
cliInfo.authenticated = cliInfo.authMethod !== 'none';
|
||||
|
||||
return { cli: cliInfo, detected: true, issues };
|
||||
} catch (error) {
|
||||
issues.push(`Error detecting ${config.name}: ${error}`);
|
||||
return { cli: cliInfo, detected: false, issues };
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Detect all CLIs in the system
|
||||
*/
|
||||
export async function detectAllCLis(
|
||||
options: CliDetectionOptions = {}
|
||||
): Promise<UnifiedCliDetection> {
|
||||
const results: UnifiedCliDetection = {};
|
||||
|
||||
// Detect all providers in parallel
|
||||
const providers = Object.keys(CLI_CONFIGS) as Array<keyof typeof CLI_CONFIGS>;
|
||||
const detectionPromises = providers.map(async (provider) => {
|
||||
const result = await detectCli(provider, options);
|
||||
return { provider, result };
|
||||
});
|
||||
|
||||
const detections = await Promise.all(detectionPromises);
|
||||
|
||||
for (const { provider, result } of detections) {
|
||||
results[provider] = result;
|
||||
}
|
||||
|
||||
return results;
|
||||
}
|
||||
|
||||
/**
|
||||
* Find the first available command from a list of alternatives
|
||||
*/
|
||||
export async function findCommand(commands: string[]): Promise<string | null> {
|
||||
for (const command of commands) {
|
||||
try {
|
||||
const whichCommand = process.platform === 'win32' ? 'where' : 'which';
|
||||
const result = execSync(`${whichCommand} ${command}`, {
|
||||
encoding: 'utf8',
|
||||
timeout: 2000,
|
||||
}).trim();
|
||||
|
||||
if (result) {
|
||||
return result.split('\n')[0]; // Take first result on Windows
|
||||
}
|
||||
} catch {
|
||||
// Command not found, try next
|
||||
}
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get CLI version
|
||||
*/
|
||||
export async function getCliVersion(
|
||||
command: string,
|
||||
args: string[],
|
||||
timeout: number = 5000
|
||||
): Promise<string> {
|
||||
return new Promise((resolve, reject) => {
|
||||
const child = spawn(command, args, {
|
||||
stdio: 'pipe',
|
||||
timeout,
|
||||
});
|
||||
|
||||
let stdout = '';
|
||||
let stderr = '';
|
||||
|
||||
child.stdout?.on('data', (data) => {
|
||||
stdout += data.toString();
|
||||
});
|
||||
|
||||
child.stderr?.on('data', (data) => {
|
||||
stderr += data.toString();
|
||||
});
|
||||
|
||||
child.on('close', (code) => {
|
||||
if (code === 0 && stdout) {
|
||||
resolve(stdout.trim());
|
||||
} else if (stderr) {
|
||||
reject(stderr.trim());
|
||||
} else {
|
||||
reject(`Command exited with code ${code}`);
|
||||
}
|
||||
});
|
||||
|
||||
child.on('error', reject);
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Check authentication status for a CLI
|
||||
*/
|
||||
export async function checkCliAuth(
|
||||
provider: keyof typeof CLI_CONFIGS,
|
||||
command: string
|
||||
): Promise<'cli' | 'api_key' | 'none'> {
|
||||
try {
|
||||
switch (provider) {
|
||||
case 'claude':
|
||||
return await checkClaudeAuth(command);
|
||||
case 'codex':
|
||||
return await checkCodexAuth(command);
|
||||
case 'cursor':
|
||||
return await checkCursorAuth(command);
|
||||
default:
|
||||
return 'none';
|
||||
}
|
||||
} catch {
|
||||
return 'none';
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Check Claude CLI authentication
|
||||
*/
|
||||
async function checkClaudeAuth(command: string): Promise<'cli' | 'api_key' | 'none'> {
|
||||
try {
|
||||
// Check for environment variable
|
||||
if (process.env.ANTHROPIC_API_KEY) {
|
||||
return 'api_key';
|
||||
}
|
||||
|
||||
// Try running a simple command to check CLI auth
|
||||
const result = await getCliVersion(command, ['--version'], 3000);
|
||||
if (result) {
|
||||
return 'cli'; // If version works, assume CLI is authenticated
|
||||
}
|
||||
} catch {
|
||||
// Version command might work even without auth, so we need a better check
|
||||
}
|
||||
|
||||
// Try a more specific auth check
|
||||
return new Promise((resolve) => {
|
||||
const child = spawn(command, ['whoami'], {
|
||||
stdio: 'pipe',
|
||||
timeout: 3000,
|
||||
});
|
||||
|
||||
let stdout = '';
|
||||
let stderr = '';
|
||||
|
||||
child.stdout?.on('data', (data) => {
|
||||
stdout += data.toString();
|
||||
});
|
||||
|
||||
child.stderr?.on('data', (data) => {
|
||||
stderr += data.toString();
|
||||
});
|
||||
|
||||
child.on('close', (code) => {
|
||||
if (code === 0 && stdout && !stderr.includes('not authenticated')) {
|
||||
resolve('cli');
|
||||
} else {
|
||||
resolve('none');
|
||||
}
|
||||
});
|
||||
|
||||
child.on('error', () => {
|
||||
resolve('none');
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Check Codex CLI authentication
|
||||
*/
|
||||
async function checkCodexAuth(command: string): Promise<'cli' | 'api_key' | 'none'> {
|
||||
// Check for environment variable
|
||||
if (process.env.OPENAI_API_KEY) {
|
||||
return 'api_key';
|
||||
}
|
||||
|
||||
try {
|
||||
// Try a simple auth check
|
||||
const result = await getCliVersion(command, ['--version'], 3000);
|
||||
if (result) {
|
||||
return 'cli';
|
||||
}
|
||||
} catch {
|
||||
// Version check failed
|
||||
}
|
||||
|
||||
return 'none';
|
||||
}
|
||||
|
||||
/**
|
||||
* Check Cursor CLI authentication
|
||||
*/
|
||||
async function checkCursorAuth(command: string): Promise<'cli' | 'api_key' | 'none'> {
|
||||
// Check for environment variable
|
||||
if (process.env.CURSOR_API_KEY) {
|
||||
return 'api_key';
|
||||
}
|
||||
|
||||
// Check for credentials files
|
||||
const credentialPaths = [
|
||||
path.join(os.homedir(), '.cursor', 'credentials.json'),
|
||||
path.join(os.homedir(), '.config', 'cursor', 'credentials.json'),
|
||||
path.join(os.homedir(), '.cursor', 'auth.json'),
|
||||
path.join(os.homedir(), '.config', 'cursor', 'auth.json'),
|
||||
];
|
||||
|
||||
for (const credPath of credentialPaths) {
|
||||
try {
|
||||
if (fs.existsSync(credPath)) {
|
||||
const content = fs.readFileSync(credPath, 'utf8');
|
||||
const creds = JSON.parse(content);
|
||||
if (creds.accessToken || creds.token || creds.apiKey) {
|
||||
return 'cli';
|
||||
}
|
||||
}
|
||||
} catch {
|
||||
// Invalid credentials file
|
||||
}
|
||||
}
|
||||
|
||||
// Try a simple command
|
||||
try {
|
||||
const result = await getCliVersion(command, ['--version'], 3000);
|
||||
if (result) {
|
||||
return 'cli';
|
||||
}
|
||||
} catch {
|
||||
// Version check failed
|
||||
}
|
||||
|
||||
return 'none';
|
||||
}
|
||||
|
||||
/**
|
||||
* Get installation instructions for a provider
|
||||
*/
|
||||
export function getInstallInstructions(
|
||||
provider: keyof typeof CLI_CONFIGS,
|
||||
platform: NodeJS.Platform = process.platform
|
||||
): string {
|
||||
const config = CLI_CONFIGS[provider];
|
||||
const command = config.installCommands[platform as keyof typeof config.installCommands];
|
||||
|
||||
if (!command) {
|
||||
return `No installation instructions available for ${provider} on ${platform}`;
|
||||
}
|
||||
|
||||
return command;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get platform-specific CLI paths and versions
|
||||
*/
|
||||
export function getPlatformCliPaths(provider: keyof typeof CLI_CONFIGS): string[] {
|
||||
const config = CLI_CONFIGS[provider];
|
||||
const platform = process.platform;
|
||||
|
||||
switch (platform) {
|
||||
case 'darwin':
|
||||
return [
|
||||
`/usr/local/bin/${config.commands[0]}`,
|
||||
`/opt/homebrew/bin/${config.commands[0]}`,
|
||||
path.join(os.homedir(), '.local', 'bin', config.commands[0]),
|
||||
];
|
||||
|
||||
case 'linux':
|
||||
return [
|
||||
`/usr/bin/${config.commands[0]}`,
|
||||
`/usr/local/bin/${config.commands[0]}`,
|
||||
path.join(os.homedir(), '.local', 'bin', config.commands[0]),
|
||||
path.join(os.homedir(), '.npm', 'global', 'bin', config.commands[0]),
|
||||
];
|
||||
|
||||
case 'win32':
|
||||
return [
|
||||
path.join(
|
||||
os.homedir(),
|
||||
'AppData',
|
||||
'Local',
|
||||
'Programs',
|
||||
config.commands[0],
|
||||
`${config.commands[0]}.exe`
|
||||
),
|
||||
path.join(process.env.ProgramFiles || '', config.commands[0], `${config.commands[0]}.exe`),
|
||||
path.join(
|
||||
process.env.ProgramFiles || '',
|
||||
config.commands[0],
|
||||
'bin',
|
||||
`${config.commands[0]}.exe`
|
||||
),
|
||||
];
|
||||
|
||||
default:
|
||||
return [];
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Validate CLI installation
|
||||
*/
|
||||
export function validateCliInstallation(cliInfo: CliInfo): {
|
||||
valid: boolean;
|
||||
issues: string[];
|
||||
} {
|
||||
const issues: string[] = [];
|
||||
|
||||
if (!cliInfo.installed) {
|
||||
issues.push('CLI is not installed');
|
||||
}
|
||||
|
||||
if (cliInfo.installed && !cliInfo.version) {
|
||||
issues.push('Could not determine CLI version');
|
||||
}
|
||||
|
||||
if (cliInfo.installed && cliInfo.authMethod === 'none') {
|
||||
issues.push('CLI is not authenticated');
|
||||
}
|
||||
|
||||
return {
|
||||
valid: issues.length === 0,
|
||||
issues,
|
||||
};
|
||||
}
|
||||
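A sketch of how the detection framework above might be consumed to report provider readiness; the import path and the reporting format are assumptions:

// Illustrative consumer of the unified CLI detection API above.
import { detectAllCLis, getInstallInstructions } from './lib/cli-detection.js';

async function reportCliStatus(): Promise<void> {
  const detection = await detectAllCLis({ timeout: 5000 });
  for (const [provider, result] of Object.entries(detection)) {
    if (!result) continue;
    const name = provider as 'claude' | 'codex' | 'cursor';
    if (!result.cli.installed) {
      console.log(`${name}: not installed. Install with: ${getInstallInstructions(name)}`);
    } else if (!result.cli.authenticated) {
      console.log(`${name}: installed (${result.cli.version ?? 'unknown version'}) but not authenticated`);
    } else {
      console.log(`${name}: ready via ${result.cli.authMethod}`);
    }
  }
}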
98 apps/server/src/lib/codex-auth.ts Normal file
@@ -0,0 +1,98 @@
/**
 * Shared utility for checking Codex CLI authentication status
 *
 * Uses 'codex login status' command to verify authentication.
 * Never assumes authenticated - only returns true if CLI confirms.
 */

import { spawnProcess, getCodexAuthPath } from '@automaker/platform';
import { findCodexCliPath } from '@automaker/platform';
import * as fs from 'fs';

const CODEX_COMMAND = 'codex';
const OPENAI_API_KEY_ENV = 'OPENAI_API_KEY';

export interface CodexAuthCheckResult {
  authenticated: boolean;
  method: 'api_key_env' | 'cli_authenticated' | 'none';
}

/**
 * Check Codex authentication status using 'codex login status' command
 *
 * @param cliPath Optional CLI path. If not provided, will attempt to find it.
 * @returns Authentication status and method
 */
export async function checkCodexAuthentication(
  cliPath?: string | null
): Promise<CodexAuthCheckResult> {
  console.log('[CodexAuth] checkCodexAuthentication called with cliPath:', cliPath);

  const resolvedCliPath = cliPath || (await findCodexCliPath());
  const hasApiKey = !!process.env[OPENAI_API_KEY_ENV];

  console.log('[CodexAuth] resolvedCliPath:', resolvedCliPath);
  console.log('[CodexAuth] hasApiKey:', hasApiKey);

  // Debug: Check auth file
  const authFilePath = getCodexAuthPath();
  console.log('[CodexAuth] Auth file path:', authFilePath);
  try {
    const authFileExists = fs.existsSync(authFilePath);
    console.log('[CodexAuth] Auth file exists:', authFileExists);
    if (authFileExists) {
      const authContent = fs.readFileSync(authFilePath, 'utf-8');
      console.log('[CodexAuth] Auth file content:', authContent.substring(0, 500)); // First 500 chars
    }
  } catch (error) {
    console.log('[CodexAuth] Error reading auth file:', error);
  }

  // If CLI is not installed, cannot be authenticated
  if (!resolvedCliPath) {
    console.log('[CodexAuth] No CLI path found, returning not authenticated');
    return { authenticated: false, method: 'none' };
  }

  try {
    console.log('[CodexAuth] Running: ' + resolvedCliPath + ' login status');
    const result = await spawnProcess({
      command: resolvedCliPath || CODEX_COMMAND,
      args: ['login', 'status'],
      cwd: process.cwd(),
      env: {
        ...process.env,
        TERM: 'dumb', // Avoid interactive output
      },
    });

    console.log('[CodexAuth] Command result:');
    console.log('[CodexAuth] exitCode:', result.exitCode);
    console.log('[CodexAuth] stdout:', JSON.stringify(result.stdout));
    console.log('[CodexAuth] stderr:', JSON.stringify(result.stderr));

    // Check both stdout and stderr for "logged in" - Codex CLI outputs to stderr
    const combinedOutput = (result.stdout + result.stderr).toLowerCase();
    const isLoggedIn = combinedOutput.includes('logged in');
    console.log('[CodexAuth] isLoggedIn (contains "logged in" in stdout or stderr):', isLoggedIn);

    if (result.exitCode === 0 && isLoggedIn) {
      // Determine auth method based on what we know
      const method = hasApiKey ? 'api_key_env' : 'cli_authenticated';
      console.log('[CodexAuth] Authenticated! method:', method);
      return { authenticated: true, method };
    }

    console.log(
      '[CodexAuth] Not authenticated. exitCode:',
      result.exitCode,
      'isLoggedIn:',
      isLoggedIn
    );
  } catch (error) {
    console.log('[CodexAuth] Error running command:', error);
  }

  console.log('[CodexAuth] Returning not authenticated');
  return { authenticated: false, method: 'none' };
}
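An illustrative caller for checkCodexAuthentication; the wrapper function and its return shape are assumptions, not taken from the diff:

// Hypothetical status helper built on checkCodexAuthentication().
import { checkCodexAuthentication } from './lib/codex-auth.js';

export async function getCodexStatus(): Promise<{ connected: boolean; method: string }> {
  const { authenticated, method } = await checkCodexAuthentication();
  return { connected: authenticated, method };
}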
414 apps/server/src/lib/error-handler.ts Normal file
@@ -0,0 +1,414 @@
/**
|
||||
* Unified Error Handling System for CLI Providers
|
||||
*
|
||||
* Provides consistent error classification, user-friendly messages, and debugging support
|
||||
* across all AI providers (Claude, Codex, Cursor)
|
||||
*/
|
||||
|
||||
import { createLogger } from '@automaker/utils';
|
||||
|
||||
const logger = createLogger('ErrorHandler');
|
||||
|
||||
export enum ErrorType {
|
||||
AUTHENTICATION = 'authentication',
|
||||
BILLING = 'billing',
|
||||
RATE_LIMIT = 'rate_limit',
|
||||
NETWORK = 'network',
|
||||
TIMEOUT = 'timeout',
|
||||
VALIDATION = 'validation',
|
||||
PERMISSION = 'permission',
|
||||
CLI_NOT_FOUND = 'cli_not_found',
|
||||
CLI_NOT_INSTALLED = 'cli_not_installed',
|
||||
MODEL_NOT_SUPPORTED = 'model_not_supported',
|
||||
INVALID_REQUEST = 'invalid_request',
|
||||
SERVER_ERROR = 'server_error',
|
||||
UNKNOWN = 'unknown',
|
||||
}
|
||||
|
||||
export enum ErrorSeverity {
|
||||
LOW = 'low',
|
||||
MEDIUM = 'medium',
|
||||
HIGH = 'high',
|
||||
CRITICAL = 'critical',
|
||||
}
|
||||
|
||||
export interface ErrorClassification {
|
||||
type: ErrorType;
|
||||
severity: ErrorSeverity;
|
||||
userMessage: string;
|
||||
technicalMessage: string;
|
||||
suggestedAction?: string;
|
||||
retryable: boolean;
|
||||
provider?: string;
|
||||
context?: Record<string, any>;
|
||||
}
|
||||
|
||||
export interface ErrorPattern {
|
||||
type: ErrorType;
|
||||
severity: ErrorSeverity;
|
||||
patterns: RegExp[];
|
||||
userMessage: string;
|
||||
suggestedAction?: string;
|
||||
retryable: boolean;
|
||||
}
|
||||
|
||||
/**
|
||||
* Error patterns for different types of errors
|
||||
*/
|
||||
const ERROR_PATTERNS: ErrorPattern[] = [
|
||||
// Authentication errors
|
||||
{
|
||||
type: ErrorType.AUTHENTICATION,
|
||||
severity: ErrorSeverity.HIGH,
|
||||
patterns: [
|
||||
/unauthorized/i,
|
||||
/authentication.*fail/i,
|
||||
/invalid_api_key/i,
|
||||
/invalid api key/i,
|
||||
/not authenticated/i,
|
||||
/please.*log/i,
|
||||
/token.*revoked/i,
|
||||
/oauth.*error/i,
|
||||
/credentials.*invalid/i,
|
||||
],
|
||||
userMessage: 'Authentication failed. Please check your API key or login credentials.',
|
||||
suggestedAction:
|
||||
"Verify your API key is correct and hasn't expired, or run the CLI login command.",
|
||||
retryable: false,
|
||||
},
|
||||
|
||||
// Billing errors
|
||||
{
|
||||
type: ErrorType.BILLING,
|
||||
severity: ErrorSeverity.HIGH,
|
||||
patterns: [
|
||||
/credit.*balance.*low/i,
|
||||
/insufficient.*credit/i,
|
||||
/billing.*issue/i,
|
||||
/payment.*required/i,
|
||||
/usage.*exceeded/i,
|
||||
/quota.*exceeded/i,
|
||||
/add.*credit/i,
|
||||
],
|
||||
userMessage: 'Account has insufficient credits or billing issues.',
|
||||
suggestedAction: 'Please add credits to your account or check your billing settings.',
|
||||
retryable: false,
|
||||
},
|
||||
|
||||
// Rate limit errors
|
||||
{
|
||||
type: ErrorType.RATE_LIMIT,
|
||||
severity: ErrorSeverity.MEDIUM,
|
||||
patterns: [
|
||||
/rate.*limit/i,
|
||||
/too.*many.*request/i,
|
||||
/limit.*reached/i,
|
||||
/try.*later/i,
|
||||
/429/i,
|
||||
/reset.*time/i,
|
||||
/upgrade.*plan/i,
|
||||
],
|
||||
userMessage: 'Rate limit reached. Please wait before trying again.',
|
||||
suggestedAction: 'Wait a few minutes before retrying, or consider upgrading your plan.',
|
||||
retryable: true,
|
||||
},
|
||||
|
||||
// Network errors
|
||||
{
|
||||
type: ErrorType.NETWORK,
|
||||
severity: ErrorSeverity.MEDIUM,
|
||||
patterns: [/network/i, /connection/i, /dns/i, /timeout/i, /econnrefused/i, /enotfound/i],
|
||||
userMessage: 'Network connection issue.',
|
||||
suggestedAction: 'Check your internet connection and try again.',
|
||||
retryable: true,
|
||||
},
|
||||
|
||||
// Timeout errors
|
||||
{
|
||||
type: ErrorType.TIMEOUT,
|
||||
severity: ErrorSeverity.MEDIUM,
|
||||
patterns: [/timeout/i, /aborted/i, /time.*out/i],
|
||||
userMessage: 'Operation timed out.',
|
||||
suggestedAction: 'Try again with a simpler request or check your connection.',
|
||||
retryable: true,
|
||||
},
|
||||
|
||||
// Permission errors
|
||||
{
|
||||
type: ErrorType.PERMISSION,
|
||||
severity: ErrorSeverity.HIGH,
|
||||
patterns: [/permission.*denied/i, /access.*denied/i, /forbidden/i, /403/i, /not.*authorized/i],
|
||||
userMessage: 'Permission denied.',
|
||||
suggestedAction: 'Check if you have the required permissions for this operation.',
|
||||
retryable: false,
|
||||
},
|
||||
|
||||
// CLI not found
|
||||
{
|
||||
type: ErrorType.CLI_NOT_FOUND,
|
||||
severity: ErrorSeverity.HIGH,
|
||||
patterns: [/command not found/i, /not recognized/i, /not.*installed/i, /ENOENT/i],
|
||||
userMessage: 'CLI tool not found.',
|
||||
suggestedAction: "Please install the required CLI tool and ensure it's in your PATH.",
|
||||
retryable: false,
|
||||
},
|
||||
|
||||
// Model not supported
|
||||
{
|
||||
type: ErrorType.MODEL_NOT_SUPPORTED,
|
||||
severity: ErrorSeverity.HIGH,
|
||||
patterns: [/model.*not.*support/i, /unknown.*model/i, /invalid.*model/i],
|
||||
userMessage: 'Model not supported.',
|
||||
suggestedAction: 'Check available models and use a supported one.',
|
||||
retryable: false,
|
||||
},
|
||||
|
||||
// Server errors
|
||||
{
|
||||
type: ErrorType.SERVER_ERROR,
|
||||
severity: ErrorSeverity.HIGH,
|
||||
patterns: [/internal.*server/i, /server.*error/i, /500/i, /502/i, /503/i, /504/i],
|
||||
userMessage: 'Server error occurred.',
|
||||
suggestedAction: 'Try again in a few minutes or contact support if the issue persists.',
|
||||
retryable: true,
|
||||
},
|
||||
];
|
||||
|
||||
/**
|
||||
* Classify an error into a specific type with user-friendly message
|
||||
*/
|
||||
export function classifyError(
|
||||
error: unknown,
|
||||
provider?: string,
|
||||
context?: Record<string, any>
|
||||
): ErrorClassification {
|
||||
const errorText = getErrorText(error);
|
||||
|
||||
// Try to match against known patterns
|
||||
for (const pattern of ERROR_PATTERNS) {
|
||||
for (const regex of pattern.patterns) {
|
||||
if (regex.test(errorText)) {
|
||||
return {
|
||||
type: pattern.type,
|
||||
severity: pattern.severity,
|
||||
userMessage: pattern.userMessage,
|
||||
technicalMessage: errorText,
|
||||
suggestedAction: pattern.suggestedAction,
|
||||
retryable: pattern.retryable,
|
||||
provider,
|
||||
context,
|
||||
};
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Unknown error
|
||||
return {
|
||||
type: ErrorType.UNKNOWN,
|
||||
severity: ErrorSeverity.MEDIUM,
|
||||
userMessage: 'An unexpected error occurred.',
|
||||
technicalMessage: errorText,
|
||||
suggestedAction: 'Please try again or contact support if the issue persists.',
|
||||
retryable: true,
|
||||
provider,
|
||||
context,
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Get a user-friendly error message
|
||||
*/
|
||||
export function getUserFriendlyErrorMessage(error: unknown, provider?: string): string {
|
||||
const classification = classifyError(error, provider);
|
||||
|
||||
let message = classification.userMessage;
|
||||
|
||||
if (classification.suggestedAction) {
|
||||
message += ` ${classification.suggestedAction}`;
|
||||
}
|
||||
|
||||
// Add provider-specific context if available
|
||||
if (provider) {
|
||||
message = `[${provider.toUpperCase()}] ${message}`;
|
||||
}
|
||||
|
||||
return message;
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if an error is retryable
|
||||
*/
|
||||
export function isRetryableError(error: unknown): boolean {
|
||||
const classification = classifyError(error);
|
||||
return classification.retryable;
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if an error is authentication-related
|
||||
*/
|
||||
export function isAuthenticationError(error: unknown): boolean {
|
||||
const classification = classifyError(error);
|
||||
return classification.type === ErrorType.AUTHENTICATION;
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if an error is billing-related
|
||||
*/
|
||||
export function isBillingError(error: unknown): boolean {
|
||||
const classification = classifyError(error);
|
||||
return classification.type === ErrorType.BILLING;
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if an error is rate limit related
|
||||
*/
|
||||
export function isRateLimitError(error: unknown): boolean {
|
||||
const classification = classifyError(error);
|
||||
return classification.type === ErrorType.RATE_LIMIT;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get error text from various error types
|
||||
*/
|
||||
function getErrorText(error: unknown): string {
|
||||
if (typeof error === 'string') {
|
||||
return error;
|
||||
}
|
||||
|
||||
if (error instanceof Error) {
|
||||
return error.message;
|
||||
}
|
||||
|
||||
if (typeof error === 'object' && error !== null) {
|
||||
// Handle structured error objects
|
||||
const errorObj = error as any;
|
||||
|
||||
if (errorObj.message) {
|
||||
return errorObj.message;
|
||||
}
|
||||
|
||||
if (errorObj.error?.message) {
|
||||
return errorObj.error.message;
|
||||
}
|
||||
|
||||
if (errorObj.error) {
|
||||
return typeof errorObj.error === 'string' ? errorObj.error : JSON.stringify(errorObj.error);
|
||||
}
|
||||
|
||||
return JSON.stringify(error);
|
||||
}
|
||||
|
||||
return String(error);
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a standardized error response
|
||||
*/
|
||||
export function createErrorResponse(
|
||||
error: unknown,
|
||||
provider?: string,
|
||||
context?: Record<string, any>
|
||||
): {
|
||||
success: false;
|
||||
error: string;
|
||||
errorType: ErrorType;
|
||||
severity: ErrorSeverity;
|
||||
retryable: boolean;
|
||||
suggestedAction?: string;
|
||||
} {
|
||||
const classification = classifyError(error, provider, context);
|
||||
|
||||
return {
|
||||
success: false,
|
||||
error: classification.userMessage,
|
||||
errorType: classification.type,
|
||||
severity: classification.severity,
|
||||
retryable: classification.retryable,
|
||||
suggestedAction: classification.suggestedAction,
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Log error with full context
|
||||
*/
|
||||
export function logError(
|
||||
error: unknown,
|
||||
provider?: string,
|
||||
operation?: string,
|
||||
additionalContext?: Record<string, any>
|
||||
): void {
|
||||
const classification = classifyError(error, provider, {
|
||||
operation,
|
||||
...additionalContext,
|
||||
});
|
||||
|
||||
logger.error(`Error in ${provider || 'unknown'}${operation ? ` during ${operation}` : ''}`, {
|
||||
type: classification.type,
|
||||
severity: classification.severity,
|
||||
message: classification.userMessage,
|
||||
technicalMessage: classification.technicalMessage,
|
||||
retryable: classification.retryable,
|
||||
suggestedAction: classification.suggestedAction,
|
||||
context: classification.context,
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Provider-specific error handlers
|
||||
*/
|
||||
export const ProviderErrorHandler = {
|
||||
claude: {
|
||||
classify: (error: unknown) => classifyError(error, 'claude'),
|
||||
getUserMessage: (error: unknown) => getUserFriendlyErrorMessage(error, 'claude'),
|
||||
isAuth: (error: unknown) => isAuthenticationError(error),
|
||||
isBilling: (error: unknown) => isBillingError(error),
|
||||
isRateLimit: (error: unknown) => isRateLimitError(error),
|
||||
},
|
||||
|
||||
codex: {
|
||||
classify: (error: unknown) => classifyError(error, 'codex'),
|
||||
getUserMessage: (error: unknown) => getUserFriendlyErrorMessage(error, 'codex'),
|
||||
isAuth: (error: unknown) => isAuthenticationError(error),
|
||||
isBilling: (error: unknown) => isBillingError(error),
|
||||
isRateLimit: (error: unknown) => isRateLimitError(error),
|
||||
},
|
||||
|
||||
cursor: {
|
||||
classify: (error: unknown) => classifyError(error, 'cursor'),
|
||||
getUserMessage: (error: unknown) => getUserFriendlyErrorMessage(error, 'cursor'),
|
||||
isAuth: (error: unknown) => isAuthenticationError(error),
|
||||
isBilling: (error: unknown) => isBillingError(error),
|
||||
isRateLimit: (error: unknown) => isRateLimitError(error),
|
||||
},
|
||||
};
|
||||
|
||||
/**
|
||||
* Create a retry handler for retryable errors
|
||||
*/
|
||||
export function createRetryHandler(maxRetries: number = 3, baseDelay: number = 1000) {
|
||||
return async function <T>(
|
||||
operation: () => Promise<T>,
|
||||
shouldRetry: (error: unknown) => boolean = isRetryableError
|
||||
): Promise<T> {
|
||||
let lastError: unknown;
|
||||
|
||||
for (let attempt = 0; attempt <= maxRetries; attempt++) {
|
||||
try {
|
||||
return await operation();
|
||||
} catch (error) {
|
||||
lastError = error;
|
||||
|
||||
if (attempt === maxRetries || !shouldRetry(error)) {
|
||||
throw error;
|
||||
}
|
||||
|
||||
// Exponential backoff with jitter
|
||||
const delay = baseDelay * Math.pow(2, attempt) + Math.random() * 1000;
|
||||
logger.debug(`Retrying operation in ${delay}ms (attempt ${attempt + 1}/${maxRetries})`);
|
||||
await new Promise((resolve) => setTimeout(resolve, delay));
|
||||
}
|
||||
}
|
||||
|
||||
throw lastError;
|
||||
};
|
||||
}
|
||||
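A sketch combining the retry handler with the classifier above; callCodex and its behaviour are hypothetical placeholders:

// Illustrative wiring of createRetryHandler + createErrorResponse; callCodex is a stand-in.
import { createRetryHandler, createErrorResponse, logError } from './lib/error-handler.js';

const withRetry = createRetryHandler(3, 1000);

async function callCodex(prompt: string): Promise<string> {
  return `echo: ${prompt}`; // placeholder for a real provider invocation
}

export async function runPrompt(prompt: string) {
  try {
    // Only errors classified as retryable (rate limit, network, timeout, server) are retried.
    const output = await withRetry(() => callCodex(prompt));
    return { success: true as const, output };
  } catch (error) {
    logError(error, 'codex', 'runPrompt');
    return createErrorResponse(error, 'codex');
  }
}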
173 apps/server/src/lib/permission-enforcer.ts Normal file
@@ -0,0 +1,173 @@
/**
|
||||
* Permission enforcement utilities for Cursor provider
|
||||
*/
|
||||
|
||||
import type { CursorCliConfigFile } from '@automaker/types';
|
||||
import { createLogger } from '@automaker/utils';
|
||||
|
||||
const logger = createLogger('PermissionEnforcer');
|
||||
|
||||
export interface PermissionCheckResult {
|
||||
allowed: boolean;
|
||||
reason?: string;
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if a tool call is allowed based on permissions
|
||||
*/
|
||||
export function checkToolCallPermission(
|
||||
toolCall: any,
|
||||
permissions: CursorCliConfigFile | null
|
||||
): PermissionCheckResult {
|
||||
if (!permissions || !permissions.permissions) {
|
||||
// If no permissions are configured, allow everything (backward compatibility)
|
||||
return { allowed: true };
|
||||
}
|
||||
|
||||
const { allow = [], deny = [] } = permissions.permissions;
|
||||
|
||||
// Check shell tool calls
|
||||
if (toolCall.shellToolCall?.args?.command) {
|
||||
const command = toolCall.shellToolCall.args.command;
|
||||
const toolName = `Shell(${extractCommandName(command)})`;
|
||||
|
||||
// Check deny list first (deny takes precedence)
|
||||
for (const denyRule of deny) {
|
||||
if (matchesRule(toolName, denyRule)) {
|
||||
return {
|
||||
allowed: false,
|
||||
reason: `Operation blocked by permission rule: ${denyRule}`,
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
// Then check allow list
|
||||
for (const allowRule of allow) {
|
||||
if (matchesRule(toolName, allowRule)) {
|
||||
return { allowed: true };
|
||||
}
|
||||
}
|
||||
|
||||
return {
|
||||
allowed: false,
|
||||
reason: `Operation not in allow list: ${toolName}`,
|
||||
};
|
||||
}
|
||||
|
||||
// Check read tool calls
|
||||
if (toolCall.readToolCall?.args?.path) {
|
||||
const path = toolCall.readToolCall.args.path;
|
||||
const toolName = `Read(${path})`;
|
||||
|
||||
// Check deny list first
|
||||
for (const denyRule of deny) {
|
||||
if (matchesRule(toolName, denyRule)) {
|
||||
return {
|
||||
allowed: false,
|
||||
reason: `Read operation blocked by permission rule: ${denyRule}`,
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
// Then check allow list
|
||||
for (const allowRule of allow) {
|
||||
if (matchesRule(toolName, allowRule)) {
|
||||
return { allowed: true };
|
||||
}
|
||||
}
|
||||
|
||||
return {
|
||||
allowed: false,
|
||||
reason: `Read operation not in allow list: ${toolName}`,
|
||||
};
|
||||
}
|
||||
|
||||
// Check write tool calls
|
||||
if (toolCall.writeToolCall?.args?.path) {
|
||||
const path = toolCall.writeToolCall.args.path;
|
||||
const toolName = `Write(${path})`;
|
||||
|
||||
// Check deny list first
|
||||
for (const denyRule of deny) {
|
||||
if (matchesRule(toolName, denyRule)) {
|
||||
return {
|
||||
allowed: false,
|
||||
reason: `Write operation blocked by permission rule: ${denyRule}`,
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
// Then check allow list
|
||||
for (const allowRule of allow) {
|
||||
if (matchesRule(toolName, allowRule)) {
|
||||
return { allowed: true };
|
||||
}
|
||||
}
|
||||
|
||||
return {
|
||||
allowed: false,
|
||||
reason: `Write operation not in allow list: ${toolName}`,
|
||||
};
|
||||
}
|
||||
|
||||
// For other tool types, allow by default for now
|
||||
return { allowed: true };
|
||||
}
|
||||
|
||||
/**
|
||||
* Extract the base command name from a shell command
|
||||
*/
|
||||
function extractCommandName(command: string): string {
|
||||
// Remove leading spaces and get the first word
|
||||
const trimmed = command.trim();
|
||||
const firstWord = trimmed.split(/\s+/)[0];
|
||||
return firstWord || 'unknown';
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if a tool name matches a permission rule
|
||||
*/
|
||||
function matchesRule(toolName: string, rule: string): boolean {
|
||||
// Exact match
|
||||
if (toolName === rule) {
|
||||
return true;
|
||||
}
|
||||
|
||||
// Wildcard patterns
|
||||
if (rule.includes('*')) {
|
||||
const regex = new RegExp(rule.replace(/\*/g, '.*'));
|
||||
return regex.test(toolName);
|
||||
}
|
||||
|
||||
// Prefix match for shell commands (e.g., "Shell(git)" matches "Shell(git status)")
|
||||
if (rule.startsWith('Shell(') && toolName.startsWith('Shell(')) {
|
||||
const ruleCommand = rule.slice(6, -1); // Remove "Shell(" and ")"
|
||||
const toolCommand = extractCommandName(toolName.slice(6, -1)); // Remove "Shell(" and ")"
|
||||
return toolCommand.startsWith(ruleCommand);
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* Log permission violations
|
||||
*/
|
||||
export function logPermissionViolation(toolCall: any, reason: string, sessionId?: string): void {
|
||||
const sessionIdStr = sessionId ? ` [${sessionId}]` : '';
|
||||
|
||||
if (toolCall.shellToolCall?.args?.command) {
|
||||
logger.warn(
|
||||
`Permission violation${sessionIdStr}: Shell command blocked - ${toolCall.shellToolCall.args.command} (${reason})`
|
||||
);
|
||||
} else if (toolCall.readToolCall?.args?.path) {
|
||||
logger.warn(
|
||||
`Permission violation${sessionIdStr}: Read operation blocked - ${toolCall.readToolCall.args.path} (${reason})`
|
||||
);
|
||||
} else if (toolCall.writeToolCall?.args?.path) {
|
||||
logger.warn(
|
||||
`Permission violation${sessionIdStr}: Write operation blocked - ${toolCall.writeToolCall.args.path} (${reason})`
|
||||
);
|
||||
} else {
|
||||
logger.warn(`Permission violation${sessionIdStr}: Tool call blocked (${reason})`, { toolCall });
|
||||
}
|
||||
}
|
||||
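A minimal sketch of gating a Cursor shell tool call with checkToolCallPermission; the config literal and tool call are hypothetical examples, and only the permissions.allow/deny fields used above are assumed to exist on CursorCliConfigFile:

// Illustrative permission gate; the config literal and tool call are examples.
import type { CursorCliConfigFile } from '@automaker/types';
import { checkToolCallPermission, logPermissionViolation } from './lib/permission-enforcer.js';

const config = {
  permissions: { allow: ['Shell(git)', 'Read(*)'], deny: ['Shell(rm)'] },
} as CursorCliConfigFile; // assumes the remaining fields are optional

const toolCall = { shellToolCall: { args: { command: 'git status --short' } } };

const check = checkToolCallPermission(toolCall, config);
if (!check.allowed) {
  logPermissionViolation(toolCall, check.reason ?? 'blocked', 'session-123');
} else {
  // safe to execute the tool call
}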
@@ -16,7 +16,6 @@
|
||||
*/
|
||||
|
||||
import type { Options } from '@anthropic-ai/claude-agent-sdk';
|
||||
import os from 'os';
|
||||
import path from 'path';
|
||||
import { resolveModelString } from '@automaker/model-resolver';
|
||||
import { createLogger } from '@automaker/utils';
|
||||
@@ -31,6 +30,68 @@ import {
|
||||
} from '@automaker/types';
|
||||
import { isPathAllowed, PathNotAllowedError, getAllowedRootDirectory } from '@automaker/platform';
|
||||
|
||||
/**
|
||||
* Result of sandbox compatibility check
|
||||
*/
|
||||
export interface SandboxCompatibilityResult {
|
||||
/** Whether sandbox mode can be enabled for this path */
|
||||
enabled: boolean;
|
||||
/** Optional message explaining why sandbox is disabled */
|
||||
message?: string;
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if a working directory is compatible with sandbox mode.
|
||||
* Some paths (like cloud storage mounts) may not work with sandboxed execution.
|
||||
*
|
||||
* @param cwd - The working directory to check
|
||||
* @param sandboxRequested - Whether sandbox mode was requested by settings
|
||||
* @returns Object indicating if sandbox can be enabled and why not if disabled
|
||||
*/
|
||||
export function checkSandboxCompatibility(
|
||||
cwd: string,
|
||||
sandboxRequested: boolean
|
||||
): SandboxCompatibilityResult {
|
||||
if (!sandboxRequested) {
|
||||
return { enabled: false };
|
||||
}
|
||||
|
||||
const resolvedCwd = path.resolve(cwd);
|
||||
|
||||
// Check for cloud storage paths that may not be compatible with sandbox
|
||||
const cloudStoragePatterns = [
|
||||
// macOS mounted volumes
|
||||
/^\/Volumes\/GoogleDrive/i,
|
||||
/^\/Volumes\/Dropbox/i,
|
||||
/^\/Volumes\/OneDrive/i,
|
||||
/^\/Volumes\/iCloud/i,
|
||||
// macOS home directory
|
||||
/^\/Users\/[^/]+\/Google Drive/i,
|
||||
/^\/Users\/[^/]+\/Dropbox/i,
|
||||
/^\/Users\/[^/]+\/OneDrive/i,
|
||||
/^\/Users\/[^/]+\/Library\/Mobile Documents/i, // iCloud
|
||||
// Linux home directory
|
||||
/^\/home\/[^/]+\/Google Drive/i,
|
||||
/^\/home\/[^/]+\/Dropbox/i,
|
||||
/^\/home\/[^/]+\/OneDrive/i,
|
||||
// Windows
|
||||
/^C:\\Users\\[^\\]+\\Google Drive/i,
|
||||
/^C:\\Users\\[^\\]+\\Dropbox/i,
|
||||
/^C:\\Users\\[^\\]+\\OneDrive/i,
|
||||
];
|
||||
|
||||
for (const pattern of cloudStoragePatterns) {
|
||||
if (pattern.test(resolvedCwd)) {
|
||||
return {
|
||||
enabled: false,
|
||||
message: `Sandbox disabled: Cloud storage path detected (${resolvedCwd}). Sandbox mode may not work correctly with cloud-synced directories.`,
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
return { enabled: true };
|
||||
}
|
||||
|
||||
/**
|
||||
* Validate that a working directory is allowed by ALLOWED_ROOT_DIRECTORY.
|
||||
* This is the centralized security check for ALL AI model invocations.
|
||||
@@ -57,139 +118,6 @@ export function validateWorkingDirectory(cwd: string): void {
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Known cloud storage path patterns where sandbox mode is incompatible.
|
||||
*
|
||||
* The Claude CLI sandbox feature uses filesystem isolation that conflicts with
|
||||
* cloud storage providers' virtual filesystem implementations. This causes the
|
||||
* Claude process to exit with code 1 when sandbox is enabled for these paths.
|
||||
*
|
||||
* Affected providers (macOS paths):
|
||||
* - Dropbox: ~/Library/CloudStorage/Dropbox-*
|
||||
* - Google Drive: ~/Library/CloudStorage/GoogleDrive-*
|
||||
* - OneDrive: ~/Library/CloudStorage/OneDrive-*
|
||||
* - iCloud Drive: ~/Library/Mobile Documents/
|
||||
* - Box: ~/Library/CloudStorage/Box-*
|
||||
*
|
||||
* Note: This is a known limitation when using cloud storage paths.
|
||||
*/
|
||||
|
||||
/**
|
||||
* macOS-specific cloud storage patterns that appear under ~/Library/
|
||||
* These are specific enough to use with includes() safely.
|
||||
*/
|
||||
const MACOS_CLOUD_STORAGE_PATTERNS = [
|
||||
'/Library/CloudStorage/', // Dropbox, Google Drive, OneDrive, Box on macOS
|
||||
'/Library/Mobile Documents/', // iCloud Drive on macOS
|
||||
] as const;
|
||||
|
||||
/**
|
||||
* Generic cloud storage folder names that need to be anchored to the home directory
|
||||
* to avoid false positives (e.g., /home/user/my-project-about-dropbox/).
|
||||
*/
|
||||
const HOME_ANCHORED_CLOUD_FOLDERS = [
|
||||
'Google Drive', // Google Drive on some systems
|
||||
'Dropbox', // Dropbox on Linux/alternative installs
|
||||
'OneDrive', // OneDrive on Linux/alternative installs
|
||||
] as const;
|
||||
|
||||
/**
|
||||
* Check if a path is within a cloud storage location.
|
||||
*
|
||||
* Cloud storage providers use virtual filesystem implementations that are
|
||||
* incompatible with the Claude CLI sandbox feature, causing process crashes.
|
||||
*
|
||||
* Uses two detection strategies:
|
||||
* 1. macOS-specific patterns (under ~/Library/) - checked via includes()
|
||||
* 2. Generic folder names - anchored to home directory to avoid false positives
|
||||
*
|
||||
* @param cwd - The working directory path to check
|
||||
* @returns true if the path is in a cloud storage location
|
||||
*/
|
||||
export function isCloudStoragePath(cwd: string): boolean {
|
||||
const resolvedPath = path.resolve(cwd);
|
||||
// Normalize to forward slashes for consistent pattern matching across platforms
|
||||
let normalizedPath = resolvedPath.split(path.sep).join('/');
|
||||
// Remove Windows drive letter if present (e.g., "C:/Users" -> "/Users")
|
||||
// This ensures Unix paths in tests work the same on Windows
|
||||
normalizedPath = normalizedPath.replace(/^[A-Za-z]:/, '');
|
||||
|
||||
// Check macOS-specific patterns (these are specific enough to use includes)
|
||||
if (MACOS_CLOUD_STORAGE_PATTERNS.some((pattern) => normalizedPath.includes(pattern))) {
|
||||
return true;
|
||||
}
|
||||
|
||||
// Check home-anchored patterns to avoid false positives
|
||||
// e.g., /home/user/my-project-about-dropbox/ should NOT match
|
||||
const home = os.homedir();
|
||||
for (const folder of HOME_ANCHORED_CLOUD_FOLDERS) {
|
||||
const cloudPath = path.join(home, folder);
|
||||
let normalizedCloudPath = cloudPath.split(path.sep).join('/');
|
||||
// Remove Windows drive letter if present
|
||||
normalizedCloudPath = normalizedCloudPath.replace(/^[A-Za-z]:/, '');
|
||||
// Check if resolved path starts with the cloud storage path followed by a separator
|
||||
// This ensures we match ~/Dropbox/project but not ~/Dropbox-archive or ~/my-dropbox-tool
|
||||
if (
|
||||
normalizedPath === normalizedCloudPath ||
|
||||
normalizedPath.startsWith(normalizedCloudPath + '/')
|
||||
) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* Result of sandbox compatibility check
|
||||
*/
|
||||
export interface SandboxCheckResult {
|
||||
/** Whether sandbox should be enabled */
|
||||
enabled: boolean;
|
||||
/** If disabled, the reason why */
|
||||
disabledReason?: 'cloud_storage' | 'user_setting';
|
||||
/** Human-readable message for logging/UI */
|
||||
message?: string;
|
||||
}
|
||||
|
||||
/**
|
||||
* Determine if sandbox mode should be enabled for a given configuration.
|
||||
*
|
||||
* Sandbox mode is automatically disabled for cloud storage paths because the
|
||||
* Claude CLI sandbox feature is incompatible with virtual filesystem
|
||||
* implementations used by cloud storage providers (Dropbox, Google Drive, etc.).
|
||||
*
|
||||
* @param cwd - The working directory
|
||||
* @param enableSandboxMode - User's sandbox mode setting
|
||||
* @returns SandboxCheckResult with enabled status and reason if disabled
|
||||
*/
|
||||
export function checkSandboxCompatibility(
|
||||
cwd: string,
|
||||
enableSandboxMode?: boolean
|
||||
): SandboxCheckResult {
|
||||
// User has explicitly disabled sandbox mode
|
||||
if (enableSandboxMode === false) {
|
||||
return {
|
||||
enabled: false,
|
||||
disabledReason: 'user_setting',
|
||||
};
|
||||
}
|
||||
|
||||
// Check for cloud storage incompatibility (applies when enabled or undefined)
|
||||
if (isCloudStoragePath(cwd)) {
|
||||
return {
|
||||
enabled: false,
|
||||
disabledReason: 'cloud_storage',
|
||||
message: `Sandbox mode auto-disabled: Project is in a cloud storage location (${cwd}). The Claude CLI sandbox feature is incompatible with cloud storage filesystems. To use sandbox mode, move your project to a local directory.`,
|
||||
};
|
||||
}
|
||||
|
||||
// Sandbox is compatible and enabled (true or undefined defaults to enabled)
|
||||
return {
|
||||
enabled: true,
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Tool presets for different use cases
|
||||
*/
|
||||
@@ -272,55 +200,31 @@ export function getModelForUseCase(
|
||||
|
||||
/**
|
||||
* Base options that apply to all SDK calls
|
||||
* AUTONOMOUS MODE: Always bypass permissions for fully autonomous operation
|
||||
*/
|
||||
function getBaseOptions(): Partial<Options> {
|
||||
return {
|
||||
permissionMode: 'acceptEdits',
|
||||
permissionMode: 'bypassPermissions',
|
||||
allowDangerouslySkipPermissions: true,
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* MCP permission options result
|
||||
* MCP options result
|
||||
*/
|
||||
interface McpPermissionOptions {
|
||||
/** Whether tools should be restricted to a preset */
|
||||
shouldRestrictTools: boolean;
|
||||
/** Options to spread when MCP bypass is enabled */
|
||||
bypassOptions: Partial<Options>;
|
||||
interface McpOptions {
|
||||
/** Options to spread for MCP servers */
|
||||
mcpServerOptions: Partial<Options>;
|
||||
}
|
||||
|
||||
/**
|
||||
* Build MCP-related options based on configuration.
|
||||
* Centralizes the logic for determining permission modes and tool restrictions
|
||||
* when MCP servers are configured.
|
||||
*
|
||||
* @param config - The SDK options config
|
||||
* @returns Object with MCP permission settings to spread into final options
|
||||
* @returns Object with MCP server settings to spread into final options
|
||||
*/
|
||||
function buildMcpOptions(config: CreateSdkOptionsConfig): McpPermissionOptions {
|
||||
const hasMcpServers = config.mcpServers && Object.keys(config.mcpServers).length > 0;
|
||||
// Default to true for autonomous workflow. Security is enforced when adding servers
|
||||
// via the security warning dialog that explains the risks.
|
||||
const mcpAutoApprove = config.mcpAutoApproveTools ?? true;
|
||||
const mcpUnrestricted = config.mcpUnrestrictedTools ?? true;
|
||||
|
||||
// Determine if we should bypass permissions based on settings
|
||||
const shouldBypassPermissions = hasMcpServers && mcpAutoApprove;
|
||||
// Determine if we should restrict tools (only when no MCP or unrestricted is disabled)
|
||||
const shouldRestrictTools = !hasMcpServers || !mcpUnrestricted;
|
||||
|
||||
function buildMcpOptions(config: CreateSdkOptionsConfig): McpOptions {
|
||||
return {
|
||||
shouldRestrictTools,
|
||||
// Only include bypass options when MCP is configured and auto-approve is enabled
|
||||
bypassOptions: shouldBypassPermissions
|
||||
? {
|
||||
permissionMode: 'bypassPermissions' as const,
|
||||
// Required flag when using bypassPermissions mode
|
||||
allowDangerouslySkipPermissions: true,
|
||||
}
|
||||
: {},
|
||||
// Include MCP servers if configured
|
||||
mcpServerOptions: config.mcpServers ? { mcpServers: config.mcpServers } : {},
|
||||
};
|
||||
@@ -422,18 +326,9 @@ export interface CreateSdkOptionsConfig {
|
||||
/** Enable auto-loading of CLAUDE.md files via SDK's settingSources */
|
||||
autoLoadClaudeMd?: boolean;
|
||||
|
||||
/** Enable sandbox mode for bash command isolation */
|
||||
enableSandboxMode?: boolean;
|
||||
|
||||
/** MCP servers to make available to the agent */
|
||||
mcpServers?: Record<string, McpServerConfig>;
|
||||
|
||||
/** Auto-approve MCP tool calls without permission prompts */
|
||||
mcpAutoApproveTools?: boolean;
|
||||
|
||||
/** Allow unrestricted tools when MCP servers are enabled */
|
||||
mcpUnrestrictedTools?: boolean;
|
||||
|
||||
/** Extended thinking level for Claude models */
|
||||
thinkingLevel?: ThinkingLevel;
|
||||
}
|
||||
@@ -554,7 +449,6 @@ export function createSuggestionsOptions(config: CreateSdkOptionsConfig): Option
|
||||
* - Full tool access for code modification
|
||||
* - Standard turns for interactive sessions
|
||||
* - Model priority: explicit model > session model > chat default
|
||||
* - Sandbox mode controlled by enableSandboxMode setting (auto-disabled for cloud storage)
|
||||
* - When autoLoadClaudeMd is true, uses preset mode and settingSources for CLAUDE.md loading
|
||||
*/
|
||||
export function createChatOptions(config: CreateSdkOptionsConfig): Options {
|
||||
@@ -573,24 +467,12 @@ export function createChatOptions(config: CreateSdkOptionsConfig): Options {
|
||||
// Build thinking options
|
||||
const thinkingOptions = buildThinkingOptions(config.thinkingLevel);
|
||||
|
||||
// Check sandbox compatibility (auto-disables for cloud storage paths)
|
||||
const sandboxCheck = checkSandboxCompatibility(config.cwd, config.enableSandboxMode);
|
||||
|
||||
return {
|
||||
...getBaseOptions(),
|
||||
model: getModelForUseCase('chat', effectiveModel),
|
||||
maxTurns: MAX_TURNS.standard,
|
||||
cwd: config.cwd,
|
||||
// Only restrict tools if no MCP servers configured or unrestricted is disabled
|
||||
...(mcpOptions.shouldRestrictTools && { allowedTools: [...TOOL_PRESETS.chat] }),
|
||||
// Apply MCP bypass options if configured
|
||||
...mcpOptions.bypassOptions,
|
||||
...(sandboxCheck.enabled && {
|
||||
sandbox: {
|
||||
enabled: true,
|
||||
autoAllowBashIfSandboxed: true,
|
||||
},
|
||||
}),
|
||||
allowedTools: [...TOOL_PRESETS.chat],
|
||||
...claudeMdOptions,
|
||||
...thinkingOptions,
|
||||
...(config.abortController && { abortController: config.abortController }),
|
||||
@@ -605,7 +487,6 @@ export function createChatOptions(config: CreateSdkOptionsConfig): Options {
|
||||
* - Full tool access for code modification and implementation
|
||||
* - Extended turns for thorough feature implementation
|
||||
* - Uses default model (can be overridden)
|
||||
* - Sandbox mode controlled by enableSandboxMode setting (auto-disabled for cloud storage)
|
||||
* - When autoLoadClaudeMd is true, uses preset mode and settingSources for CLAUDE.md loading
|
||||
*/
|
||||
export function createAutoModeOptions(config: CreateSdkOptionsConfig): Options {
|
||||
@@ -621,24 +502,12 @@ export function createAutoModeOptions(config: CreateSdkOptionsConfig): Options {
|
||||
// Build thinking options
|
||||
const thinkingOptions = buildThinkingOptions(config.thinkingLevel);
|
||||
|
||||
// Check sandbox compatibility (auto-disables for cloud storage paths)
|
||||
const sandboxCheck = checkSandboxCompatibility(config.cwd, config.enableSandboxMode);
|
||||
|
||||
return {
|
||||
...getBaseOptions(),
|
||||
model: getModelForUseCase('auto', config.model),
|
||||
maxTurns: MAX_TURNS.maximum,
|
||||
cwd: config.cwd,
|
||||
// Only restrict tools if no MCP servers configured or unrestricted is disabled
|
||||
...(mcpOptions.shouldRestrictTools && { allowedTools: [...TOOL_PRESETS.fullAccess] }),
|
||||
// Apply MCP bypass options if configured
|
||||
...mcpOptions.bypassOptions,
|
||||
...(sandboxCheck.enabled && {
|
||||
sandbox: {
|
||||
enabled: true,
|
||||
autoAllowBashIfSandboxed: true,
|
||||
},
|
||||
}),
|
||||
allowedTools: [...TOOL_PRESETS.fullAccess],
|
||||
...claudeMdOptions,
|
||||
...thinkingOptions,
|
||||
...(config.abortController && { abortController: config.abortController }),
|
||||
@@ -656,7 +525,6 @@ export function createCustomOptions(
|
||||
config: CreateSdkOptionsConfig & {
|
||||
maxTurns?: number;
|
||||
allowedTools?: readonly string[];
|
||||
sandbox?: { enabled: boolean; autoAllowBashIfSandboxed?: boolean };
|
||||
}
|
||||
): Options {
|
||||
// Validate working directory before creating options
|
||||
@@ -671,22 +539,17 @@ export function createCustomOptions(
|
||||
// Build thinking options
|
||||
const thinkingOptions = buildThinkingOptions(config.thinkingLevel);
|
||||
|
||||
// For custom options: use explicit allowedTools if provided, otherwise use preset based on MCP settings
|
||||
// For custom options: use explicit allowedTools if provided, otherwise default to readOnly
|
||||
const effectiveAllowedTools = config.allowedTools
|
||||
? [...config.allowedTools]
|
||||
: mcpOptions.shouldRestrictTools
|
||||
? [...TOOL_PRESETS.readOnly]
|
||||
: undefined;
|
||||
: [...TOOL_PRESETS.readOnly];
|
||||
|
||||
return {
|
||||
...getBaseOptions(),
|
||||
model: getModelForUseCase('default', config.model),
|
||||
maxTurns: config.maxTurns ?? MAX_TURNS.maximum,
|
||||
cwd: config.cwd,
|
||||
...(effectiveAllowedTools && { allowedTools: effectiveAllowedTools }),
|
||||
...(config.sandbox && { sandbox: config.sandbox }),
|
||||
// Apply MCP bypass options if configured
|
||||
...mcpOptions.bypassOptions,
|
||||
allowedTools: effectiveAllowedTools,
|
||||
...claudeMdOptions,
|
||||
...thinkingOptions,
|
||||
...(config.abortController && { abortController: config.abortController }),
|
||||
|
||||
@@ -55,34 +55,6 @@ export async function getAutoLoadClaudeMdSetting(
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the enableSandboxMode setting from global settings.
|
||||
* Returns false if settings service is not available.
|
||||
*
|
||||
* @param settingsService - Optional settings service instance
|
||||
* @param logPrefix - Prefix for log messages (e.g., '[AgentService]')
|
||||
* @returns Promise resolving to the enableSandboxMode setting value
|
||||
*/
|
||||
export async function getEnableSandboxModeSetting(
|
||||
settingsService?: SettingsService | null,
|
||||
logPrefix = '[SettingsHelper]'
|
||||
): Promise<boolean> {
|
||||
if (!settingsService) {
|
||||
logger.info(`${logPrefix} SettingsService not available, sandbox mode disabled`);
|
||||
return false;
|
||||
}
|
||||
|
||||
try {
|
||||
const globalSettings = await settingsService.getGlobalSettings();
|
||||
const result = globalSettings.enableSandboxMode ?? false;
|
||||
logger.info(`${logPrefix} enableSandboxMode from global settings: ${result}`);
|
||||
return result;
|
||||
} catch (error) {
|
||||
logger.error(`${logPrefix} Failed to load enableSandboxMode setting:`, error);
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Filters out CLAUDE.md from context files when autoLoadClaudeMd is enabled
|
||||
* and rebuilds the formatted prompt without it.
|
||||
|
||||
@@ -70,17 +70,6 @@ export class ClaudeProvider extends BaseProvider {
|
||||
const maxThinkingTokens = getThinkingTokenBudget(thinkingLevel);
|
||||
|
||||
// Build Claude SDK options
|
||||
// AUTONOMOUS MODE: Always bypass permissions for fully autonomous operation
|
||||
const hasMcpServers = options.mcpServers && Object.keys(options.mcpServers).length > 0;
|
||||
// Base tools available to all agents
|
||||
// Note: 'Skill' and 'Task' tools are added dynamically by agent-service.ts
|
||||
// based on whether skills/subagents are enabled in settings
|
||||
const defaultTools = ['Read', 'Write', 'Edit', 'Glob', 'Grep', 'Bash', 'WebSearch', 'WebFetch'];
|
||||
|
||||
// AUTONOMOUS MODE: Always bypass permissions and allow unrestricted tools
|
||||
// Only restrict tools when no MCP servers are configured
|
||||
const shouldRestrictTools = !hasMcpServers;
|
||||
|
||||
const sdkOptions: Options = {
|
||||
model,
|
||||
systemPrompt,
|
||||
@@ -88,10 +77,9 @@ export class ClaudeProvider extends BaseProvider {
|
||||
cwd,
|
||||
// Pass only explicitly allowed environment variables to SDK
|
||||
env: buildEnv(),
|
||||
// Only restrict tools if explicitly set OR (no MCP / unrestricted disabled)
|
||||
...(allowedTools && shouldRestrictTools && { allowedTools }),
|
||||
...(!allowedTools && shouldRestrictTools && { allowedTools: defaultTools }),
|
||||
// AUTONOMOUS MODE: Always bypass permissions and allow dangerous operations
|
||||
// Pass through allowedTools if provided by caller (decided by sdk-options.ts)
|
||||
...(allowedTools && { allowedTools }),
|
||||
// AUTONOMOUS MODE: Always bypass permissions for fully autonomous operation
|
||||
permissionMode: 'bypassPermissions',
|
||||
allowDangerouslySkipPermissions: true,
|
||||
abortController,
|
||||
@@ -101,8 +89,6 @@ export class ClaudeProvider extends BaseProvider {
|
||||
: {}),
|
||||
// Forward settingSources for CLAUDE.md file loading
|
||||
...(options.settingSources && { settingSources: options.settingSources }),
|
||||
// Forward sandbox configuration
|
||||
...(options.sandbox && { sandbox: options.sandbox }),
|
||||
// Forward MCP servers configuration
|
||||
...(options.mcpServers && { mcpServers: options.mcpServers }),
|
||||
// Extended thinking configuration
|
||||
|
||||
apps/server/src/providers/codex-config-manager.ts (new file, 85 lines)
@@ -0,0 +1,85 @@
/**
 * Codex Config Manager - Writes MCP server configuration for Codex CLI
 */

import path from 'path';
import type { McpServerConfig } from '@automaker/types';
import * as secureFs from '../lib/secure-fs.js';

const CODEX_CONFIG_DIR = '.codex';
const CODEX_CONFIG_FILENAME = 'config.toml';
const CODEX_MCP_SECTION = 'mcp_servers';

function formatTomlString(value: string): string {
  return JSON.stringify(value);
}

function formatTomlArray(values: string[]): string {
  const formatted = values.map((value) => formatTomlString(value)).join(', ');
  return `[${formatted}]`;
}

function formatTomlInlineTable(values: Record<string, string>): string {
  const entries = Object.entries(values).map(
    ([key, value]) => `${key} = ${formatTomlString(value)}`
  );
  return `{ ${entries.join(', ')} }`;
}

function formatTomlKey(key: string): string {
  return `"${key.replace(/"/g, '\\"')}"`;
}

function buildServerBlock(name: string, server: McpServerConfig): string[] {
  const lines: string[] = [];
  const section = `${CODEX_MCP_SECTION}.${formatTomlKey(name)}`;
  lines.push(`[${section}]`);

  if (server.type) {
    lines.push(`type = ${formatTomlString(server.type)}`);
  }

  if ('command' in server && server.command) {
    lines.push(`command = ${formatTomlString(server.command)}`);
  }

  if ('args' in server && server.args && server.args.length > 0) {
    lines.push(`args = ${formatTomlArray(server.args)}`);
  }

  if ('env' in server && server.env && Object.keys(server.env).length > 0) {
    lines.push(`env = ${formatTomlInlineTable(server.env)}`);
  }

  if ('url' in server && server.url) {
    lines.push(`url = ${formatTomlString(server.url)}`);
  }

  if ('headers' in server && server.headers && Object.keys(server.headers).length > 0) {
    lines.push(`headers = ${formatTomlInlineTable(server.headers)}`);
  }

  return lines;
}

export class CodexConfigManager {
  async configureMcpServers(
    cwd: string,
    mcpServers: Record<string, McpServerConfig>
  ): Promise<void> {
    const configDir = path.join(cwd, CODEX_CONFIG_DIR);
    const configPath = path.join(configDir, CODEX_CONFIG_FILENAME);

    await secureFs.mkdir(configDir, { recursive: true });

    const blocks: string[] = [];
    for (const [name, server] of Object.entries(mcpServers)) {
      blocks.push(...buildServerBlock(name, server), '');
    }

    const content = blocks.join('\n').trim();
    if (content) {
      await secureFs.writeFile(configPath, content + '\n', 'utf-8');
    }
  }
}
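For illustration, a minimal usage sketch of CodexConfigManager follows. The server name, command, and env values below are made up, and the expected TOML is inferred from the formatting helpers above rather than from Codex CLI documentation.

// Hypothetical example: write an MCP server entry into <project>/.codex/config.toml
import { CodexConfigManager } from './codex-config-manager.js';

const manager = new CodexConfigManager();
await manager.configureMcpServers('/path/to/project', {
  // "docs" is a made-up server name; command/args/env are placeholders
  docs: {
    type: 'stdio',
    command: 'npx',
    args: ['-y', 'some-mcp-server'],
    env: { API_TOKEN: 'example' },
  },
});

// Based on the helpers above, the written config.toml should contain:
// [mcp_servers."docs"]
// type = "stdio"
// command = "npx"
// args = ["-y", "some-mcp-server"]
// env = { API_TOKEN = "example" }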
apps/server/src/providers/codex-models.ts (new file, 123 lines)
@@ -0,0 +1,123 @@
|
||||
/**
|
||||
* Codex Model Definitions
|
||||
*
|
||||
* Official Codex CLI models as documented at https://developers.openai.com/codex/models/
|
||||
*/
|
||||
|
||||
import { CODEX_MODEL_MAP } from '@automaker/types';
|
||||
import type { ModelDefinition } from './types.js';
|
||||
|
||||
const CONTEXT_WINDOW_200K = 200000;
|
||||
const CONTEXT_WINDOW_128K = 128000;
|
||||
const MAX_OUTPUT_32K = 32000;
|
||||
const MAX_OUTPUT_16K = 16000;
|
||||
|
||||
/**
|
||||
* All available Codex models with their specifications
|
||||
*/
|
||||
export const CODEX_MODELS: ModelDefinition[] = [
|
||||
// ========== Codex-Specific Models ==========
|
||||
{
|
||||
id: CODEX_MODEL_MAP.gpt52Codex,
|
||||
name: 'GPT-5.2-Codex',
|
||||
modelString: CODEX_MODEL_MAP.gpt52Codex,
|
||||
provider: 'openai',
|
||||
description:
|
||||
'Most advanced agentic coding model for complex software engineering (default for ChatGPT users).',
|
||||
contextWindow: CONTEXT_WINDOW_200K,
|
||||
maxOutputTokens: MAX_OUTPUT_32K,
|
||||
supportsVision: true,
|
||||
supportsTools: true,
|
||||
tier: 'premium' as const,
|
||||
default: true,
|
||||
hasReasoning: true,
|
||||
},
|
||||
{
|
||||
id: CODEX_MODEL_MAP.gpt5Codex,
|
||||
name: 'GPT-5-Codex',
|
||||
modelString: CODEX_MODEL_MAP.gpt5Codex,
|
||||
provider: 'openai',
|
||||
description: 'Purpose-built for Codex CLI with versatile tool use (default for CLI users).',
|
||||
contextWindow: CONTEXT_WINDOW_200K,
|
||||
maxOutputTokens: MAX_OUTPUT_32K,
|
||||
supportsVision: true,
|
||||
supportsTools: true,
|
||||
tier: 'standard' as const,
|
||||
hasReasoning: true,
|
||||
},
|
||||
{
|
||||
id: CODEX_MODEL_MAP.gpt5CodexMini,
|
||||
name: 'GPT-5-Codex-Mini',
|
||||
modelString: CODEX_MODEL_MAP.gpt5CodexMini,
|
||||
provider: 'openai',
|
||||
description: 'Faster workflows optimized for low-latency code Q&A and editing.',
|
||||
contextWindow: CONTEXT_WINDOW_128K,
|
||||
maxOutputTokens: MAX_OUTPUT_16K,
|
||||
supportsVision: false,
|
||||
supportsTools: true,
|
||||
tier: 'basic' as const,
|
||||
hasReasoning: false,
|
||||
},
|
||||
{
|
||||
id: CODEX_MODEL_MAP.codex1,
|
||||
name: 'Codex-1',
|
||||
modelString: CODEX_MODEL_MAP.codex1,
|
||||
provider: 'openai',
|
||||
description: 'Version of o3 optimized for software engineering with advanced reasoning.',
|
||||
contextWindow: CONTEXT_WINDOW_200K,
|
||||
maxOutputTokens: MAX_OUTPUT_32K,
|
||||
supportsVision: true,
|
||||
supportsTools: true,
|
||||
tier: 'premium' as const,
|
||||
hasReasoning: true,
|
||||
},
|
||||
{
|
||||
id: CODEX_MODEL_MAP.codexMiniLatest,
|
||||
name: 'Codex-Mini-Latest',
|
||||
modelString: CODEX_MODEL_MAP.codexMiniLatest,
|
||||
provider: 'openai',
|
||||
description: 'Version of o4-mini designed for Codex with faster workflows.',
|
||||
contextWindow: CONTEXT_WINDOW_128K,
|
||||
maxOutputTokens: MAX_OUTPUT_16K,
|
||||
supportsVision: true,
|
||||
supportsTools: true,
|
||||
tier: 'standard' as const,
|
||||
hasReasoning: false,
|
||||
},
|
||||
|
||||
// ========== Base GPT-5 Model ==========
|
||||
{
|
||||
id: CODEX_MODEL_MAP.gpt5,
|
||||
name: 'GPT-5',
|
||||
modelString: CODEX_MODEL_MAP.gpt5,
|
||||
provider: 'openai',
|
||||
description: 'GPT-5 base flagship model with strong general-purpose capabilities.',
|
||||
contextWindow: CONTEXT_WINDOW_200K,
|
||||
maxOutputTokens: MAX_OUTPUT_32K,
|
||||
supportsVision: true,
|
||||
supportsTools: true,
|
||||
tier: 'standard' as const,
|
||||
hasReasoning: true,
|
||||
},
|
||||
];
|
||||
|
||||
/**
|
||||
* Get model definition by ID
|
||||
*/
|
||||
export function getCodexModelById(modelId: string): ModelDefinition | undefined {
|
||||
return CODEX_MODELS.find((m) => m.id === modelId || m.modelString === modelId);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get all models that support reasoning
|
||||
*/
|
||||
export function getReasoningModels(): ModelDefinition[] {
|
||||
return CODEX_MODELS.filter((m) => m.hasReasoning);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get models by tier
|
||||
*/
|
||||
export function getModelsByTier(tier: 'premium' | 'standard' | 'basic'): ModelDefinition[] {
|
||||
return CODEX_MODELS.filter((m) => m.tier === tier);
|
||||
}
|
||||
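A small lookup sketch for the helpers above; the model id string is assumed to match one of the CODEX_MODEL_MAP values, whose exact strings live in @automaker/types.

// Example lookup (the 'gpt-5-codex' id/modelString value is an assumption)
import { getCodexModelById, getModelsByTier } from './codex-models.js';

const model = getCodexModelById('gpt-5-codex');
if (model) {
  console.log(`${model.name}: context=${model.contextWindow}, reasoning=${model.hasReasoning}`);
}

// All premium-tier Codex models
console.log(getModelsByTier('premium').map((m) => m.name));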
apps/server/src/providers/codex-provider.ts (new file, 1111 lines)
File diff suppressed because it is too large
apps/server/src/providers/codex-sdk-client.ts (new file, 173 lines)
@@ -0,0 +1,173 @@
|
||||
/**
|
||||
* Codex SDK client - Executes Codex queries via official @openai/codex-sdk
|
||||
*
|
||||
* Used for programmatic control of Codex from within the application.
|
||||
* Provides cleaner integration than spawning CLI processes.
|
||||
*/
|
||||
|
||||
import { Codex } from '@openai/codex-sdk';
|
||||
import { formatHistoryAsText, classifyError, getUserFriendlyErrorMessage } from '@automaker/utils';
|
||||
import { supportsReasoningEffort } from '@automaker/types';
|
||||
import type { ExecuteOptions, ProviderMessage } from './types.js';
|
||||
|
||||
const OPENAI_API_KEY_ENV = 'OPENAI_API_KEY';
|
||||
const SDK_HISTORY_HEADER = 'Current request:\n';
|
||||
const DEFAULT_RESPONSE_TEXT = '';
|
||||
const SDK_ERROR_DETAILS_LABEL = 'Details:';
|
||||
|
||||
type PromptBlock = {
|
||||
type: string;
|
||||
text?: string;
|
||||
source?: {
|
||||
type?: string;
|
||||
media_type?: string;
|
||||
data?: string;
|
||||
};
|
||||
};
|
||||
|
||||
function resolveApiKey(): string {
|
||||
const apiKey = process.env[OPENAI_API_KEY_ENV];
|
||||
if (!apiKey) {
|
||||
throw new Error('OPENAI_API_KEY is not set.');
|
||||
}
|
||||
return apiKey;
|
||||
}
|
||||
|
||||
function normalizePromptBlocks(prompt: ExecuteOptions['prompt']): PromptBlock[] {
|
||||
if (Array.isArray(prompt)) {
|
||||
return prompt as PromptBlock[];
|
||||
}
|
||||
return [{ type: 'text', text: prompt }];
|
||||
}
|
||||
|
||||
function buildPromptText(options: ExecuteOptions, systemPrompt: string | null): string {
|
||||
const historyText =
|
||||
options.conversationHistory && options.conversationHistory.length > 0
|
||||
? formatHistoryAsText(options.conversationHistory)
|
||||
: '';
|
||||
|
||||
const promptBlocks = normalizePromptBlocks(options.prompt);
|
||||
const promptTexts: string[] = [];
|
||||
|
||||
for (const block of promptBlocks) {
|
||||
if (block.type === 'text' && typeof block.text === 'string' && block.text.trim()) {
|
||||
promptTexts.push(block.text);
|
||||
}
|
||||
}
|
||||
|
||||
const promptContent = promptTexts.join('\n\n');
|
||||
if (!promptContent.trim()) {
|
||||
throw new Error('Codex SDK prompt is empty.');
|
||||
}
|
||||
|
||||
const parts: string[] = [];
|
||||
if (systemPrompt) {
|
||||
parts.push(`System: ${systemPrompt}`);
|
||||
}
|
||||
if (historyText) {
|
||||
parts.push(historyText);
|
||||
}
|
||||
parts.push(`${SDK_HISTORY_HEADER}${promptContent}`);
|
||||
|
||||
return parts.join('\n\n');
|
||||
}
|
||||
|
||||
function buildSdkErrorMessage(rawMessage: string, userMessage: string): string {
|
||||
if (!rawMessage) {
|
||||
return userMessage;
|
||||
}
|
||||
if (!userMessage || rawMessage === userMessage) {
|
||||
return rawMessage;
|
||||
}
|
||||
return `${userMessage}\n\n${SDK_ERROR_DETAILS_LABEL} ${rawMessage}`;
|
||||
}
|
||||
|
||||
/**
|
||||
* Execute a query using the official Codex SDK
|
||||
*
|
||||
* The SDK provides a cleaner interface than spawning CLI processes:
|
||||
* - Handles authentication automatically
|
||||
* - Provides TypeScript types
|
||||
* - Supports thread management and resumption
|
||||
* - Better error handling
|
||||
*/
|
||||
export async function* executeCodexSdkQuery(
|
||||
options: ExecuteOptions,
|
||||
systemPrompt: string | null
|
||||
): AsyncGenerator<ProviderMessage> {
|
||||
try {
|
||||
const apiKey = resolveApiKey();
|
||||
const codex = new Codex({ apiKey });
|
||||
|
||||
// Resume existing thread or start new one
|
||||
let thread;
|
||||
if (options.sdkSessionId) {
|
||||
try {
|
||||
thread = codex.resumeThread(options.sdkSessionId);
|
||||
} catch {
|
||||
// If resume fails, start a new thread
|
||||
thread = codex.startThread();
|
||||
}
|
||||
} else {
|
||||
thread = codex.startThread();
|
||||
}
|
||||
|
||||
const promptText = buildPromptText(options, systemPrompt);
|
||||
|
||||
// Build run options with reasoning effort if supported
|
||||
const runOptions: {
|
||||
signal?: AbortSignal;
|
||||
reasoning?: { effort: string };
|
||||
} = {
|
||||
signal: options.abortController?.signal,
|
||||
};
|
||||
|
||||
// Add reasoning effort if model supports it and reasoningEffort is specified
|
||||
if (
|
||||
options.reasoningEffort &&
|
||||
supportsReasoningEffort(options.model) &&
|
||||
options.reasoningEffort !== 'none'
|
||||
) {
|
||||
runOptions.reasoning = { effort: options.reasoningEffort };
|
||||
}
|
||||
|
||||
// Run the query
|
||||
const result = await thread.run(promptText, runOptions);
|
||||
|
||||
// Extract response text (from finalResponse property)
|
||||
const outputText = result.finalResponse ?? DEFAULT_RESPONSE_TEXT;
|
||||
|
||||
// Get thread ID (may be null if not populated yet)
|
||||
const threadId = thread.id ?? undefined;
|
||||
|
||||
// Yield assistant message
|
||||
yield {
|
||||
type: 'assistant',
|
||||
session_id: threadId,
|
||||
message: {
|
||||
role: 'assistant',
|
||||
content: [{ type: 'text', text: outputText }],
|
||||
},
|
||||
};
|
||||
|
||||
// Yield result
|
||||
yield {
|
||||
type: 'result',
|
||||
subtype: 'success',
|
||||
session_id: threadId,
|
||||
result: outputText,
|
||||
};
|
||||
} catch (error) {
|
||||
const errorInfo = classifyError(error);
|
||||
const userMessage = getUserFriendlyErrorMessage(error);
|
||||
const combinedMessage = buildSdkErrorMessage(errorInfo.message, userMessage);
|
||||
console.error('[CodexSDK] executeQuery() error during execution:', {
|
||||
type: errorInfo.type,
|
||||
message: errorInfo.message,
|
||||
isRateLimit: errorInfo.isRateLimit,
|
||||
retryAfter: errorInfo.retryAfter,
|
||||
stack: error instanceof Error ? error.stack : undefined,
|
||||
});
|
||||
yield { type: 'error', error: combinedMessage };
|
||||
}
|
||||
}
|
||||
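A rough consumption sketch for executeCodexSdkQuery; the options object is abridged (ExecuteOptions may require additional fields such as cwd), OPENAI_API_KEY must be set, and the model string is an assumption.

// Sketch: run one Codex SDK query and log the streamed messages.
import { executeCodexSdkQuery } from './codex-sdk-client.js';
import type { ExecuteOptions } from './types.js';

const options = {
  prompt: 'Summarize the failing tests in this repository',
  model: 'gpt-5-codex',
} as ExecuteOptions;

for await (const message of executeCodexSdkQuery(options, 'You are a concise assistant.')) {
  // Messages arrive as 'assistant', then 'result' (or 'error' on failure)
  console.log(message.type, JSON.stringify(message));
}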
apps/server/src/providers/codex-tool-mapping.ts (new file, 436 lines)
@@ -0,0 +1,436 @@
|
||||
export type CodexToolResolution = {
|
||||
name: string;
|
||||
input: Record<string, unknown>;
|
||||
};
|
||||
|
||||
export type CodexTodoItem = {
|
||||
content: string;
|
||||
status: 'pending' | 'in_progress' | 'completed';
|
||||
activeForm?: string;
|
||||
};
|
||||
|
||||
const TOOL_NAME_BASH = 'Bash';
|
||||
const TOOL_NAME_READ = 'Read';
|
||||
const TOOL_NAME_EDIT = 'Edit';
|
||||
const TOOL_NAME_WRITE = 'Write';
|
||||
const TOOL_NAME_GREP = 'Grep';
|
||||
const TOOL_NAME_GLOB = 'Glob';
|
||||
const TOOL_NAME_TODO = 'TodoWrite';
|
||||
const TOOL_NAME_DELETE = 'Delete';
|
||||
const TOOL_NAME_LS = 'Ls';
|
||||
|
||||
const INPUT_KEY_COMMAND = 'command';
|
||||
const INPUT_KEY_FILE_PATH = 'file_path';
|
||||
const INPUT_KEY_PATTERN = 'pattern';
|
||||
|
||||
const SHELL_WRAPPER_PATTERNS = [
|
||||
/^\/bin\/bash\s+-lc\s+["']([\s\S]+)["']$/,
|
||||
/^bash\s+-lc\s+["']([\s\S]+)["']$/,
|
||||
/^\/bin\/sh\s+-lc\s+["']([\s\S]+)["']$/,
|
||||
/^sh\s+-lc\s+["']([\s\S]+)["']$/,
|
||||
/^cmd\.exe\s+\/c\s+["']?([\s\S]+)["']?$/i,
|
||||
/^powershell(?:\.exe)?\s+-Command\s+["']?([\s\S]+)["']?$/i,
|
||||
/^pwsh(?:\.exe)?\s+-Command\s+["']?([\s\S]+)["']?$/i,
|
||||
] as const;
|
||||
|
||||
const COMMAND_SEPARATOR_PATTERN = /\s*(?:&&|\|\||;)\s*/;
|
||||
const SEGMENT_SKIP_PREFIXES = ['cd ', 'export ', 'set ', 'pushd '] as const;
|
||||
const WRAPPER_COMMANDS = new Set(['sudo', 'env', 'command']);
|
||||
const READ_COMMANDS = new Set(['cat', 'sed', 'head', 'tail', 'less', 'more', 'bat', 'stat', 'wc']);
|
||||
const SEARCH_COMMANDS = new Set(['rg', 'grep', 'ag', 'ack']);
|
||||
const GLOB_COMMANDS = new Set(['ls', 'find', 'fd', 'tree']);
|
||||
const DELETE_COMMANDS = new Set(['rm', 'del', 'erase', 'remove', 'unlink']);
|
||||
const LIST_COMMANDS = new Set(['ls', 'dir', 'll', 'la']);
|
||||
const WRITE_COMMANDS = new Set(['tee', 'touch', 'mkdir']);
|
||||
const APPLY_PATCH_COMMAND = 'apply_patch';
|
||||
const APPLY_PATCH_PATTERN = /\bapply_patch\b/;
|
||||
const REDIRECTION_TARGET_PATTERN = /(?:>>|>)\s*([^\s]+)/;
|
||||
const SED_IN_PLACE_FLAGS = new Set(['-i', '--in-place']);
|
||||
const PERL_IN_PLACE_FLAG = /-.*i/;
|
||||
const SEARCH_PATTERN_FLAGS = new Set(['-e', '--regexp']);
|
||||
const SEARCH_VALUE_FLAGS = new Set([
|
||||
'-g',
|
||||
'--glob',
|
||||
'--iglob',
|
||||
'--type',
|
||||
'--type-add',
|
||||
'--type-clear',
|
||||
'--encoding',
|
||||
]);
|
||||
const SEARCH_FILE_LIST_FLAGS = new Set(['--files']);
|
||||
const TODO_LINE_PATTERN = /^[-*]\s*(?:\[(?<status>[ x~])\]\s*)?(?<content>.+)$/;
|
||||
const TODO_STATUS_COMPLETED = 'completed';
|
||||
const TODO_STATUS_IN_PROGRESS = 'in_progress';
|
||||
const TODO_STATUS_PENDING = 'pending';
|
||||
const PATCH_FILE_MARKERS = [
|
||||
'*** Update File: ',
|
||||
'*** Add File: ',
|
||||
'*** Delete File: ',
|
||||
'*** Move to: ',
|
||||
] as const;
|
||||
|
||||
function stripShellWrapper(command: string): string {
|
||||
const trimmed = command.trim();
|
||||
for (const pattern of SHELL_WRAPPER_PATTERNS) {
|
||||
const match = trimmed.match(pattern);
|
||||
if (match && match[1]) {
|
||||
return unescapeCommand(match[1].trim());
|
||||
}
|
||||
}
|
||||
return trimmed;
|
||||
}
|
||||
|
||||
function unescapeCommand(command: string): string {
|
||||
return command.replace(/\\(["'])/g, '$1');
|
||||
}
|
||||
|
||||
function extractPrimarySegment(command: string): string {
|
||||
const segments = command
|
||||
.split(COMMAND_SEPARATOR_PATTERN)
|
||||
.map((segment) => segment.trim())
|
||||
.filter(Boolean);
|
||||
|
||||
for (const segment of segments) {
|
||||
const shouldSkip = SEGMENT_SKIP_PREFIXES.some((prefix) => segment.startsWith(prefix));
|
||||
if (!shouldSkip) {
|
||||
return segment;
|
||||
}
|
||||
}
|
||||
|
||||
return command.trim();
|
||||
}
|
||||
|
||||
function tokenizeCommand(command: string): string[] {
|
||||
const tokens: string[] = [];
|
||||
let current = '';
|
||||
let inSingleQuote = false;
|
||||
let inDoubleQuote = false;
|
||||
let isEscaped = false;
|
||||
|
||||
for (const char of command) {
|
||||
if (isEscaped) {
|
||||
current += char;
|
||||
isEscaped = false;
|
||||
continue;
|
||||
}
|
||||
|
||||
if (char === '\\') {
|
||||
isEscaped = true;
|
||||
continue;
|
||||
}
|
||||
|
||||
if (char === "'" && !inDoubleQuote) {
|
||||
inSingleQuote = !inSingleQuote;
|
||||
continue;
|
||||
}
|
||||
|
||||
if (char === '"' && !inSingleQuote) {
|
||||
inDoubleQuote = !inDoubleQuote;
|
||||
continue;
|
||||
}
|
||||
|
||||
if (!inSingleQuote && !inDoubleQuote && /\s/.test(char)) {
|
||||
if (current) {
|
||||
tokens.push(current);
|
||||
current = '';
|
||||
}
|
||||
continue;
|
||||
}
|
||||
|
||||
current += char;
|
||||
}
|
||||
|
||||
if (current) {
|
||||
tokens.push(current);
|
||||
}
|
||||
|
||||
return tokens;
|
||||
}
|
||||
|
||||
function stripWrapperTokens(tokens: string[]): string[] {
|
||||
let index = 0;
|
||||
while (index < tokens.length && WRAPPER_COMMANDS.has(tokens[index].toLowerCase())) {
|
||||
index += 1;
|
||||
}
|
||||
return tokens.slice(index);
|
||||
}
|
||||
|
||||
function extractFilePathFromTokens(tokens: string[]): string | null {
|
||||
const candidates = tokens.slice(1).filter((token) => token && !token.startsWith('-'));
|
||||
if (candidates.length === 0) return null;
|
||||
return candidates[candidates.length - 1];
|
||||
}
|
||||
|
||||
function extractSearchPattern(tokens: string[]): string | null {
|
||||
const remaining = tokens.slice(1);
|
||||
|
||||
for (let index = 0; index < remaining.length; index += 1) {
|
||||
const token = remaining[index];
|
||||
if (token === '--') {
|
||||
return remaining[index + 1] ?? null;
|
||||
}
|
||||
if (SEARCH_PATTERN_FLAGS.has(token)) {
|
||||
return remaining[index + 1] ?? null;
|
||||
}
|
||||
if (SEARCH_VALUE_FLAGS.has(token)) {
|
||||
index += 1;
|
||||
continue;
|
||||
}
|
||||
if (token.startsWith('-')) {
|
||||
continue;
|
||||
}
|
||||
return token;
|
||||
}
|
||||
|
||||
return null;
|
||||
}
|
||||
|
||||
function extractTeeTarget(tokens: string[]): string | null {
|
||||
const teeIndex = tokens.findIndex((token) => token === 'tee');
|
||||
if (teeIndex < 0) return null;
|
||||
const candidate = tokens[teeIndex + 1];
|
||||
return candidate && !candidate.startsWith('-') ? candidate : null;
|
||||
}
|
||||
|
||||
function extractRedirectionTarget(command: string): string | null {
|
||||
const match = command.match(REDIRECTION_TARGET_PATTERN);
|
||||
return match?.[1] ?? null;
|
||||
}
|
||||
|
||||
function extractFilePathFromDeleteTokens(tokens: string[]): string | null {
|
||||
// rm file.txt or rm /path/to/file.txt
|
||||
// Skip flags and get the first non-flag argument
|
||||
for (let i = 1; i < tokens.length; i++) {
|
||||
const token = tokens[i];
|
||||
if (token && !token.startsWith('-')) {
|
||||
return token;
|
||||
}
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
function hasSedInPlaceFlag(tokens: string[]): boolean {
|
||||
return tokens.some((token) => SED_IN_PLACE_FLAGS.has(token) || token.startsWith('-i'));
|
||||
}
|
||||
|
||||
function hasPerlInPlaceFlag(tokens: string[]): boolean {
|
||||
return tokens.some((token) => PERL_IN_PLACE_FLAG.test(token));
|
||||
}
|
||||
|
||||
function extractPatchFilePath(command: string): string | null {
|
||||
for (const marker of PATCH_FILE_MARKERS) {
|
||||
const index = command.indexOf(marker);
|
||||
if (index < 0) continue;
|
||||
const start = index + marker.length;
|
||||
const end = command.indexOf('\n', start);
|
||||
const rawPath = (end === -1 ? command.slice(start) : command.slice(start, end)).trim();
|
||||
if (rawPath) return rawPath;
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
function buildInputWithFilePath(filePath: string | null): Record<string, unknown> {
|
||||
return filePath ? { [INPUT_KEY_FILE_PATH]: filePath } : {};
|
||||
}
|
||||
|
||||
function buildInputWithPattern(pattern: string | null): Record<string, unknown> {
|
||||
return pattern ? { [INPUT_KEY_PATTERN]: pattern } : {};
|
||||
}
|
||||
|
||||
export function resolveCodexToolCall(command: string): CodexToolResolution {
|
||||
const normalized = stripShellWrapper(command);
|
||||
const primarySegment = extractPrimarySegment(normalized);
|
||||
const tokens = stripWrapperTokens(tokenizeCommand(primarySegment));
|
||||
const commandToken = tokens[0]?.toLowerCase() ?? '';
|
||||
|
||||
const redirectionTarget = extractRedirectionTarget(primarySegment);
|
||||
if (redirectionTarget) {
|
||||
return {
|
||||
name: TOOL_NAME_WRITE,
|
||||
input: buildInputWithFilePath(redirectionTarget),
|
||||
};
|
||||
}
|
||||
|
||||
if (commandToken === APPLY_PATCH_COMMAND || APPLY_PATCH_PATTERN.test(primarySegment)) {
|
||||
return {
|
||||
name: TOOL_NAME_EDIT,
|
||||
input: buildInputWithFilePath(extractPatchFilePath(primarySegment)),
|
||||
};
|
||||
}
|
||||
|
||||
if (commandToken === 'sed' && hasSedInPlaceFlag(tokens)) {
|
||||
return {
|
||||
name: TOOL_NAME_EDIT,
|
||||
input: buildInputWithFilePath(extractFilePathFromTokens(tokens)),
|
||||
};
|
||||
}
|
||||
|
||||
if (commandToken === 'perl' && hasPerlInPlaceFlag(tokens)) {
|
||||
return {
|
||||
name: TOOL_NAME_EDIT,
|
||||
input: buildInputWithFilePath(extractFilePathFromTokens(tokens)),
|
||||
};
|
||||
}
|
||||
|
||||
if (WRITE_COMMANDS.has(commandToken)) {
|
||||
const filePath =
|
||||
commandToken === 'tee' ? extractTeeTarget(tokens) : extractFilePathFromTokens(tokens);
|
||||
return {
|
||||
name: TOOL_NAME_WRITE,
|
||||
input: buildInputWithFilePath(filePath),
|
||||
};
|
||||
}
|
||||
|
||||
if (SEARCH_COMMANDS.has(commandToken)) {
|
||||
if (tokens.some((token) => SEARCH_FILE_LIST_FLAGS.has(token))) {
|
||||
return {
|
||||
name: TOOL_NAME_GLOB,
|
||||
input: buildInputWithPattern(extractFilePathFromTokens(tokens)),
|
||||
};
|
||||
}
|
||||
|
||||
return {
|
||||
name: TOOL_NAME_GREP,
|
||||
input: buildInputWithPattern(extractSearchPattern(tokens)),
|
||||
};
|
||||
}
|
||||
|
||||
// Handle Delete commands (rm, del, erase, remove, unlink)
|
||||
if (DELETE_COMMANDS.has(commandToken)) {
|
||||
    // Skip if -r, -rf, or -f flags are present (recursive/forced deletes should go to Bash)
    if (tokens.some((token) => token === '-r' || token === '-rf' || token === '-f')) {
|
||||
return {
|
||||
name: TOOL_NAME_BASH,
|
||||
input: { [INPUT_KEY_COMMAND]: normalized },
|
||||
};
|
||||
}
|
||||
// Simple file deletion - extract the file path
|
||||
const filePath = extractFilePathFromDeleteTokens(tokens);
|
||||
if (filePath) {
|
||||
return {
|
||||
name: TOOL_NAME_DELETE,
|
||||
input: { path: filePath },
|
||||
};
|
||||
}
|
||||
// Fall back to bash if we can't determine the file path
|
||||
return {
|
||||
name: TOOL_NAME_BASH,
|
||||
input: { [INPUT_KEY_COMMAND]: normalized },
|
||||
};
|
||||
}
|
||||
|
||||
// Handle simple Ls commands (just listing, not find/glob)
|
||||
if (LIST_COMMANDS.has(commandToken)) {
|
||||
const filePath = extractFilePathFromTokens(tokens);
|
||||
return {
|
||||
name: TOOL_NAME_LS,
|
||||
input: { path: filePath || '.' },
|
||||
};
|
||||
}
|
||||
|
||||
if (GLOB_COMMANDS.has(commandToken)) {
|
||||
return {
|
||||
name: TOOL_NAME_GLOB,
|
||||
input: buildInputWithPattern(extractFilePathFromTokens(tokens)),
|
||||
};
|
||||
}
|
||||
|
||||
if (READ_COMMANDS.has(commandToken)) {
|
||||
return {
|
||||
name: TOOL_NAME_READ,
|
||||
input: buildInputWithFilePath(extractFilePathFromTokens(tokens)),
|
||||
};
|
||||
}
|
||||
|
||||
return {
|
||||
name: TOOL_NAME_BASH,
|
||||
input: { [INPUT_KEY_COMMAND]: normalized },
|
||||
};
|
||||
}
|
||||
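To make the mapping rules concrete, a few input/output sketches for resolveCodexToolCall; the commands are illustrative and the expected outputs follow the rules defined above.

import { resolveCodexToolCall } from './codex-tool-mapping.js';

console.log(resolveCodexToolCall('cat src/index.ts'));
// => { name: 'Read', input: { file_path: 'src/index.ts' } }

console.log(resolveCodexToolCall(`/bin/bash -lc "rg 'TODO' src"`));
// => { name: 'Grep', input: { pattern: 'TODO' } } (shell wrapper stripped first)

console.log(resolveCodexToolCall('rm -rf build'));
// => { name: 'Bash', input: { command: 'rm -rf build' } } (recursive delete stays on Bash)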
|
||||
function parseTodoLines(lines: string[]): CodexTodoItem[] {
|
||||
const todos: CodexTodoItem[] = [];
|
||||
|
||||
for (const line of lines) {
|
||||
const match = line.match(TODO_LINE_PATTERN);
|
||||
if (!match?.groups?.content) continue;
|
||||
|
||||
const statusToken = match.groups.status;
|
||||
const status =
|
||||
statusToken === 'x'
|
||||
? TODO_STATUS_COMPLETED
|
||||
: statusToken === '~'
|
||||
? TODO_STATUS_IN_PROGRESS
|
||||
: TODO_STATUS_PENDING;
|
||||
|
||||
todos.push({ content: match.groups.content.trim(), status });
|
||||
}
|
||||
|
||||
return todos;
|
||||
}
|
||||
|
||||
function extractTodoFromArray(value: unknown[]): CodexTodoItem[] {
|
||||
return value
|
||||
.map((entry) => {
|
||||
if (typeof entry === 'string') {
|
||||
return { content: entry, status: TODO_STATUS_PENDING };
|
||||
}
|
||||
if (entry && typeof entry === 'object') {
|
||||
const record = entry as Record<string, unknown>;
|
||||
const content =
|
||||
typeof record.content === 'string'
|
||||
? record.content
|
||||
: typeof record.text === 'string'
|
||||
? record.text
|
||||
: typeof record.title === 'string'
|
||||
? record.title
|
||||
: null;
|
||||
if (!content) return null;
|
||||
const status =
|
||||
record.status === TODO_STATUS_COMPLETED ||
|
||||
record.status === TODO_STATUS_IN_PROGRESS ||
|
||||
record.status === TODO_STATUS_PENDING
|
||||
? (record.status as CodexTodoItem['status'])
|
||||
: TODO_STATUS_PENDING;
|
||||
const activeForm = typeof record.activeForm === 'string' ? record.activeForm : undefined;
|
||||
return { content, status, activeForm };
|
||||
}
|
||||
return null;
|
||||
})
|
||||
.filter((item): item is CodexTodoItem => Boolean(item));
|
||||
}
|
||||
|
||||
export function extractCodexTodoItems(item: Record<string, unknown>): CodexTodoItem[] | null {
|
||||
const todosValue = item.todos;
|
||||
if (Array.isArray(todosValue)) {
|
||||
const todos = extractTodoFromArray(todosValue);
|
||||
return todos.length > 0 ? todos : null;
|
||||
}
|
||||
|
||||
const itemsValue = item.items;
|
||||
if (Array.isArray(itemsValue)) {
|
||||
const todos = extractTodoFromArray(itemsValue);
|
||||
return todos.length > 0 ? todos : null;
|
||||
}
|
||||
|
||||
const textValue =
|
||||
typeof item.text === 'string'
|
||||
? item.text
|
||||
: typeof item.content === 'string'
|
||||
? item.content
|
||||
: null;
|
||||
if (!textValue) return null;
|
||||
|
||||
const lines = textValue
|
||||
.split('\n')
|
||||
.map((line) => line.trim())
|
||||
.filter(Boolean);
|
||||
const todos = parseTodoLines(lines);
|
||||
return todos.length > 0 ? todos : null;
|
||||
}
|
||||
|
||||
export function getCodexTodoToolName(): string {
|
||||
return TOOL_NAME_TODO;
|
||||
}
|
||||
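A sketch of how a markdown-style checklist would be parsed by extractCodexTodoItems; the payload is made up, since real Codex events may instead carry todos or items arrays.

import { extractCodexTodoItems, getCodexTodoToolName } from './codex-tool-mapping.js';

const item = {
  text: ['- [x] Add config writer', '- [~] Wire up provider', '- [ ] Write tests'].join('\n'),
};

console.log(getCodexTodoToolName()); // => 'TodoWrite'
console.log(extractCodexTodoItems(item));
// => [
//      { content: 'Add config writer', status: 'completed' },
//      { content: 'Wire up provider', status: 'in_progress' },
//      { content: 'Write tests', status: 'pending' },
//    ]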
@@ -29,6 +29,8 @@ import type {
|
||||
ContentBlock,
|
||||
} from './types.js';
|
||||
import { stripProviderPrefix } from '@automaker/types';
|
||||
import { validateApiKey } from '../lib/auth-utils.js';
|
||||
import { getEffectivePermissions } from '../services/cursor-config-service.js';
|
||||
import {
|
||||
type CursorStreamEvent,
|
||||
type CursorSystemEvent,
|
||||
@@ -321,12 +323,19 @@ export class CursorProvider extends CliProvider {
|
||||
// Build CLI arguments for cursor-agent
|
||||
// NOTE: Prompt is NOT included here - it's passed via stdin to avoid
|
||||
// shell escaping issues when content contains $(), backticks, etc.
|
||||
const cliArgs: string[] = [
|
||||
const cliArgs: string[] = [];
|
||||
|
||||
// If using Cursor IDE (cliPath is 'cursor' not 'cursor-agent'), add 'agent' subcommand
|
||||
if (this.cliPath && !this.cliPath.includes('cursor-agent')) {
|
||||
cliArgs.push('agent');
|
||||
}
|
||||
|
||||
cliArgs.push(
|
||||
'-p', // Print mode (non-interactive)
|
||||
'--output-format',
|
||||
'stream-json',
|
||||
'--stream-partial-output', // Real-time streaming
|
||||
];
|
||||
'--stream-partial-output' // Real-time streaming
|
||||
);
|
||||
|
||||
// Only add --force if NOT in read-only mode
|
||||
// Without --force, Cursor CLI suggests changes but doesn't apply them
|
||||
@@ -472,7 +481,9 @@ export class CursorProvider extends CliProvider {
|
||||
// ==========================================================================
|
||||
|
||||
/**
|
||||
* Override CLI detection to add Cursor-specific versions directory check
|
||||
* Override CLI detection to add Cursor-specific checks:
|
||||
* 1. Versions directory for cursor-agent installations
|
||||
* 2. Cursor IDE with 'cursor agent' subcommand support
|
||||
*/
|
||||
protected detectCli(): CliDetectionResult {
|
||||
// First try standard detection (PATH, common paths, WSL)
|
||||
@@ -507,6 +518,39 @@ export class CursorProvider extends CliProvider {
|
||||
}
|
||||
}
|
||||
|
||||
// If cursor-agent not found, try to find 'cursor' IDE and use 'cursor agent' subcommand
|
||||
// The Cursor IDE includes the agent as a subcommand: cursor agent
|
||||
if (process.platform !== 'win32') {
|
||||
const cursorPaths = [
|
||||
'/usr/bin/cursor',
|
||||
'/usr/local/bin/cursor',
|
||||
path.join(os.homedir(), '.local/bin/cursor'),
|
||||
'/opt/cursor/cursor',
|
||||
];
|
||||
|
||||
for (const cursorPath of cursorPaths) {
|
||||
if (fs.existsSync(cursorPath)) {
|
||||
// Verify cursor agent subcommand works
|
||||
try {
|
||||
execSync(`"${cursorPath}" agent --version`, {
|
||||
encoding: 'utf8',
|
||||
timeout: 5000,
|
||||
stdio: 'pipe',
|
||||
});
|
||||
logger.debug(`Using cursor agent via Cursor IDE: ${cursorPath}`);
|
||||
// Return cursor path but we'll use 'cursor agent' subcommand
|
||||
return {
|
||||
cliPath: cursorPath,
|
||||
useWsl: false,
|
||||
strategy: 'native',
|
||||
};
|
||||
} catch {
|
||||
// cursor agent subcommand doesn't work, try next path
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
@@ -642,6 +686,9 @@ export class CursorProvider extends CliProvider {
|
||||
|
||||
logger.debug(`CursorProvider.executeQuery called with model: "${options.model}"`);
|
||||
|
||||
// Get effective permissions for this project
|
||||
const effectivePermissions = await getEffectivePermissions(options.cwd || process.cwd());
|
||||
|
||||
// Debug: log raw events when AUTOMAKER_DEBUG_RAW_OUTPUT is enabled
|
||||
const debugRawEvents =
|
||||
process.env.AUTOMAKER_DEBUG_RAW_OUTPUT === 'true' ||
|
||||
@@ -838,9 +885,16 @@ export class CursorProvider extends CliProvider {
|
||||
});
|
||||
return result;
|
||||
}
|
||||
const result = execSync(`"${this.cliPath}" --version`, {
|
||||
|
||||
// If using Cursor IDE, use 'cursor agent --version'
|
||||
const versionCmd = this.cliPath.includes('cursor-agent')
|
||||
? `"${this.cliPath}" --version`
|
||||
: `"${this.cliPath}" agent --version`;
|
||||
|
||||
const result = execSync(versionCmd, {
|
||||
encoding: 'utf8',
|
||||
timeout: 5000,
|
||||
stdio: 'pipe',
|
||||
}).trim();
|
||||
return result;
|
||||
} catch {
|
||||
@@ -857,8 +911,13 @@ export class CursorProvider extends CliProvider {
|
||||
return { authenticated: false, method: 'none' };
|
||||
}
|
||||
|
||||
// Check for API key in environment
|
||||
// Check for API key in environment with validation
|
||||
if (process.env.CURSOR_API_KEY) {
|
||||
const validation = validateApiKey(process.env.CURSOR_API_KEY, 'cursor');
|
||||
if (!validation.isValid) {
|
||||
logger.warn('Cursor API key validation failed:', validation.error);
|
||||
return { authenticated: false, method: 'api_key', error: validation.error };
|
||||
}
|
||||
return { authenticated: true, method: 'api_key' };
|
||||
}
|
||||
|
||||
|
||||
@@ -7,7 +7,7 @@
|
||||
|
||||
import { BaseProvider } from './base-provider.js';
|
||||
import type { InstallationStatus, ModelDefinition } from './types.js';
|
||||
import { isCursorModel, type ModelProvider } from '@automaker/types';
|
||||
import { isCursorModel, isCodexModel, type ModelProvider } from '@automaker/types';
|
||||
|
||||
/**
|
||||
* Provider registration entry
|
||||
@@ -156,6 +156,41 @@ export class ProviderFactory {
|
||||
static getRegisteredProviderNames(): string[] {
|
||||
return Array.from(providerRegistry.keys());
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if a specific model supports vision/image input
|
||||
*
|
||||
* @param modelId Model identifier
|
||||
* @returns Whether the model supports vision (defaults to true if model not found)
|
||||
*/
|
||||
static modelSupportsVision(modelId: string): boolean {
|
||||
const provider = this.getProviderForModel(modelId);
|
||||
const models = provider.getAvailableModels();
|
||||
|
||||
// Find the model in the available models list
|
||||
for (const model of models) {
|
||||
if (
|
||||
model.id === modelId ||
|
||||
model.modelString === modelId ||
|
||||
model.id.endsWith(`-${modelId}`) ||
|
||||
model.modelString.endsWith(`-${modelId}`) ||
|
||||
model.modelString === modelId.replace(/^(claude|cursor|codex)-/, '') ||
|
||||
model.modelString === modelId.replace(/-(claude|cursor|codex)$/, '')
|
||||
) {
|
||||
return model.supportsVision ?? true;
|
||||
}
|
||||
}
|
||||
|
||||
// Also try exact match with model string from provider's model map
|
||||
for (const model of models) {
|
||||
if (model.modelString === modelId || model.id === modelId) {
|
||||
return model.supportsVision ?? true;
|
||||
}
|
||||
}
|
||||
|
||||
// Default to true (Claude SDK supports vision by default)
|
||||
return true;
|
||||
}
|
||||
}
|
||||
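A usage sketch for modelSupportsVision; the import path and the mini-model id are assumptions.

import { ProviderFactory } from './provider-factory.js'; // assumed import path

function filterAttachments(modelId: string, imagePaths: string[]): string[] {
  // Drop images for text-only models (e.g., a mini model with supportsVision: false)
  return ProviderFactory.modelSupportsVision(modelId) ? imagePaths : [];
}

console.log(filterAttachments('gpt-5-codex-mini', ['./diagram.png'])); // likely []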
|
||||
// =============================================================================
|
||||
@@ -165,6 +200,7 @@ export class ProviderFactory {
|
||||
// Import providers for registration side-effects
|
||||
import { ClaudeProvider } from './claude-provider.js';
|
||||
import { CursorProvider } from './cursor-provider.js';
|
||||
import { CodexProvider } from './codex-provider.js';
|
||||
|
||||
// Register Claude provider
|
||||
registerProvider('claude', {
|
||||
@@ -184,3 +220,11 @@ registerProvider('cursor', {
|
||||
canHandleModel: (model: string) => isCursorModel(model),
|
||||
priority: 10, // Higher priority - check Cursor models first
|
||||
});
|
||||
|
||||
// Register Codex provider
|
||||
registerProvider('codex', {
|
||||
factory: () => new CodexProvider(),
|
||||
aliases: ['openai'],
|
||||
canHandleModel: (model: string) => isCodexModel(model),
|
||||
priority: 5, // Medium priority - check after Cursor but before Claude
|
||||
});
|
||||
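A routing sketch based on the registrations above; it assumes getProviderForModel consults canHandleModel in descending priority order (Cursor 10, Codex 5, then Claude), and the model ids and import path are assumptions.

import { ProviderFactory } from './provider-factory.js'; // assumed import path

const codexProvider = ProviderFactory.getProviderForModel('gpt-5-codex'); // assumed Codex model id
const claudeProvider = ProviderFactory.getProviderForModel('claude-sonnet-4-5'); // assumed Claude model id
console.log(codexProvider.constructor.name, claudeProvider.constructor.name);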
|
||||
@@ -229,12 +229,13 @@ export function createAuthRoutes(): Router {
|
||||
await invalidateSession(sessionToken);
|
||||
}
|
||||
|
||||
// Clear the cookie
|
||||
res.clearCookie(cookieName, {
|
||||
httpOnly: true,
|
||||
secure: process.env.NODE_ENV === 'production',
|
||||
sameSite: 'strict',
|
||||
path: '/',
|
||||
// Clear the cookie by setting it to empty with immediate expiration
|
||||
// Using res.cookie() with maxAge: 0 is more reliable than clearCookie()
|
||||
// in cross-origin development environments
|
||||
res.cookie(cookieName, '', {
|
||||
...getSessionCookieOptions(),
|
||||
maxAge: 0,
|
||||
expires: new Date(0),
|
||||
});
|
||||
|
||||
res.json({
|
||||
|
||||
@@ -31,7 +31,9 @@ export function createFollowUpFeatureHandler(autoModeService: AutoModeService) {
|
||||
// Start follow-up in background
|
||||
// followUpFeature derives workDir from feature.branchName
|
||||
autoModeService
|
||||
.followUpFeature(projectPath, featureId, prompt, imagePaths, useWorktrees ?? true)
|
||||
// Default to false to match run-feature/resume-feature behavior.
|
||||
// Worktrees should only be used when explicitly enabled by the user.
|
||||
.followUpFeature(projectPath, featureId, prompt, imagePaths, useWorktrees ?? false)
|
||||
.catch((error) => {
|
||||
logger.error(`[AutoMode] Follow up feature ${featureId} error:`, error);
|
||||
})
|
||||
|
||||
@@ -13,7 +13,10 @@ export function createClaudeRoutes(service: ClaudeUsageService): Router {
|
||||
// Check if Claude CLI is available first
|
||||
const isAvailable = await service.isAvailable();
|
||||
if (!isAvailable) {
|
||||
res.status(503).json({
|
||||
// IMPORTANT: This endpoint is behind Automaker session auth already.
|
||||
// Use a 200 + error payload for Claude CLI issues so the UI doesn't
|
||||
// interpret it as an invalid Automaker session (401/403 triggers logout).
|
||||
res.status(200).json({
|
||||
error: 'Claude CLI not found',
|
||||
message: "Please install Claude Code CLI and run 'claude login' to authenticate",
|
||||
});
|
||||
@@ -26,12 +29,13 @@ export function createClaudeRoutes(service: ClaudeUsageService): Router {
|
||||
const message = error instanceof Error ? error.message : 'Unknown error';
|
||||
|
||||
if (message.includes('Authentication required') || message.includes('token_expired')) {
|
||||
res.status(401).json({
|
||||
// Do NOT use 401/403 here: that status code is reserved for Automaker session auth.
|
||||
res.status(200).json({
|
||||
error: 'Authentication required',
|
||||
message: "Please run 'claude login' to authenticate",
|
||||
});
|
||||
} else if (message.includes('timed out')) {
|
||||
res.status(504).json({
|
||||
res.status(200).json({
|
||||
error: 'Command timed out',
|
||||
message: 'The Claude CLI took too long to respond',
|
||||
});
|
||||
|
||||
apps/server/src/routes/codex/index.ts (new file, 56 lines)
@@ -0,0 +1,56 @@
import { Router, Request, Response } from 'express';
import { CodexUsageService } from '../../services/codex-usage-service.js';
import { createLogger } from '@automaker/utils';

const logger = createLogger('Codex');

export function createCodexRoutes(service: CodexUsageService): Router {
  const router = Router();

  // Get current usage (attempts to fetch from Codex CLI)
  router.get('/usage', async (req: Request, res: Response) => {
    try {
      // Check if Codex CLI is available first
      const isAvailable = await service.isAvailable();
      if (!isAvailable) {
        // IMPORTANT: This endpoint is behind Automaker session auth already.
        // Use a 200 + error payload for Codex CLI issues so the UI doesn't
        // interpret it as an invalid Automaker session (401/403 triggers logout).
        res.status(200).json({
          error: 'Codex CLI not found',
          message: "Please install Codex CLI and run 'codex login' to authenticate",
        });
        return;
      }

      const usage = await service.fetchUsageData();
      res.json(usage);
    } catch (error) {
      const message = error instanceof Error ? error.message : 'Unknown error';

      if (message.includes('not authenticated') || message.includes('login')) {
        // Do NOT use 401/403 here: that status code is reserved for Automaker session auth.
        res.status(200).json({
          error: 'Authentication required',
          message: "Please run 'codex login' to authenticate",
        });
      } else if (message.includes('not available') || message.includes('does not provide')) {
        // This is the expected case - Codex doesn't provide usage stats
        res.status(200).json({
          error: 'Usage statistics not available',
          message: message,
        });
      } else if (message.includes('timed out')) {
        res.status(200).json({
          error: 'Command timed out',
          message: 'The Codex CLI took too long to respond',
        });
      } else {
        logger.error('Error fetching usage:', error);
        res.status(500).json({ error: message });
      }
    }
  });

  return router;
}
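Because this handler deliberately answers 200 with an error payload for CLI problems, a client is expected to branch on the body rather than the HTTP status. A minimal sketch, assuming the router is mounted at /api/codex:

// Client-side sketch: treat { error } payloads as "usage unavailable", not as auth failure
async function fetchCodexUsage(): Promise<unknown | null> {
  const res = await fetch('/api/codex/usage', { credentials: 'include' });
  const body = await res.json();
  if (body && typeof body === 'object' && 'error' in body) {
    console.warn('Codex usage unavailable:', (body as { message?: string }).message);
    return null; // show a placeholder in the UI instead of logging the user out
  }
  return body;
}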
@@ -232,7 +232,6 @@ File: ${fileName}${truncated ? ' (truncated)' : ''}`;
|
||||
maxTurns: 1,
|
||||
allowedTools: [],
|
||||
autoLoadClaudeMd,
|
||||
sandbox: { enabled: true, autoAllowBashIfSandboxed: true },
|
||||
thinkingLevel, // Pass thinking level for extended thinking
|
||||
});
|
||||
|
||||
|
||||
@@ -394,14 +394,13 @@ export function createDescribeImageHandler(
|
||||
maxTurns: 1,
|
||||
allowedTools: [],
|
||||
autoLoadClaudeMd,
|
||||
sandbox: { enabled: true, autoAllowBashIfSandboxed: true },
|
||||
thinkingLevel, // Pass thinking level for extended thinking
|
||||
});
|
||||
|
||||
logger.info(
|
||||
`[${requestId}] SDK options model=${sdkOptions.model} maxTurns=${sdkOptions.maxTurns} allowedTools=${JSON.stringify(
|
||||
sdkOptions.allowedTools
|
||||
)} sandbox=${JSON.stringify(sdkOptions.sandbox)}`
|
||||
)}`
|
||||
);
|
||||
|
||||
const promptGenerator = (async function* () {
|
||||
|
||||
@@ -9,6 +9,7 @@ import { createListHandler } from './routes/list.js';
|
||||
import { createGetHandler } from './routes/get.js';
|
||||
import { createCreateHandler } from './routes/create.js';
|
||||
import { createUpdateHandler } from './routes/update.js';
|
||||
import { createBulkUpdateHandler } from './routes/bulk-update.js';
|
||||
import { createDeleteHandler } from './routes/delete.js';
|
||||
import { createAgentOutputHandler, createRawOutputHandler } from './routes/agent-output.js';
|
||||
import { createGenerateTitleHandler } from './routes/generate-title.js';
|
||||
@@ -20,6 +21,11 @@ export function createFeaturesRoutes(featureLoader: FeatureLoader): Router {
|
||||
router.post('/get', validatePathParams('projectPath'), createGetHandler(featureLoader));
|
||||
router.post('/create', validatePathParams('projectPath'), createCreateHandler(featureLoader));
|
||||
router.post('/update', validatePathParams('projectPath'), createUpdateHandler(featureLoader));
|
||||
router.post(
|
||||
'/bulk-update',
|
||||
validatePathParams('projectPath'),
|
||||
createBulkUpdateHandler(featureLoader)
|
||||
);
|
||||
router.post('/delete', validatePathParams('projectPath'), createDeleteHandler(featureLoader));
|
||||
router.post('/agent-output', createAgentOutputHandler(featureLoader));
|
||||
router.post('/raw-output', createRawOutputHandler(featureLoader));
|
||||
|
||||
apps/server/src/routes/features/routes/bulk-update.ts (new file, 75 lines)
@@ -0,0 +1,75 @@
|
||||
/**
|
||||
* POST /bulk-update endpoint - Update multiple features at once
|
||||
*/
|
||||
|
||||
import type { Request, Response } from 'express';
|
||||
import { FeatureLoader } from '../../../services/feature-loader.js';
|
||||
import type { Feature } from '@automaker/types';
|
||||
import { getErrorMessage, logError } from '../common.js';
|
||||
|
||||
interface BulkUpdateRequest {
|
||||
projectPath: string;
|
||||
featureIds: string[];
|
||||
updates: Partial<Feature>;
|
||||
}
|
||||
|
||||
interface BulkUpdateResult {
|
||||
featureId: string;
|
||||
success: boolean;
|
||||
error?: string;
|
||||
}
|
||||
|
||||
export function createBulkUpdateHandler(featureLoader: FeatureLoader) {
|
||||
return async (req: Request, res: Response): Promise<void> => {
|
||||
try {
|
||||
const { projectPath, featureIds, updates } = req.body as BulkUpdateRequest;
|
||||
|
||||
if (!projectPath || !featureIds || !Array.isArray(featureIds) || featureIds.length === 0) {
|
||||
res.status(400).json({
|
||||
success: false,
|
||||
error: 'projectPath and featureIds (non-empty array) are required',
|
||||
});
|
||||
return;
|
||||
}
|
||||
|
||||
if (!updates || Object.keys(updates).length === 0) {
|
||||
res.status(400).json({
|
||||
success: false,
|
||||
error: 'updates object with at least one field is required',
|
||||
});
|
||||
return;
|
||||
}
|
||||
|
||||
const results: BulkUpdateResult[] = [];
|
||||
const updatedFeatures: Feature[] = [];
|
||||
|
||||
for (const featureId of featureIds) {
|
||||
try {
|
||||
const updated = await featureLoader.update(projectPath, featureId, updates);
|
||||
results.push({ featureId, success: true });
|
||||
updatedFeatures.push(updated);
|
||||
} catch (error) {
|
||||
results.push({
|
||||
featureId,
|
||||
success: false,
|
||||
error: getErrorMessage(error),
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
const successCount = results.filter((r) => r.success).length;
|
||||
const failureCount = results.filter((r) => !r.success).length;
|
||||
|
||||
res.json({
|
||||
success: failureCount === 0,
|
||||
updatedCount: successCount,
|
||||
failedCount: failureCount,
|
||||
results,
|
||||
features: updatedFeatures,
|
||||
});
|
||||
} catch (error) {
|
||||
logError(error, 'Bulk update features failed');
|
||||
res.status(500).json({ success: false, error: getErrorMessage(error) });
|
||||
}
|
||||
};
|
||||
}
|
||||
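A request/response sketch for the endpoint above, assuming the features router is mounted at /api/features; the feature ids and the status field are illustrative.

// Move several features to a new state in one call
const res = await fetch('/api/features/bulk-update', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({
    projectPath: '/path/to/project',
    featureIds: ['feat-1', 'feat-2'],
    updates: { status: 'backlog' }, // any Partial<Feature> fields (field name assumed)
  }),
});
const data = await res.json();
// data.success is true only if every feature updated; per-feature outcomes are in data.results
console.log(data.updatedCount, data.failedCount);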
@@ -10,11 +10,14 @@ import { getErrorMessage, logError } from '../common.js';
export function createUpdateHandler(featureLoader: FeatureLoader) {
return async (req: Request, res: Response): Promise<void> => {
try {
const { projectPath, featureId, updates } = req.body as {
projectPath: string;
featureId: string;
updates: Partial<Feature>;
};
const { projectPath, featureId, updates, descriptionHistorySource, enhancementMode } =
req.body as {
projectPath: string;
featureId: string;
updates: Partial<Feature>;
descriptionHistorySource?: 'enhance' | 'edit';
enhancementMode?: 'improve' | 'technical' | 'simplify' | 'acceptance';
};

if (!projectPath || !featureId || !updates) {
res.status(400).json({
@@ -24,7 +27,13 @@ export function createUpdateHandler(featureLoader: FeatureLoader) {
return;
}

const updated = await featureLoader.update(projectPath, featureId, updates);
const updated = await featureLoader.update(
projectPath,
featureId,
updates,
descriptionHistorySource,
enhancementMode
);
res.json({ success: true, feature: updated });
} catch (error) {
logError(error, 'Update feature failed');
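A hedged example of the richer update request this change enables; the concrete values are assumptions for illustration only:

// Hypothetical request body for the update handler above (sketch).
const updateRequest = {
  projectPath: '/home/user/my-project',  // assumed example value
  featureId: 'feature-a',                // assumed example ID
  updates: { description: 'Add OAuth login with PKCE' },
  descriptionHistorySource: 'enhance' as const,
  enhancementMode: 'technical' as const,
};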
@@ -11,7 +11,7 @@
import type { Request, Response } from 'express';
import type { SettingsService } from '../../../services/settings-service.js';
import type { GlobalSettings } from '../../../types/settings.js';
import { getErrorMessage, logError } from '../common.js';
import { getErrorMessage, logError, logger } from '../common.js';

/**
* Create handler factory for PUT /api/settings/global
@@ -32,6 +32,18 @@ export function createUpdateGlobalHandler(settingsService: SettingsService) {
return;
}

// Minimal debug logging to help diagnose accidental wipes.
if ('projects' in updates || 'theme' in updates || 'localStorageMigrated' in updates) {
const projectsLen = Array.isArray((updates as any).projects)
? (updates as any).projects.length
: undefined;
logger.info(
`Update global settings request: projects=${projectsLen ?? 'n/a'}, theme=${
(updates as any).theme ?? 'n/a'
}, localStorageMigrated=${(updates as any).localStorageMigrated ?? 'n/a'}`
);
}

const settings = await settingsService.updateGlobalSettings(updates);

res.json({
@@ -11,8 +11,12 @@ import { createDeleteApiKeyHandler } from './routes/delete-api-key.js';
import { createApiKeysHandler } from './routes/api-keys.js';
import { createPlatformHandler } from './routes/platform.js';
import { createVerifyClaudeAuthHandler } from './routes/verify-claude-auth.js';
import { createVerifyCodexAuthHandler } from './routes/verify-codex-auth.js';
import { createGhStatusHandler } from './routes/gh-status.js';
import { createCursorStatusHandler } from './routes/cursor-status.js';
import { createCodexStatusHandler } from './routes/codex-status.js';
import { createInstallCodexHandler } from './routes/install-codex.js';
import { createAuthCodexHandler } from './routes/auth-codex.js';
import {
createGetCursorConfigHandler,
createSetCursorDefaultModelHandler,
@@ -35,10 +39,16 @@ export function createSetupRoutes(): Router {
router.get('/api-keys', createApiKeysHandler());
router.get('/platform', createPlatformHandler());
router.post('/verify-claude-auth', createVerifyClaudeAuthHandler());
router.post('/verify-codex-auth', createVerifyCodexAuthHandler());
router.get('/gh-status', createGhStatusHandler());

// Cursor CLI routes
router.get('/cursor-status', createCursorStatusHandler());

// Codex CLI routes
router.get('/codex-status', createCodexStatusHandler());
router.post('/install-codex', createInstallCodexHandler());
router.post('/auth-codex', createAuthCodexHandler());
router.get('/cursor-config', createGetCursorConfigHandler());
router.post('/cursor-config/default-model', createSetCursorDefaultModelHandler());
router.post('/cursor-config/models', createSetCursorModelsHandler());
@@ -11,6 +11,7 @@ export function createApiKeysHandler() {
res.json({
success: true,
hasAnthropicKey: !!getApiKey('anthropic') || !!process.env.ANTHROPIC_API_KEY,
hasOpenaiKey: !!getApiKey('openai') || !!process.env.OPENAI_API_KEY,
});
} catch (error) {
logError(error, 'Get API keys failed');
31
apps/server/src/routes/setup/routes/auth-codex.ts
Normal file
@@ -0,0 +1,31 @@
/**
* POST /auth-codex endpoint - Authenticate Codex CLI
*/

import type { Request, Response } from 'express';
import { logError, getErrorMessage } from '../common.js';

/**
* Creates handler for POST /api/setup/auth-codex
* Returns instructions for manual Codex CLI authentication
*/
export function createAuthCodexHandler() {
return async (_req: Request, res: Response): Promise<void> => {
try {
const loginCommand = 'codex login';

res.json({
success: true,
requiresManualAuth: true,
command: loginCommand,
message: `Please authenticate Codex CLI manually by running: ${loginCommand}`,
});
} catch (error) {
logError(error, 'Auth Codex failed');
res.status(500).json({
success: false,
error: getErrorMessage(error),
});
}
};
}
49
apps/server/src/routes/setup/routes/codex-status.ts
Normal file
@@ -0,0 +1,49 @@
/**
* GET /codex-status endpoint - Get Codex CLI installation and auth status
*/

import type { Request, Response } from 'express';
import { CodexProvider } from '../../../providers/codex-provider.js';
import { getErrorMessage, logError } from '../common.js';

/**
* Creates handler for GET /api/setup/codex-status
* Returns Codex CLI installation and authentication status
*/
export function createCodexStatusHandler() {
const installCommand = 'npm install -g @openai/codex';
const loginCommand = 'codex login';

return async (_req: Request, res: Response): Promise<void> => {
try {
const provider = new CodexProvider();
const status = await provider.detectInstallation();

// Derive auth method from authenticated status and API key presence
let authMethod = 'none';
if (status.authenticated) {
authMethod = status.hasApiKey ? 'api_key_env' : 'cli_authenticated';
}

res.json({
success: true,
installed: status.installed,
version: status.version || null,
path: status.path || null,
auth: {
authenticated: status.authenticated || false,
method: authMethod,
hasApiKey: status.hasApiKey || false,
},
installCommand,
loginCommand,
});
} catch (error) {
logError(error, 'Get Codex status failed');
res.status(500).json({
success: false,
error: getErrorMessage(error),
});
}
};
}
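For reference, one possible JSON response from GET /api/setup/codex-status when Codex CLI is installed and authenticated via `codex login`; the version and path values are invented examples, only the structure follows the handler above:

// Illustrative response shape (values are assumptions).
const exampleCodexStatus = {
  success: true,
  installed: true,
  version: '0.1.0',              // assumed example version
  path: '/usr/local/bin/codex',  // assumed example path
  auth: { authenticated: true, method: 'cli_authenticated', hasApiKey: false },
  installCommand: 'npm install -g @openai/codex',
  loginCommand: 'codex login',
};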
@@ -46,13 +46,14 @@ export function createDeleteApiKeyHandler() {
// Map provider to env key name
const envKeyMap: Record<string, string> = {
anthropic: 'ANTHROPIC_API_KEY',
openai: 'OPENAI_API_KEY',
};

const envKey = envKeyMap[provider];
if (!envKey) {
res.status(400).json({
success: false,
error: `Unknown provider: ${provider}. Only anthropic is supported.`,
error: `Unknown provider: ${provider}. Only anthropic and openai are supported.`,
});
return;
}
33
apps/server/src/routes/setup/routes/install-codex.ts
Normal file
@@ -0,0 +1,33 @@
/**
* POST /install-codex endpoint - Install Codex CLI
*/

import type { Request, Response } from 'express';
import { logError, getErrorMessage } from '../common.js';

/**
* Creates handler for POST /api/setup/install-codex
* Installs Codex CLI (currently returns instructions for manual install)
*/
export function createInstallCodexHandler() {
return async (_req: Request, res: Response): Promise<void> => {
try {
// For now, return manual installation instructions
// In the future, this could potentially trigger npm global install
const installCommand = 'npm install -g @openai/codex';

res.json({
success: true,
message: `Please install Codex CLI manually by running: ${installCommand}`,
requiresManualInstall: true,
installCommand,
});
} catch (error) {
logError(error, 'Install Codex failed');
res.status(500).json({
success: false,
error: getErrorMessage(error),
});
}
};
}
@@ -7,8 +7,16 @@ import type { Request, Response } from 'express';
import { query } from '@anthropic-ai/claude-agent-sdk';
import { createLogger } from '@automaker/utils';
import { getApiKey } from '../common.js';
import {
createSecureAuthEnv,
AuthSessionManager,
AuthRateLimiter,
validateApiKey,
createTempEnvOverride,
} from '../../../lib/auth-utils.js';

const logger = createLogger('Setup');
const rateLimiter = new AuthRateLimiter();

// Known error patterns that indicate auth failure
const AUTH_ERROR_PATTERNS = [
@@ -77,6 +85,19 @@ export function createVerifyClaudeAuthHandler() {
apiKey?: string;
};

// Rate limiting to prevent abuse
const clientIp = req.ip || req.socket.remoteAddress || 'unknown';
if (!rateLimiter.canAttempt(clientIp)) {
const resetTime = rateLimiter.getResetTime(clientIp);
res.status(429).json({
success: false,
authenticated: false,
error: 'Too many authentication attempts. Please try again later.',
resetTime,
});
return;
}

logger.info(
`[Setup] Verifying Claude authentication using method: ${authMethod || 'auto'}${apiKey ? ' (with provided key)' : ''}`
);
@@ -89,37 +110,48 @@ export function createVerifyClaudeAuthHandler() {
let errorMessage = '';
let receivedAnyContent = false;

// Save original env values
const originalAnthropicKey = process.env.ANTHROPIC_API_KEY;
// Create secure auth session
const sessionId = `claude-auth-${Date.now()}-${Math.random().toString(36).substr(2, 9)}`;

try {
// Configure environment based on auth method
if (authMethod === 'cli') {
// For CLI verification, remove any API key so it uses CLI credentials only
delete process.env.ANTHROPIC_API_KEY;
logger.info('[Setup] Cleared API key environment for CLI verification');
} else if (authMethod === 'api_key') {
// For API key verification, use provided key, stored key, or env var (in order of priority)
if (apiKey) {
// Use the provided API key (allows testing unsaved keys)
process.env.ANTHROPIC_API_KEY = apiKey;
logger.info('[Setup] Using provided API key for verification');
} else {
const storedApiKey = getApiKey('anthropic');
if (storedApiKey) {
process.env.ANTHROPIC_API_KEY = storedApiKey;
logger.info('[Setup] Using stored API key for verification');
} else if (!process.env.ANTHROPIC_API_KEY) {
res.json({
success: true,
authenticated: false,
error: 'No API key configured. Please enter an API key first.',
});
return;
}
// For API key verification, validate the key first
if (authMethod === 'api_key' && apiKey) {
const validation = validateApiKey(apiKey, 'anthropic');
if (!validation.isValid) {
res.json({
success: true,
authenticated: false,
error: validation.error,
});
return;
}
}

// Create secure environment without modifying process.env
const authEnv = createSecureAuthEnv(authMethod || 'api_key', apiKey, 'anthropic');

// For API key verification without provided key, use stored key or env var
if (authMethod === 'api_key' && !apiKey) {
const storedApiKey = getApiKey('anthropic');
if (storedApiKey) {
authEnv.ANTHROPIC_API_KEY = storedApiKey;
logger.info('[Setup] Using stored API key for verification');
} else if (!authEnv.ANTHROPIC_API_KEY) {
res.json({
success: true,
authenticated: false,
error: 'No API key configured. Please enter an API key first.',
});
return;
}
}

// Store the secure environment in session manager
AuthSessionManager.createSession(sessionId, authMethod || 'api_key', apiKey, 'anthropic');

// Create temporary environment override for SDK call
const cleanupEnv = createTempEnvOverride(authEnv);

// Run a minimal query to verify authentication
const stream = query({
prompt: "Reply with only the word 'ok'",
@@ -278,13 +310,8 @@ export function createVerifyClaudeAuthHandler() {
}
} finally {
clearTimeout(timeoutId);
// Restore original environment
if (originalAnthropicKey !== undefined) {
process.env.ANTHROPIC_API_KEY = originalAnthropicKey;
} else if (authMethod === 'cli') {
// If we cleared it and there was no original, keep it cleared
delete process.env.ANTHROPIC_API_KEY;
}
// Clean up the auth session
AuthSessionManager.destroySession(sessionId);
}

logger.info('[Setup] Verification result:', {
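The handler now leans on the auth-utils helpers instead of mutating process.env directly. A condensed sketch of the call pattern used above; the helpers' internals are not shown in this excerpt, so treat the exact behavior as an assumption inferred from how they are invoked here:

// Usage pattern (sketch): build an isolated env, apply it temporarily, always clean up.
const authEnv = createSecureAuthEnv('api_key', apiKey, 'anthropic');
const cleanupEnv = createTempEnvOverride(authEnv); // returns a restore callback
try {
  // ...run the SDK verification query while the override is in effect...
} finally {
  cleanupEnv();                                    // restore the previous environment
  AuthSessionManager.destroySession(sessionId);    // drop the auth session state
}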
282
apps/server/src/routes/setup/routes/verify-codex-auth.ts
Normal file
@@ -0,0 +1,282 @@
/**
* POST /verify-codex-auth endpoint - Verify Codex authentication
*/

import type { Request, Response } from 'express';
import { createLogger } from '@automaker/utils';
import { CODEX_MODEL_MAP } from '@automaker/types';
import { ProviderFactory } from '../../../providers/provider-factory.js';
import { getApiKey } from '../common.js';
import { getCodexAuthIndicators } from '@automaker/platform';
import {
createSecureAuthEnv,
AuthSessionManager,
AuthRateLimiter,
validateApiKey,
createTempEnvOverride,
} from '../../../lib/auth-utils.js';

const logger = createLogger('Setup');
const rateLimiter = new AuthRateLimiter();
const OPENAI_API_KEY_ENV = 'OPENAI_API_KEY';
const AUTH_PROMPT = "Reply with only the word 'ok'";
const AUTH_TIMEOUT_MS = 30000;
const ERROR_BILLING_MESSAGE =
'Credit balance is too low. Please add credits to your OpenAI account.';
const ERROR_RATE_LIMIT_MESSAGE =
'Rate limit reached. Please wait a while before trying again or upgrade your plan.';
const ERROR_CLI_AUTH_REQUIRED =
"CLI authentication failed. Please run 'codex login' to authenticate.";
const ERROR_API_KEY_REQUIRED = 'No API key configured. Please enter an API key first.';
const AUTH_ERROR_PATTERNS = [
'authentication',
'unauthorized',
'invalid_api_key',
'invalid api key',
'api key is invalid',
'not authenticated',
'login',
'auth(',
'token refresh',
'tokenrefresh',
'failed to parse server response',
'transport channel closed',
];
const BILLING_ERROR_PATTERNS = [
'credit balance is too low',
'credit balance too low',
'insufficient credits',
'insufficient balance',
'no credits',
'out of credits',
'billing',
'payment required',
'add credits',
];
const RATE_LIMIT_PATTERNS = [
'limit reached',
'rate limit',
'rate_limit',
'too many requests',
'resets',
'429',
];

function containsAuthError(text: string): boolean {
const lowerText = text.toLowerCase();
return AUTH_ERROR_PATTERNS.some((pattern) => lowerText.includes(pattern));
}

function isBillingError(text: string): boolean {
const lowerText = text.toLowerCase();
return BILLING_ERROR_PATTERNS.some((pattern) => lowerText.includes(pattern));
}

function isRateLimitError(text: string): boolean {
if (isBillingError(text)) {
return false;
}
const lowerText = text.toLowerCase();
return RATE_LIMIT_PATTERNS.some((pattern) => lowerText.includes(pattern));
}

export function createVerifyCodexAuthHandler() {
return async (req: Request, res: Response): Promise<void> => {
const { authMethod, apiKey } = req.body as {
authMethod?: 'cli' | 'api_key';
apiKey?: string;
};

// Create session ID for cleanup
const sessionId = `codex-auth-${Date.now()}-${Math.random().toString(36).substr(2, 9)}`;

// Rate limiting
const clientIp = req.ip || req.socket.remoteAddress || 'unknown';
if (!rateLimiter.canAttempt(clientIp)) {
const resetTime = rateLimiter.getResetTime(clientIp);
res.status(429).json({
success: false,
authenticated: false,
error: 'Too many authentication attempts. Please try again later.',
resetTime,
});
return;
}

const abortController = new AbortController();
const timeoutId = setTimeout(() => abortController.abort(), AUTH_TIMEOUT_MS);

try {
// Create secure environment without modifying process.env
const authEnv = createSecureAuthEnv(authMethod || 'api_key', apiKey, 'openai');

// For API key auth, validate and use the provided key or stored key
if (authMethod === 'api_key') {
if (apiKey) {
// Use the provided API key
const validation = validateApiKey(apiKey, 'openai');
if (!validation.isValid) {
res.json({ success: true, authenticated: false, error: validation.error });
return;
}
authEnv[OPENAI_API_KEY_ENV] = validation.normalizedKey;
} else {
// Try stored key
const storedApiKey = getApiKey('openai');
if (storedApiKey) {
const validation = validateApiKey(storedApiKey, 'openai');
if (!validation.isValid) {
res.json({ success: true, authenticated: false, error: validation.error });
return;
}
authEnv[OPENAI_API_KEY_ENV] = validation.normalizedKey;
} else if (!authEnv[OPENAI_API_KEY_ENV]) {
res.json({ success: true, authenticated: false, error: ERROR_API_KEY_REQUIRED });
return;
}
}
}

// Create session and temporary environment override
AuthSessionManager.createSession(sessionId, authMethod || 'api_key', undefined, 'openai');
const cleanupEnv = createTempEnvOverride(authEnv);

try {
if (authMethod === 'cli') {
const authIndicators = await getCodexAuthIndicators();
if (!authIndicators.hasOAuthToken && !authIndicators.hasApiKey) {
res.json({
success: true,
authenticated: false,
error: ERROR_CLI_AUTH_REQUIRED,
});
return;
}
}

// Use Codex provider explicitly (not ProviderFactory.getProviderForModel)
// because Cursor also supports GPT models and has higher priority
const provider = ProviderFactory.getProviderByName('codex');
if (!provider) {
throw new Error('Codex provider not available');
}
const stream = provider.executeQuery({
prompt: AUTH_PROMPT,
model: CODEX_MODEL_MAP.gpt52Codex,
cwd: process.cwd(),
maxTurns: 1,
allowedTools: [],
abortController,
});

let receivedAnyContent = false;
let errorMessage = '';

for await (const msg of stream) {
if (msg.type === 'error' && msg.error) {
if (isBillingError(msg.error)) {
errorMessage = ERROR_BILLING_MESSAGE;
} else if (isRateLimitError(msg.error)) {
errorMessage = ERROR_RATE_LIMIT_MESSAGE;
} else {
errorMessage = msg.error;
}
break;
}

if (msg.type === 'assistant' && msg.message?.content) {
for (const block of msg.message.content) {
if (block.type === 'text' && block.text) {
receivedAnyContent = true;
if (isBillingError(block.text)) {
errorMessage = ERROR_BILLING_MESSAGE;
break;
}
if (isRateLimitError(block.text)) {
errorMessage = ERROR_RATE_LIMIT_MESSAGE;
break;
}
if (containsAuthError(block.text)) {
errorMessage = block.text;
break;
}
}
}
}

if (msg.type === 'result' && msg.result) {
receivedAnyContent = true;
if (isBillingError(msg.result)) {
errorMessage = ERROR_BILLING_MESSAGE;
} else if (isRateLimitError(msg.result)) {
errorMessage = ERROR_RATE_LIMIT_MESSAGE;
} else if (containsAuthError(msg.result)) {
errorMessage = msg.result;
break;
}
}
}

if (errorMessage) {
// Rate limit and billing errors mean auth succeeded but usage is limited
const isUsageLimitError =
errorMessage === ERROR_BILLING_MESSAGE || errorMessage === ERROR_RATE_LIMIT_MESSAGE;

const response: {
success: boolean;
authenticated: boolean;
error: string;
details?: string;
} = {
success: true,
authenticated: isUsageLimitError ? true : false,
error: isUsageLimitError
? errorMessage
: authMethod === 'cli'
? ERROR_CLI_AUTH_REQUIRED
: 'API key is invalid or has been revoked.',
};

// Include detailed error for auth failures so users can debug
if (!isUsageLimitError && errorMessage !== response.error) {
response.details = errorMessage;
}

res.json(response);
return;
}

if (!receivedAnyContent) {
res.json({
success: true,
authenticated: false,
error: 'No response received from Codex. Please check your authentication.',
});
return;
}

res.json({ success: true, authenticated: true });
} finally {
// Clean up environment override
cleanupEnv();
}
} catch (error: unknown) {
const errMessage = error instanceof Error ? error.message : String(error);
logger.error('[Setup] Codex auth verification error:', errMessage);
const normalizedError = isBillingError(errMessage)
? ERROR_BILLING_MESSAGE
: isRateLimitError(errMessage)
? ERROR_RATE_LIMIT_MESSAGE
: errMessage;
res.json({
success: true,
authenticated: false,
error: normalizedError,
});
} finally {
clearTimeout(timeoutId);
// Clean up session
AuthSessionManager.destroySession(sessionId);
}
};
}
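A minimal sketch of how the setup UI might call this endpoint; the client code is not part of this commit, and the example key is obviously fake:

// Hypothetical call to POST /api/setup/verify-codex-auth (sketch).
const res = await fetch('/api/setup/verify-codex-auth', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({ authMethod: 'api_key', apiKey: 'sk-example-not-real' }),
});
// Possible results per the handler: { success: true, authenticated: true },
// { success: true, authenticated: false, error, details? }, or HTTP 429 when rate limited.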
@@ -11,9 +11,10 @@ import { getGitRepositoryDiffs } from '../../common.js';
export function createDiffsHandler() {
return async (req: Request, res: Response): Promise<void> => {
try {
const { projectPath, featureId } = req.body as {
const { projectPath, featureId, useWorktrees } = req.body as {
projectPath: string;
featureId: string;
useWorktrees?: boolean;
};

if (!projectPath || !featureId) {
@@ -24,6 +25,19 @@ export function createDiffsHandler() {
return;
}

// If worktrees aren't enabled, don't probe .worktrees at all.
// This avoids noisy logs that make it look like features are "running in worktrees".
if (useWorktrees === false) {
const result = await getGitRepositoryDiffs(projectPath);
res.json({
success: true,
diff: result.diff,
files: result.files,
hasChanges: result.hasChanges,
});
return;
}

// Git worktrees are stored in project directory
const worktreePath = path.join(projectPath, '.worktrees', featureId);

@@ -41,7 +55,11 @@ export function createDiffsHandler() {
});
} catch (innerError) {
// Worktree doesn't exist - fallback to main project path
logError(innerError, 'Worktree access failed, falling back to main project');
const code = (innerError as NodeJS.ErrnoException | undefined)?.code;
// ENOENT is expected when a feature has no worktree; don't log as an error.
if (code && code !== 'ENOENT') {
logError(innerError, 'Worktree access failed, falling back to main project');
}

try {
const result = await getGitRepositoryDiffs(projectPath);
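A short sketch of the request shape the diffs handler now accepts; the values are invented, and the route path is not shown in the hunk above, so it is left out:

// Hypothetical request body (sketch): with useWorktrees: false the handler skips
// the .worktrees probe entirely and diffs the main project path directly.
const diffsRequest = {
  projectPath: '/home/user/my-project',  // assumed example value
  featureId: 'feature-a',                // assumed example ID
  useWorktrees: false,
};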
@@ -15,10 +15,11 @@ const execAsync = promisify(exec);
export function createFileDiffHandler() {
return async (req: Request, res: Response): Promise<void> => {
try {
const { projectPath, featureId, filePath } = req.body as {
const { projectPath, featureId, filePath, useWorktrees } = req.body as {
projectPath: string;
featureId: string;
filePath: string;
useWorktrees?: boolean;
};

if (!projectPath || !featureId || !filePath) {
@@ -29,6 +30,12 @@ export function createFileDiffHandler() {
return;
}

// If worktrees aren't enabled, don't probe .worktrees at all.
if (useWorktrees === false) {
res.json({ success: true, diff: '', filePath });
return;
}

// Git worktrees are stored in project directory
const worktreePath = path.join(projectPath, '.worktrees', featureId);

@@ -57,7 +64,11 @@ export function createFileDiffHandler() {

res.json({ success: true, diff, filePath });
} catch (innerError) {
logError(innerError, 'Worktree file diff failed');
const code = (innerError as NodeJS.ErrnoException | undefined)?.code;
// ENOENT is expected when a feature has no worktree; don't log as an error.
if (code && code !== 'ENOENT') {
logError(innerError, 'Worktree file diff failed');
}
res.json({ success: true, diff: '', filePath });
}
} catch (error) {
@@ -13,6 +13,8 @@ import {
isAbortError,
loadContextFiles,
createLogger,
classifyError,
getUserFriendlyErrorMessage,
} from '@automaker/utils';
import { ProviderFactory } from '../providers/provider-factory.js';
import { createChatOptions, validateWorkingDirectory } from '../lib/sdk-options.js';
@@ -20,7 +22,6 @@ import { PathNotAllowedError } from '@automaker/platform';
import type { SettingsService } from './settings-service.js';
import {
getAutoLoadClaudeMdSetting,
getEnableSandboxModeSetting,
filterClaudeMdFromContext,
getMCPServersFromSettings,
getPromptCustomization,
@@ -175,6 +176,18 @@ export class AgentService {
session.thinkingLevel = thinkingLevel;
}

// Validate vision support before processing images
const effectiveModel = model || session.model;
if (imagePaths && imagePaths.length > 0 && effectiveModel) {
const supportsVision = ProviderFactory.modelSupportsVision(effectiveModel);
if (!supportsVision) {
throw new Error(
`This model (${effectiveModel}) does not support image input. ` +
`Please switch to a model that supports vision, or remove the images and try again.`
);
}
}

// Read images and convert to base64
const images: Message['images'] = [];
if (imagePaths && imagePaths.length > 0) {
@@ -235,12 +248,6 @@ export class AgentService {
'[AgentService]'
);

// Load enableSandboxMode setting (global setting only)
const enableSandboxMode = await getEnableSandboxModeSetting(
this.settingsService,
'[AgentService]'
);

// Load MCP servers from settings (global setting only)
const mcpServers = await getMCPServersFromSettings(this.settingsService, '[AgentService]');

@@ -286,7 +293,6 @@ export class AgentService {
systemPrompt: combinedSystemPrompt,
abortController: session.abortController!,
autoLoadClaudeMd,
enableSandboxMode,
thinkingLevel: effectiveThinkingLevel, // Pass thinking level for Claude models
mcpServers: Object.keys(mcpServers).length > 0 ? mcpServers : undefined,
});
@@ -350,7 +356,6 @@ export class AgentService {
abortController: session.abortController!,
conversationHistory: conversationHistory.length > 0 ? conversationHistory : undefined,
settingSources: settingSources.length > 0 ? settingSources : undefined,
sandbox: sdkOptions.sandbox, // Pass sandbox configuration
sdkSessionId: session.sdkSessionId, // Pass SDK session ID for resuming
mcpServers: Object.keys(mcpServers).length > 0 ? mcpServers : undefined, // Pass MCP servers configuration
agents: customSubagents, // Pass custom subagents for task delegation
@@ -434,6 +439,53 @@ export class AgentService {
content: responseText,
toolUses,
});
} else if (msg.type === 'error') {
// Some providers (like Codex CLI/SaaS or Cursor CLI) surface failures as
// streamed error messages instead of throwing. Handle these here so the
// Agent Runner UX matches the Claude/Cursor behavior without changing
// their provider implementations.
const rawErrorText =
(typeof msg.error === 'string' && msg.error.trim()) ||
'Unexpected error from provider during agent execution.';

const errorInfo = classifyError(new Error(rawErrorText));

// Keep the provider-supplied text intact (Codex already includes helpful tips),
// only add a small rate-limit hint when we can detect it.
const enhancedText = errorInfo.isRateLimit
? `${rawErrorText}\n\nTip: It looks like you hit a rate limit. Try waiting a bit or reducing concurrent Agent Runner / Auto Mode tasks.`
: rawErrorText;

this.logger.error('Provider error during agent execution:', {
type: errorInfo.type,
message: errorInfo.message,
});

// Mark session as no longer running so the UI and queue stay in sync
session.isRunning = false;
session.abortController = null;

const errorMessage: Message = {
id: this.generateId(),
role: 'assistant',
content: `Error: ${enhancedText}`,
timestamp: new Date().toISOString(),
isError: true,
};

session.messages.push(errorMessage);
await this.saveSession(sessionId, session.messages);

this.emitAgentEvent(sessionId, {
type: 'error',
error: enhancedText,
message: errorMessage,
});

// Don't continue streaming after an error message
return {
success: false,
};
}
}

@@ -47,7 +47,6 @@ import type { SettingsService } from './settings-service.js';
import { pipelineService, PipelineService } from './pipeline-service.js';
import {
getAutoLoadClaudeMdSetting,
getEnableSandboxModeSetting,
filterClaudeMdFromContext,
getMCPServersFromSettings,
getPromptCustomization,
@@ -1314,7 +1313,6 @@ Format your response as a structured markdown document.`;
allowedTools: sdkOptions.allowedTools as string[],
abortController,
settingSources: sdkOptions.settingSources,
sandbox: sdkOptions.sandbox, // Pass sandbox configuration
thinkingLevel: analysisThinkingLevel, // Pass thinking level
};

@@ -1784,9 +1782,13 @@ Format your response as a structured markdown document.`;
// Apply dependency-aware ordering
const { orderedFeatures } = resolveDependencies(pendingFeatures);

// Get skipVerificationInAutoMode setting
const settings = await this.settingsService?.getGlobalSettings();
const skipVerification = settings?.skipVerificationInAutoMode ?? false;

// Filter to only features with satisfied dependencies
const readyFeatures = orderedFeatures.filter((feature: Feature) =>
areDependenciesSatisfied(feature, allFeatures)
areDependenciesSatisfied(feature, allFeatures, { skipVerification })
);

return readyFeatures;
@@ -1989,6 +1991,18 @@ This helps parse your summary correctly in the output logs.`;
const planningMode = options?.planningMode || 'skip';
const previousContent = options?.previousContent;

// Validate vision support before processing images
const effectiveModel = model || 'claude-sonnet-4-20250514';
if (imagePaths && imagePaths.length > 0) {
const supportsVision = ProviderFactory.modelSupportsVision(effectiveModel);
if (!supportsVision) {
throw new Error(
`This model (${effectiveModel}) does not support image input. ` +
`Please switch to a model that supports vision (like Claude models), or remove the images and try again.`
);
}
}

// Check if this planning mode can generate a spec/plan that needs approval
// - spec and full always generate specs
// - lite only generates approval-ready content when requirePlanApproval is true
@@ -2062,9 +2076,6 @@ This mock response was generated because AUTOMAKER_MOCK_AGENT=true was set.
? options.autoLoadClaudeMd
: await getAutoLoadClaudeMdSetting(finalProjectPath, this.settingsService, '[AutoMode]');

// Load enableSandboxMode setting (global setting only)
const enableSandboxMode = await getEnableSandboxModeSetting(this.settingsService, '[AutoMode]');

// Load MCP servers from settings (global setting only)
const mcpServers = await getMCPServersFromSettings(this.settingsService, '[AutoMode]');

@@ -2076,7 +2087,6 @@ This mock response was generated because AUTOMAKER_MOCK_AGENT=true was set.
model: model,
abortController,
autoLoadClaudeMd,
enableSandboxMode,
mcpServers: Object.keys(mcpServers).length > 0 ? mcpServers : undefined,
thinkingLevel: options?.thinkingLevel,
});
@@ -2119,7 +2129,6 @@ This mock response was generated because AUTOMAKER_MOCK_AGENT=true was set.
abortController,
systemPrompt: sdkOptions.systemPrompt,
settingSources: sdkOptions.settingSources,
sandbox: sdkOptions.sandbox, // Pass sandbox configuration
mcpServers: Object.keys(mcpServers).length > 0 ? mcpServers : undefined, // Pass MCP servers configuration
thinkingLevel: options?.thinkingLevel, // Pass thinking level for extended thinking
};
@@ -2202,9 +2211,23 @@ This mock response was generated because AUTOMAKER_MOCK_AGENT=true was set.
}, WRITE_DEBOUNCE_MS);
};

// Heartbeat logging so "silent" model calls are visible.
// Some runs can take a while before the first streamed message arrives.
const streamStartTime = Date.now();
let receivedAnyStreamMessage = false;
const STREAM_HEARTBEAT_MS = 15_000;
const streamHeartbeat = setInterval(() => {
if (receivedAnyStreamMessage) return;
const elapsedSeconds = Math.round((Date.now() - streamStartTime) / 1000);
logger.info(
`Waiting for first model response for feature ${featureId} (${elapsedSeconds}s elapsed)...`
);
}, STREAM_HEARTBEAT_MS);

// Wrap stream processing in try/finally to ensure timeout cleanup on any error/abort
try {
streamLoop: for await (const msg of stream) {
receivedAnyStreamMessage = true;
// Log raw stream event for debugging
appendRawEvent(msg);

@@ -2721,6 +2744,7 @@ Implement all the changes described in the plan above.`;
}
}
} finally {
clearInterval(streamHeartbeat);
// ALWAYS clear pending timeouts to prevent memory leaks
// This runs on success, error, or abort
if (writeTimeout) {
88
apps/server/src/services/codex-usage-service.ts
Normal file
@@ -0,0 +1,88 @@
import * as os from 'os';
import { findCodexCliPath } from '@automaker/platform';
import { checkCodexAuthentication } from '../lib/codex-auth.js';

export interface CodexRateLimitWindow {
limit: number;
used: number;
remaining: number;
usedPercent: number;
windowDurationMins: number;
resetsAt: number;
}

export interface CodexCreditsSnapshot {
balance?: string;
unlimited?: boolean;
hasCredits?: boolean;
}

export type CodexPlanType = 'free' | 'plus' | 'pro' | 'team' | 'enterprise' | 'edu' | 'unknown';

export interface CodexUsageData {
rateLimits: {
primary?: CodexRateLimitWindow;
secondary?: CodexRateLimitWindow;
credits?: CodexCreditsSnapshot;
planType?: CodexPlanType;
} | null;
lastUpdated: string;
}

/**
* Codex Usage Service
*
* Unlike Claude Code CLI which provides a `/usage` command, Codex CLI
* does not expose usage statistics directly. This service returns a
* clear message explaining this limitation.
*
* Future enhancement: Could query OpenAI API headers for rate limit info.
*/
export class CodexUsageService {
private codexBinary = 'codex';
private isWindows = os.platform() === 'win32';
private cachedCliPath: string | null = null;

/**
* Check if Codex CLI is available on the system
*/
async isAvailable(): Promise<boolean> {
// Prefer our platform-aware resolver over `which/where` because the server
// process PATH may not include npm global bins (nvm/fnm/volta/pnpm).
this.cachedCliPath = await findCodexCliPath();
return Boolean(this.cachedCliPath);
}

/**
* Attempt to fetch usage data
*
* Note: Codex CLI doesn't provide usage statistics like Claude Code does.
* This method returns an error explaining this limitation.
*/
async fetchUsageData(): Promise<CodexUsageData> {
// Check authentication status first
const isAuthenticated = await this.checkAuthentication();

if (!isAuthenticated) {
throw new Error("Codex is not authenticated. Please run 'codex login' to authenticate.");
}

// Codex CLI doesn't provide a usage command
// Return an error that will be caught and displayed
throw new Error(
'Codex usage statistics are not available. Unlike Claude Code, the Codex CLI does not provide a built-in usage command. ' +
'Usage limits are enforced by OpenAI but cannot be queried via the CLI. ' +
'Check your OpenAI dashboard at https://platform.openai.com/usage for detailed usage information.'
);
}

/**
* Check if Codex is authenticated
*/
private async checkAuthentication(): Promise<boolean> {
// Use the cached CLI path if available, otherwise fall back to finding it
const cliPath = this.cachedCliPath || (await findCodexCliPath());
const authCheck = await checkCodexAuthentication(cliPath);
return authCheck.authenticated;
}
}
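A hedged sketch of how a caller might consume this service; since fetchUsageData currently always throws once authentication is confirmed, callers are expected to surface the error message rather than usage numbers. This is not the actual route implementation:

// Illustrative caller (sketch).
const usageService = new CodexUsageService();
if (await usageService.isAvailable()) {
  try {
    const usage = await usageService.fetchUsageData();
    console.log(usage);                    // unreachable today; kept for future rate-limit support
  } catch (err) {
    console.warn((err as Error).message);  // explains that Codex CLI exposes no usage command
  }
}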
@@ -4,7 +4,7 @@
*/

import path from 'path';
import type { Feature } from '@automaker/types';
import type { Feature, DescriptionHistoryEntry } from '@automaker/types';
import { createLogger } from '@automaker/utils';
import * as secureFs from '../lib/secure-fs.js';
import {
@@ -274,6 +274,16 @@ export class FeatureLoader {
featureData.imagePaths
);

// Initialize description history with the initial description
const initialHistory: DescriptionHistoryEntry[] = [];
if (featureData.description && featureData.description.trim()) {
initialHistory.push({
description: featureData.description,
timestamp: new Date().toISOString(),
source: 'initial',
});
}

// Ensure feature has required fields
const feature: Feature = {
category: featureData.category || 'Uncategorized',
@@ -281,6 +291,7 @@ export class FeatureLoader {
...featureData,
id: featureId,
imagePaths: migratedImagePaths,
descriptionHistory: initialHistory,
};

// Write feature.json
@@ -292,11 +303,18 @@ export class FeatureLoader {

/**
* Update a feature (partial updates supported)
* @param projectPath - Path to the project
* @param featureId - ID of the feature to update
* @param updates - Partial feature updates
* @param descriptionHistorySource - Source of description change ('enhance' or 'edit')
* @param enhancementMode - Enhancement mode if source is 'enhance'
*/
async update(
projectPath: string,
featureId: string,
updates: Partial<Feature>
updates: Partial<Feature>,
descriptionHistorySource?: 'enhance' | 'edit',
enhancementMode?: 'improve' | 'technical' | 'simplify' | 'acceptance'
): Promise<Feature> {
const feature = await this.get(projectPath, featureId);
if (!feature) {
@@ -313,11 +331,28 @@ export class FeatureLoader {
updatedImagePaths = await this.migrateImages(projectPath, featureId, updates.imagePaths);
}

// Track description history if description changed
let updatedHistory = feature.descriptionHistory || [];
if (
updates.description !== undefined &&
updates.description !== feature.description &&
updates.description.trim()
) {
const historyEntry: DescriptionHistoryEntry = {
description: updates.description,
timestamp: new Date().toISOString(),
source: descriptionHistorySource || 'edit',
...(descriptionHistorySource === 'enhance' && enhancementMode ? { enhancementMode } : {}),
};
updatedHistory = [...updatedHistory, historyEntry];
}

// Merge updates
const updatedFeature: Feature = {
...feature,
...updates,
...(updatedImagePaths !== undefined ? { imagePaths: updatedImagePaths } : {}),
descriptionHistory: updatedHistory,
};

// Write back to file
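To make the history behavior concrete, a sketch of how descriptionHistory accumulates across a create, an enhancement, and a manual edit; the descriptions and timestamps are invented examples, only the entry shape follows the code above:

// Illustrative DescriptionHistoryEntry[] (sketch).
const exampleHistory = [
  { description: 'Add login', timestamp: '2025-01-01T10:00:00.000Z', source: 'initial' },
  {
    description: 'Add OAuth login with PKCE',
    timestamp: '2025-01-01T10:05:00.000Z',
    source: 'enhance',
    enhancementMode: 'technical',
  },
  { description: 'Add OAuth login (Google only)', timestamp: '2025-01-02T09:00:00.000Z', source: 'edit' },
];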
@@ -153,14 +153,6 @@ export class SettingsService {
const storedVersion = settings.version || 1;
let needsSave = false;

// Migration v1 -> v2: Force enableSandboxMode to false for existing users
// Sandbox mode can cause issues on some systems, so we're disabling it by default
if (storedVersion < 2) {
logger.info('Migrating settings from v1 to v2: disabling sandbox mode');
result.enableSandboxMode = false;
needsSave = true;
}

// Migration v2 -> v3: Convert string phase models to PhaseModelEntry objects
// Note: migratePhaseModels() handles the actual conversion for both v1 and v2 formats
if (storedVersion < 3) {
@@ -170,6 +162,16 @@ export class SettingsService {
needsSave = true;
}

// Migration v3 -> v4: Add onboarding/setup wizard state fields
// Older settings files never stored setup state in settings.json (it lived in localStorage),
// so default to "setup complete" for existing installs to avoid forcing re-onboarding.
if (storedVersion < 4) {
if (settings.setupComplete === undefined) result.setupComplete = true;
if (settings.isFirstRun === undefined) result.isFirstRun = false;
if (settings.skipClaudeSetup === undefined) result.skipClaudeSetup = false;
needsSave = true;
}

// Update version if any migration occurred
if (needsSave) {
result.version = SETTINGS_VERSION;
@@ -264,25 +266,79 @@ export class SettingsService {
const settingsPath = getGlobalSettingsPath(this.dataDir);

const current = await this.getGlobalSettings();

// Guard against destructive "empty array/object" overwrites.
// During auth transitions, the UI can briefly have default/empty state and accidentally
// sync it, wiping persisted settings (especially `projects`).
const sanitizedUpdates: Partial<GlobalSettings> = { ...updates };
let attemptedProjectWipe = false;

const ignoreEmptyArrayOverwrite = <K extends keyof GlobalSettings>(key: K): void => {
const nextVal = sanitizedUpdates[key] as unknown;
const curVal = current[key] as unknown;
if (
Array.isArray(nextVal) &&
nextVal.length === 0 &&
Array.isArray(curVal) &&
curVal.length > 0
) {
delete sanitizedUpdates[key];
}
};

const currentProjectsLen = Array.isArray(current.projects) ? current.projects.length : 0;
if (
Array.isArray(sanitizedUpdates.projects) &&
sanitizedUpdates.projects.length === 0 &&
currentProjectsLen > 0
) {
attemptedProjectWipe = true;
delete sanitizedUpdates.projects;
}

ignoreEmptyArrayOverwrite('trashedProjects');
ignoreEmptyArrayOverwrite('projectHistory');
ignoreEmptyArrayOverwrite('recentFolders');
ignoreEmptyArrayOverwrite('aiProfiles');
ignoreEmptyArrayOverwrite('mcpServers');
ignoreEmptyArrayOverwrite('enabledCursorModels');

// Empty object overwrite guard
if (
sanitizedUpdates.lastSelectedSessionByProject &&
typeof sanitizedUpdates.lastSelectedSessionByProject === 'object' &&
!Array.isArray(sanitizedUpdates.lastSelectedSessionByProject) &&
Object.keys(sanitizedUpdates.lastSelectedSessionByProject).length === 0 &&
current.lastSelectedSessionByProject &&
Object.keys(current.lastSelectedSessionByProject).length > 0
) {
delete sanitizedUpdates.lastSelectedSessionByProject;
}

// If a request attempted to wipe projects, also ignore theme changes in that same request.
if (attemptedProjectWipe) {
delete sanitizedUpdates.theme;
}

const updated: GlobalSettings = {
...current,
...updates,
...sanitizedUpdates,
version: SETTINGS_VERSION,
};

// Deep merge keyboard shortcuts if provided
if (updates.keyboardShortcuts) {
if (sanitizedUpdates.keyboardShortcuts) {
updated.keyboardShortcuts = {
...current.keyboardShortcuts,
...updates.keyboardShortcuts,
...sanitizedUpdates.keyboardShortcuts,
};
}

// Deep merge phaseModels if provided
if (updates.phaseModels) {
if (sanitizedUpdates.phaseModels) {
updated.phaseModels = {
...current.phaseModels,
...updates.phaseModels,
...sanitizedUpdates.phaseModels,
};
}
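To illustrate the guard, a small sketch of an update that would previously have wiped persisted projects and how it is sanitized now; the concrete settings values are invented:

// Illustrative scenario (sketch; values invented).
const currentSettings = { projects: ['p1', 'p2'], theme: 'dark' };
const incomingUpdates = { projects: [], theme: 'light' }; // accidental default-state sync
// Sanitization drops the empty projects array, sets attemptedProjectWipe = true,
// and therefore also drops the theme change, so the persisted result still has
// projects ['p1', 'p2'] and theme 'dark'.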
@@ -523,8 +579,26 @@ export class SettingsService {
}
}

// Parse setup wizard state (previously stored in localStorage)
let setupState: Record<string, unknown> = {};
if (localStorageData['automaker-setup']) {
try {
const parsed = JSON.parse(localStorageData['automaker-setup']);
setupState = parsed.state || parsed;
} catch (e) {
errors.push(`Failed to parse automaker-setup: ${e}`);
}
}

// Extract global settings
const globalSettings: Partial<GlobalSettings> = {
setupComplete:
setupState.setupComplete !== undefined ? (setupState.setupComplete as boolean) : false,
isFirstRun: setupState.isFirstRun !== undefined ? (setupState.isFirstRun as boolean) : true,
skipClaudeSetup:
setupState.skipClaudeSetup !== undefined
? (setupState.skipClaudeSetup as boolean)
: false,
theme: (appState.theme as GlobalSettings['theme']) || 'dark',
sidebarOpen: appState.sidebarOpen !== undefined ? (appState.sidebarOpen as boolean) : true,
chatHistoryOpen: (appState.chatHistoryOpen as boolean) || false,
@@ -537,6 +611,10 @@ export class SettingsService {
appState.enableDependencyBlocking !== undefined
? (appState.enableDependencyBlocking as boolean)
: true,
skipVerificationInAutoMode:
appState.skipVerificationInAutoMode !== undefined
? (appState.skipVerificationInAutoMode as boolean)
: false,
useWorktrees: (appState.useWorktrees as boolean) || false,
showProfilesOnly: (appState.showProfilesOnly as boolean) || false,
defaultPlanningMode:
373
apps/server/src/tests/cli-integration.test.ts
Normal file
@@ -0,0 +1,373 @@
|
||||
/**
|
||||
* CLI Integration Tests
|
||||
*
|
||||
* Comprehensive tests for CLI detection, authentication, and operations
|
||||
* across all providers (Claude, Codex, Cursor)
|
||||
*/
|
||||
|
||||
import { describe, it, expect, beforeEach, afterEach } from 'vitest';
|
||||
import {
|
||||
detectCli,
|
||||
detectAllCLis,
|
||||
findCommand,
|
||||
getCliVersion,
|
||||
getInstallInstructions,
|
||||
validateCliInstallation,
|
||||
} from '../lib/cli-detection.js';
|
||||
import { classifyError, getUserFriendlyErrorMessage } from '../lib/error-handler.js';
|
||||
|
||||
describe('CLI Detection Framework', () => {
|
||||
describe('findCommand', () => {
|
||||
it('should find existing command', async () => {
|
||||
// Test with a command that should exist
|
||||
const result = await findCommand(['node']);
|
||||
expect(result).toBeTruthy();
|
||||
});
|
||||
|
||||
it('should return null for non-existent command', async () => {
|
||||
const result = await findCommand(['nonexistent-command-12345']);
|
||||
expect(result).toBeNull();
|
||||
});
|
||||
|
||||
it('should find first available command from alternatives', async () => {
|
||||
const result = await findCommand(['nonexistent-command-12345', 'node']);
|
||||
expect(result).toBeTruthy();
|
||||
expect(result).toContain('node');
|
||||
});
|
||||
});
|
||||
|
||||
describe('getCliVersion', () => {
|
||||
it('should get version for existing command', async () => {
|
||||
const version = await getCliVersion('node', ['--version'], 5000);
|
||||
expect(version).toBeTruthy();
|
||||
expect(typeof version).toBe('string');
|
||||
});
|
||||
|
||||
it('should timeout for non-responsive command', async () => {
|
||||
await expect(getCliVersion('sleep', ['10'], 1000)).rejects.toThrow();
|
||||
}, 15000); // Give extra time for test timeout
|
||||
|
||||
it("should handle command that doesn't exist", async () => {
|
||||
await expect(
|
||||
getCliVersion('nonexistent-command-12345', ['--version'], 2000)
|
||||
).rejects.toThrow();
|
||||
});
|
||||
});
|
||||
|
||||
describe('getInstallInstructions', () => {
|
||||
it('should return instructions for supported platforms', () => {
|
||||
const claudeInstructions = getInstallInstructions('claude', 'darwin');
|
||||
expect(claudeInstructions).toContain('brew install');
|
||||
|
||||
const codexInstructions = getInstallInstructions('codex', 'linux');
|
||||
expect(codexInstructions).toContain('npm install');
|
||||
});
|
||||
|
||||
it('should handle unsupported platform', () => {
|
||||
const instructions = getInstallInstructions('claude', 'unknown-platform' as any);
|
||||
expect(instructions).toContain('No installation instructions available');
|
||||
});
|
||||
});
|
||||
|
||||
describe('validateCliInstallation', () => {
|
||||
it('should validate properly installed CLI', () => {
|
||||
const cliInfo = {
|
||||
name: 'Test CLI',
|
||||
command: 'node',
        version: 'v18.0.0',
        path: '/usr/bin/node',
        installed: true,
        authenticated: true,
        authMethod: 'cli' as const,
      };

      const result = validateCliInstallation(cliInfo);
      expect(result.valid).toBe(true);
      expect(result.issues).toHaveLength(0);
    });

    it('should detect issues with installation', () => {
      const cliInfo = {
        name: 'Test CLI',
        command: '',
        version: '',
        path: '',
        installed: false,
        authenticated: false,
        authMethod: 'none' as const,
      };

      const result = validateCliInstallation(cliInfo);
      expect(result.valid).toBe(false);
      expect(result.issues.length).toBeGreaterThan(0);
      expect(result.issues).toContain('CLI is not installed');
    });
  });
});

describe('Error Handling System', () => {
  describe('classifyError', () => {
    it('should classify authentication errors', () => {
      const authError = new Error('invalid_api_key: Your API key is invalid');
      const result = classifyError(authError, 'claude');

      expect(result.type).toBe('authentication');
      expect(result.severity).toBe('high');
      expect(result.userMessage).toContain('Authentication failed');
      expect(result.retryable).toBe(false);
      expect(result.provider).toBe('claude');
    });

    it('should classify billing errors', () => {
      const billingError = new Error('credit balance is too low');
      const result = classifyError(billingError);

      expect(result.type).toBe('billing');
      expect(result.severity).toBe('high');
      expect(result.userMessage).toContain('insufficient credits');
      expect(result.retryable).toBe(false);
    });

    it('should classify rate limit errors', () => {
      const rateLimitError = new Error('Rate limit reached. Try again later.');
      const result = classifyError(rateLimitError);

      expect(result.type).toBe('rate_limit');
      expect(result.severity).toBe('medium');
      expect(result.userMessage).toContain('Rate limit reached');
      expect(result.retryable).toBe(true);
    });

    it('should classify network errors', () => {
      const networkError = new Error('ECONNREFUSED: Connection refused');
      const result = classifyError(networkError);

      expect(result.type).toBe('network');
      expect(result.severity).toBe('medium');
      expect(result.userMessage).toContain('Network connection issue');
      expect(result.retryable).toBe(true);
    });

    it('should handle unknown errors', () => {
      const unknownError = new Error('Something completely unexpected happened');
      const result = classifyError(unknownError);

      expect(result.type).toBe('unknown');
      expect(result.severity).toBe('medium');
      expect(result.userMessage).toContain('unexpected error');
      expect(result.retryable).toBe(true);
    });
  });

  describe('getUserFriendlyErrorMessage', () => {
    it('should include provider name in message', () => {
      const error = new Error('invalid_api_key');
      const message = getUserFriendlyErrorMessage(error, 'claude');

      expect(message).toContain('[CLAUDE]');
    });

    it('should include suggested action when available', () => {
      const error = new Error('invalid_api_key');
      const message = getUserFriendlyErrorMessage(error);

      expect(message).toContain('Verify your API key');
    });
  });
});
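
// Illustrative sketch only — not the classifyError implementation under test.
// The expectations above imply a keyword-based classifier roughly like the one
// below; every name carrying the "Sketch" suffix is hypothetical.
type ErrorTypeSketch = 'authentication' | 'billing' | 'rate_limit' | 'network' | 'unknown';

interface ClassifiedErrorSketch {
  type: ErrorTypeSketch;
  severity: 'low' | 'medium' | 'high';
  userMessage: string;
  retryable: boolean;
  provider?: string;
}

function classifyErrorSketch(error: unknown, provider?: string): ClassifiedErrorSketch {
  // Coerce anything throw-able (Error, string, number, object) into a message string.
  const message = error instanceof Error ? error.message : String(error ?? '');
  const lower = message.toLowerCase();

  if (lower.includes('invalid_api_key') || lower.includes('unauthorized')) {
    return { type: 'authentication', severity: 'high', userMessage: 'Authentication failed. Verify your API key.', retryable: false, provider };
  }
  if (lower.includes('credit balance')) {
    return { type: 'billing', severity: 'high', userMessage: 'Request blocked: insufficient credits.', retryable: false, provider };
  }
  if (lower.includes('rate limit')) {
    return { type: 'rate_limit', severity: 'medium', userMessage: 'Rate limit reached. Retry after a short delay.', retryable: true, provider };
  }
  if (lower.includes('econnrefused') || lower.includes('network')) {
    return { type: 'network', severity: 'medium', userMessage: 'Network connection issue. Check connectivity and retry.', retryable: true, provider };
  }
  return { type: 'unknown', severity: 'medium', userMessage: 'An unexpected error occurred. Retrying may help.', retryable: true, provider };
}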

describe('Provider-Specific Tests', () => {
  describe('Claude CLI Detection', () => {
    it('should detect Claude CLI if installed', async () => {
      const result = await detectCli('claude');

      if (result.detected) {
        expect(result.cli.name).toBe('Claude CLI');
        expect(result.cli.installed).toBe(true);
        expect(result.cli.command).toBeTruthy();
      }
      // If not installed, that's also a valid test result
    });

    it('should handle missing Claude CLI gracefully', async () => {
      // This test will pass regardless of whether Claude is installed
      const result = await detectCli('claude');
      expect(typeof result.detected).toBe('boolean');
      expect(Array.isArray(result.issues)).toBe(true);
    });
  });

  describe('Codex CLI Detection', () => {
    it('should detect Codex CLI if installed', async () => {
      const result = await detectCli('codex');

      if (result.detected) {
        expect(result.cli.name).toBe('Codex CLI');
        expect(result.cli.installed).toBe(true);
        expect(result.cli.command).toBeTruthy();
      }
    });
  });

  describe('Cursor CLI Detection', () => {
    it('should detect Cursor CLI if installed', async () => {
      const result = await detectCli('cursor');

      if (result.detected) {
        expect(result.cli.name).toBe('Cursor CLI');
        expect(result.cli.installed).toBe(true);
        expect(result.cli.command).toBeTruthy();
      }
    });
  });
});
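
// Illustrative sketch only — an assumed shape for detectCli, not the function these
// tests import. Detection is modelled as "run `<command> --version` within a timeout";
// the { timeout } option mirrors the timeout test further down.
import { execFile } from 'node:child_process';
import { promisify } from 'node:util';

const execFileAsync = promisify(execFile);

interface DetectCliResultSketch {
  detected: boolean;
  issues: string[];
  cli: { name: string; command: string; version: string; installed: boolean };
}

async function detectCliSketch(
  command: 'claude' | 'codex' | 'cursor',
  options: { timeout?: number } = {}
): Promise<DetectCliResultSketch> {
  if (!command) {
    // Matches the edge-case tests: empty/null/undefined names reject.
    throw new Error('CLI name is required');
  }
  const displayName = `${command.charAt(0).toUpperCase()}${command.slice(1)} CLI`;
  try {
    const { stdout } = await execFileAsync(command, ['--version'], {
      timeout: options.timeout ?? 5000,
    });
    return {
      detected: true,
      issues: [],
      cli: { name: displayName, command, version: stdout.trim(), installed: true },
    };
  } catch (error) {
    // Missing binaries and timeouts both surface here as issues, never as throws.
    const reason = error instanceof Error ? error.message : String(error);
    return {
      detected: false,
      issues: [`${displayName} could not be executed: ${reason}`],
      cli: { name: displayName, command: '', version: '', installed: false },
    };
  }
}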

describe('Integration Tests', () => {
  describe('detectAllCLis', () => {
    it('should detect all available CLIs', async () => {
      const results = await detectAllCLis();

      expect(results).toHaveProperty('claude');
      expect(results).toHaveProperty('codex');
      expect(results).toHaveProperty('cursor');

      // Each should have the expected structure
      Object.values(results).forEach((result) => {
        expect(result).toHaveProperty('cli');
        expect(result).toHaveProperty('detected');
        expect(result).toHaveProperty('issues');
        expect(result.cli).toHaveProperty('name');
        expect(result.cli).toHaveProperty('installed');
        expect(result.cli).toHaveProperty('authenticated');
      });
    }, 30000); // Longer timeout for CLI detection

    it('should handle concurrent CLI detection', async () => {
      // Run detection multiple times concurrently
      const promises = [detectAllCLis(), detectAllCLis(), detectAllCLis()];

      const results = await Promise.all(promises);

      // All should return consistent results
      expect(results).toHaveLength(3);
      results.forEach((result) => {
        expect(result).toHaveProperty('claude');
        expect(result).toHaveProperty('codex');
        expect(result).toHaveProperty('cursor');
      });
    }, 45000);
  });
});
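
// Illustrative sketch only — an assumed composition of the per-provider check above.
// Running the three read-only detections through Promise.all is what keeps the
// concurrent and rapid-repeat tests safe: there is no shared mutable state.
// Reuses the hypothetical detectCliSketch defined in the earlier sketch.
async function detectAllCLisSketch(): Promise<
  Record<'claude' | 'codex' | 'cursor', DetectCliResultSketch>
> {
  const [claude, codex, cursor] = await Promise.all([
    detectCliSketch('claude'),
    detectCliSketch('codex'),
    detectCliSketch('cursor'),
  ]);
  return { claude, codex, cursor };
}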

describe('Error Recovery Tests', () => {
  it('should handle partial CLI detection failures', async () => {
    // No mocking here: run against the real environment, where some CLIs may be missing
    const results = await detectAllCLis();

    // Should still return results for all providers
    expect(results).toHaveProperty('claude');
    expect(results).toHaveProperty('codex');
    expect(results).toHaveProperty('cursor');

    // Should provide error information for failures
    Object.entries(results).forEach(([provider, result]) => {
      if (!result.detected && result.issues.length > 0) {
        expect(result.issues.length).toBeGreaterThan(0);
        expect(result.issues[0]).toBeTruthy();
      }
    });
  });

  it('should handle timeout during CLI detection', async () => {
    // Test with very short timeout
    const result = await detectCli('claude', { timeout: 1 });

    // Should handle gracefully without throwing
    expect(typeof result.detected).toBe('boolean');
    expect(Array.isArray(result.issues)).toBe(true);
  });
});

describe('Security Tests', () => {
  it('should not expose sensitive information in error messages', () => {
    const errorWithKey = new Error('invalid_api_key: sk-ant-abc123secret456');
    const message = getUserFriendlyErrorMessage(errorWithKey);

    // Should not expose the actual API key
    expect(message).not.toContain('sk-ant-abc123secret456');
    expect(message).toContain('Authentication failed');
  });

  it('should sanitize file paths in error messages', () => {
    const errorWithPath = new Error('Permission denied: /home/user/.ssh/id_rsa');
    const message = getUserFriendlyErrorMessage(errorWithPath);

    // Should not expose sensitive file paths
    expect(message).not.toContain('/home/user/.ssh/id_rsa');
  });
});
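
// Illustrative sketch only — one plausible redaction pass behind the guarantees the
// security tests assert. The real sanitizer may differ; the patterns below are assumptions.
function redactSensitiveSketch(message: string): string {
  return message
    // Mask API-key-shaped tokens such as "sk-ant-abc123secret456".
    .replace(/\bsk-[A-Za-z0-9_-]{8,}\b/g, '[REDACTED_KEY]')
    // Mask absolute paths under user home directories, e.g. "/home/user/.ssh/id_rsa".
    .replace(/\/(?:home|Users)\/[^\s'"]+/g, '[REDACTED_PATH]');
}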

// Performance Tests
describe('Performance Tests', () => {
  it('should detect CLIs within reasonable time', async () => {
    const startTime = Date.now();
    const results = await detectAllCLis();
    const endTime = Date.now();

    const duration = endTime - startTime;
    expect(duration).toBeLessThan(10000); // Should complete in under 10 seconds
    expect(results).toHaveProperty('claude');
    expect(results).toHaveProperty('codex');
    expect(results).toHaveProperty('cursor');
  }, 15000);

  it('should handle rapid repeated calls', async () => {
    // Make multiple rapid calls
    const promises = Array.from({ length: 10 }, () => detectAllCLis());
    const results = await Promise.all(promises);

    // All should complete successfully
    expect(results).toHaveLength(10);
    results.forEach((result) => {
      expect(result).toHaveProperty('claude');
      expect(result).toHaveProperty('codex');
      expect(result).toHaveProperty('cursor');
    });
  }, 60000);
});

// Edge Cases
describe('Edge Cases', () => {
  it('should handle empty CLI names', async () => {
    await expect(detectCli('' as any)).rejects.toThrow();
  });

  it('should handle null CLI names', async () => {
    await expect(detectCli(null as any)).rejects.toThrow();
  });

  it('should handle undefined CLI names', async () => {
    await expect(detectCli(undefined as any)).rejects.toThrow();
  });

  it('should handle malformed error objects', () => {
    const testCases = [
      null,
      undefined,
      '',
      123,
      [],
      { nested: { error: { message: 'test' } } },
      { error: 'simple string error' },
    ];

    testCases.forEach((error) => {
      expect(() => {
        const result = classifyError(error);
        expect(result).toHaveProperty('type');
        expect(result).toHaveProperty('severity');
        expect(result).toHaveProperty('userMessage');
      }).not.toThrow();
    });
  });
});
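
// Illustrative sketch only — a hypothetical helper showing how the malformed-object
// cases above can be coerced to a message string without ever throwing.
function coerceErrorMessageSketch(value: unknown): string {
  if (value instanceof Error) return value.message;
  if (typeof value === 'string') return value;
  if (value && typeof value === 'object') {
    const record = value as { message?: unknown; error?: unknown };
    const candidate = record.message ?? record.error;
    if (typeof candidate === 'string') return candidate;
    try {
      return JSON.stringify(value);
    } catch {
      return Object.prototype.toString.call(value);
    }
  }
  return String(value ?? '');
}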