Merge remote-tracking branch 'origin/v0.10.0rc' into stefandevo/main

Kacper
2026-01-11 17:34:19 +01:00
156 changed files with 8389 additions and 5916 deletions

View File

@@ -55,6 +55,8 @@ import { createClaudeRoutes } from './routes/claude/index.js';
import { ClaudeUsageService } from './services/claude-usage-service.js';
import { createCodexRoutes } from './routes/codex/index.js';
import { CodexUsageService } from './services/codex-usage-service.js';
import { CodexAppServerService } from './services/codex-app-server-service.js';
import { CodexModelCacheService } from './services/codex-model-cache-service.js';
import { createGitHubRoutes } from './routes/github/index.js';
import { createContextRoutes } from './routes/context/index.js';
import { createBacklogPlanRoutes } from './routes/backlog-plan/index.js';
@@ -168,7 +170,9 @@ const agentService = new AgentService(DATA_DIR, events, settingsService);
const featureLoader = new FeatureLoader();
const autoModeService = new AutoModeService(events, settingsService);
const claudeUsageService = new ClaudeUsageService();
const codexUsageService = new CodexUsageService();
const codexAppServerService = new CodexAppServerService();
const codexModelCacheService = new CodexModelCacheService(DATA_DIR, codexAppServerService);
const codexUsageService = new CodexUsageService(codexAppServerService);
const mcpTestService = new MCPTestService(settingsService);
const ideationService = new IdeationService(events, settingsService, featureLoader);
@@ -176,6 +180,11 @@ const ideationService = new IdeationService(events, settingsService, featureLoad
(async () => {
await agentService.initialize();
logger.info('Agent service initialized');
// Bootstrap Codex model cache in background (don't block server startup)
void codexModelCacheService.getModels().catch((err) => {
logger.error('Failed to bootstrap Codex model cache:', err);
});
})();
// Run stale validation cleanup every hour to prevent memory leaks from crashed validations
@@ -219,7 +228,7 @@ app.use('/api/templates', createTemplatesRoutes());
app.use('/api/terminal', createTerminalRoutes());
app.use('/api/settings', createSettingsRoutes(settingsService));
app.use('/api/claude', createClaudeRoutes(claudeUsageService));
app.use('/api/codex', createCodexRoutes(codexUsageService));
app.use('/api/codex', createCodexRoutes(codexUsageService, codexModelCacheService));
app.use('/api/github', createGitHubRoutes(events, settingsService));
app.use('/api/context', createContextRoutes(settingsService));
app.use('/api/backlog-plan', createBacklogPlanRoutes(events, settingsService));
@@ -588,6 +597,26 @@ const startServer = (port: number) => {
startServer(PORT);
// Global error handlers to prevent crashes from uncaught errors
process.on('unhandledRejection', (reason: unknown, _promise: Promise<unknown>) => {
logger.error('Unhandled Promise Rejection:', {
reason: reason instanceof Error ? reason.message : String(reason),
stack: reason instanceof Error ? reason.stack : undefined,
});
// Don't exit - log the error and continue running
// This prevents the server from crashing due to unhandled rejections
});
process.on('uncaughtException', (error: Error) => {
logger.error('Uncaught Exception:', {
message: error.message,
stack: error.stack,
});
// Exit on uncaught exceptions to prevent undefined behavior
// The process is in an unknown state after an uncaught exception
process.exit(1);
});
// Graceful shutdown
process.on('SIGTERM', () => {
logger.info('SIGTERM received, shutting down...');

View File

@@ -5,9 +5,11 @@
* Never assumes authenticated - only returns true if CLI confirms.
*/
import { spawnProcess, getCodexAuthPath } from '@automaker/platform';
import { spawnProcess } from '@automaker/platform';
import { findCodexCliPath } from '@automaker/platform';
import * as fs from 'fs';
import { createLogger } from '@automaker/utils';
const logger = createLogger('CodexAuth');
const CODEX_COMMAND = 'codex';
const OPENAI_API_KEY_ENV = 'OPENAI_API_KEY';
@@ -26,36 +28,16 @@ export interface CodexAuthCheckResult {
export async function checkCodexAuthentication(
cliPath?: string | null
): Promise<CodexAuthCheckResult> {
console.log('[CodexAuth] checkCodexAuthentication called with cliPath:', cliPath);
const resolvedCliPath = cliPath || (await findCodexCliPath());
const hasApiKey = !!process.env[OPENAI_API_KEY_ENV];
console.log('[CodexAuth] resolvedCliPath:', resolvedCliPath);
console.log('[CodexAuth] hasApiKey:', hasApiKey);
// Debug: Check auth file
const authFilePath = getCodexAuthPath();
console.log('[CodexAuth] Auth file path:', authFilePath);
try {
const authFileExists = fs.existsSync(authFilePath);
console.log('[CodexAuth] Auth file exists:', authFileExists);
if (authFileExists) {
const authContent = fs.readFileSync(authFilePath, 'utf-8');
console.log('[CodexAuth] Auth file content:', authContent.substring(0, 500)); // First 500 chars
}
} catch (error) {
console.log('[CodexAuth] Error reading auth file:', error);
}
// If CLI is not installed, cannot be authenticated
if (!resolvedCliPath) {
console.log('[CodexAuth] No CLI path found, returning not authenticated');
logger.info('CLI not found');
return { authenticated: false, method: 'none' };
}
try {
console.log('[CodexAuth] Running: ' + resolvedCliPath + ' login status');
const result = await spawnProcess({
command: resolvedCliPath || CODEX_COMMAND,
args: ['login', 'status'],
@@ -66,33 +48,21 @@ export async function checkCodexAuthentication(
},
});
console.log('[CodexAuth] Command result:');
console.log('[CodexAuth] exitCode:', result.exitCode);
console.log('[CodexAuth] stdout:', JSON.stringify(result.stdout));
console.log('[CodexAuth] stderr:', JSON.stringify(result.stderr));
// Check both stdout and stderr for "logged in" - Codex CLI outputs to stderr
const combinedOutput = (result.stdout + result.stderr).toLowerCase();
const isLoggedIn = combinedOutput.includes('logged in');
console.log('[CodexAuth] isLoggedIn (contains "logged in" in stdout or stderr):', isLoggedIn);
if (result.exitCode === 0 && isLoggedIn) {
// Determine auth method based on what we know
const method = hasApiKey ? 'api_key_env' : 'cli_authenticated';
console.log('[CodexAuth] Authenticated! method:', method);
logger.info(`✓ Authenticated (${method})`);
return { authenticated: true, method };
}
console.log(
'[CodexAuth] Not authenticated. exitCode:',
result.exitCode,
'isLoggedIn:',
isLoggedIn
);
logger.info('Not authenticated');
return { authenticated: false, method: 'none' };
} catch (error) {
console.log('[CodexAuth] Error running command:', error);
logger.error('Failed to check authentication:', error);
return { authenticated: false, method: 'none' };
}
console.log('[CodexAuth] Returning not authenticated');
return { authenticated: false, method: 'none' };
}

View File

@@ -21,6 +21,7 @@ import {
extractTextFromContent,
classifyError,
getUserFriendlyErrorMessage,
createLogger,
} from '@automaker/utils';
import type {
ExecuteOptions,
@@ -658,6 +659,8 @@ async function loadCodexInstructions(cwd: string, enabled: boolean): Promise<str
.join('\n\n');
}
const logger = createLogger('CodexProvider');
export class CodexProvider extends BaseProvider {
getName(): string {
return 'codex';
@@ -967,21 +970,11 @@ export class CodexProvider extends BaseProvider {
}
async detectInstallation(): Promise<InstallationStatus> {
console.log('[CodexProvider.detectInstallation] Starting...');
const cliPath = await findCodexCliPath();
const hasApiKey = !!process.env[OPENAI_API_KEY_ENV];
const authIndicators = await getCodexAuthIndicators();
const installed = !!cliPath;
console.log('[CodexProvider.detectInstallation] cliPath:', cliPath);
console.log('[CodexProvider.detectInstallation] hasApiKey:', hasApiKey);
console.log(
'[CodexProvider.detectInstallation] authIndicators:',
JSON.stringify(authIndicators)
);
console.log('[CodexProvider.detectInstallation] installed:', installed);
let version = '';
if (installed) {
try {
@@ -991,20 +984,16 @@ export class CodexProvider extends BaseProvider {
cwd: process.cwd(),
});
version = result.stdout.trim();
console.log('[CodexProvider.detectInstallation] version:', version);
} catch (error) {
console.log('[CodexProvider.detectInstallation] Error getting version:', error);
version = '';
}
}
// Determine auth status - always verify with CLI, never assume authenticated
console.log('[CodexProvider.detectInstallation] Calling checkCodexAuthentication...');
const authCheck = await checkCodexAuthentication(cliPath);
console.log('[CodexProvider.detectInstallation] authCheck result:', JSON.stringify(authCheck));
const authenticated = authCheck.authenticated;
const result = {
return {
installed,
path: cliPath || undefined,
version: version || undefined,
@@ -1012,8 +1001,6 @@ export class CodexProvider extends BaseProvider {
hasApiKey,
authenticated,
};
console.log('[CodexProvider.detectInstallation] Final result:', JSON.stringify(result));
return result;
}
getAvailableModels(): ModelDefinition[] {
@@ -1025,36 +1012,24 @@ export class CodexProvider extends BaseProvider {
* Check authentication status for Codex CLI
*/
async checkAuth(): Promise<CodexAuthStatus> {
console.log('[CodexProvider.checkAuth] Starting auth check...');
const cliPath = await findCodexCliPath();
const hasApiKey = !!process.env[OPENAI_API_KEY_ENV];
const authIndicators = await getCodexAuthIndicators();
console.log('[CodexProvider.checkAuth] cliPath:', cliPath);
console.log('[CodexProvider.checkAuth] hasApiKey:', hasApiKey);
console.log('[CodexProvider.checkAuth] authIndicators:', JSON.stringify(authIndicators));
// Check for API key in environment
if (hasApiKey) {
console.log('[CodexProvider.checkAuth] Has API key, returning authenticated');
return { authenticated: true, method: 'api_key' };
}
// Check for OAuth/token from Codex CLI
if (authIndicators.hasOAuthToken || authIndicators.hasApiKey) {
console.log(
'[CodexProvider.checkAuth] Has OAuth token or API key in auth file, returning authenticated'
);
return { authenticated: true, method: 'oauth' };
}
// CLI is installed but not authenticated via indicators - try CLI command
console.log('[CodexProvider.checkAuth] No indicators found, trying CLI command...');
if (cliPath) {
try {
// Try 'codex login status' first (same as checkCodexAuthentication)
console.log('[CodexProvider.checkAuth] Running: ' + cliPath + ' login status');
const result = await spawnProcess({
command: cliPath || CODEX_COMMAND,
args: ['login', 'status'],
@@ -1064,26 +1039,19 @@ export class CodexProvider extends BaseProvider {
TERM: 'dumb',
},
});
console.log('[CodexProvider.checkAuth] login status result:');
console.log('[CodexProvider.checkAuth] exitCode:', result.exitCode);
console.log('[CodexProvider.checkAuth] stdout:', JSON.stringify(result.stdout));
console.log('[CodexProvider.checkAuth] stderr:', JSON.stringify(result.stderr));
// Check both stdout and stderr - Codex CLI outputs to stderr
const combinedOutput = (result.stdout + result.stderr).toLowerCase();
const isLoggedIn = combinedOutput.includes('logged in');
console.log('[CodexProvider.checkAuth] isLoggedIn:', isLoggedIn);
if (result.exitCode === 0 && isLoggedIn) {
console.log('[CodexProvider.checkAuth] CLI says logged in, returning authenticated');
return { authenticated: true, method: 'oauth' };
}
} catch (error) {
console.log('[CodexProvider.checkAuth] Error running login status:', error);
logger.warn('Error running login status command during auth check:', error);
}
}
console.log('[CodexProvider.checkAuth] Not authenticated');
return { authenticated: false, method: 'none' };
}

View File

@@ -41,95 +41,103 @@ export interface OpenCodeAuthStatus {
/**
* Base interface for all OpenCode stream events
* OpenCode uses underscore format: step_start, step_finish, text
*/
interface OpenCodeBaseEvent {
/** Event type identifier */
type: string;
/** Optional session identifier */
session_id?: string;
/** Timestamp of the event */
timestamp?: number;
/** Session ID */
sessionID?: string;
/** Part object containing the actual event data */
part?: Record<string, unknown>;
}
/**
* Text delta event - Incremental text output from the model
* Text event - Text output from the model
* Format: {"type":"text","part":{"text":"content",...}}
*/
export interface OpenCodeTextDeltaEvent extends OpenCodeBaseEvent {
type: 'text-delta';
/** The incremental text content */
text: string;
}
/**
* Text end event - Signals completion of text generation
*/
export interface OpenCodeTextEndEvent extends OpenCodeBaseEvent {
type: 'text-end';
export interface OpenCodeTextEvent extends OpenCodeBaseEvent {
type: 'text';
part: {
type: 'text';
text: string;
[key: string]: unknown;
};
}
/**
* Tool call event - Request to execute a tool
*/
export interface OpenCodeToolCallEvent extends OpenCodeBaseEvent {
type: 'tool-call';
/** Unique identifier for this tool call */
call_id?: string;
/** Tool name to invoke */
name: string;
/** Arguments to pass to the tool */
args: unknown;
type: 'tool_call';
part: {
type: 'tool-call';
name: string;
call_id?: string;
args: unknown;
[key: string]: unknown;
};
}
/**
* Tool result event - Output from a tool execution
*/
export interface OpenCodeToolResultEvent extends OpenCodeBaseEvent {
type: 'tool-result';
/** The tool call ID this result corresponds to */
call_id?: string;
/** Output from the tool execution */
output: string;
type: 'tool_result';
part: {
type: 'tool-result';
call_id?: string;
output: string;
[key: string]: unknown;
};
}
/**
* Tool error event - Tool execution failed
*/
export interface OpenCodeToolErrorEvent extends OpenCodeBaseEvent {
type: 'tool-error';
/** The tool call ID that failed */
call_id?: string;
/** Error message describing the failure */
error: string;
type: 'tool_error';
part: {
type: 'tool-error';
call_id?: string;
error: string;
[key: string]: unknown;
};
}
/**
* Start step event - Begins an agentic loop iteration
* Format: {"type":"step_start","part":{...}}
*/
export interface OpenCodeStartStepEvent extends OpenCodeBaseEvent {
type: 'start-step';
/** Step number in the agentic loop */
step?: number;
type: 'step_start';
part?: {
type: 'step-start';
[key: string]: unknown;
};
}
/**
* Finish step event - Completes an agentic loop iteration
* Format: {"type":"step_finish","part":{"reason":"stop",...}}
*/
export interface OpenCodeFinishStepEvent extends OpenCodeBaseEvent {
type: 'finish-step';
/** Step number that completed */
step?: number;
/** Whether the step completed successfully */
success?: boolean;
/** Optional result data */
result?: string;
/** Optional error if step failed */
error?: string;
type: 'step_finish';
part?: {
type: 'step-finish';
reason?: string;
error?: string;
[key: string]: unknown;
};
}
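// Example (illustrative; sample values assumed): each JSONL line emitted by the CLI is a
// standalone object in the underscore format and is narrowed on its `type` field.
const exampleTextLine =
  '{"type":"text","part":{"type":"text","text":"Hello"},"sessionID":"ses_example"}';
const exampleFinishLine =
  '{"type":"step_finish","part":{"type":"step-finish","reason":"stop"},"sessionID":"ses_example"}';
const exampleTextEvent = JSON.parse(exampleTextLine) as OpenCodeTextEvent;
const exampleFinishEvent = JSON.parse(exampleFinishLine) as OpenCodeFinishStepEvent;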
/**
* Union type of all OpenCode stream events
*/
export type OpenCodeStreamEvent =
| OpenCodeTextDeltaEvent
| OpenCodeTextEndEvent
| OpenCodeTextEvent
| OpenCodeToolCallEvent
| OpenCodeToolResultEvent
| OpenCodeToolErrorEvent
@@ -219,14 +227,12 @@ export class OpencodeProvider extends CliProvider {
*
* Arguments built:
* - 'run' subcommand for executing queries
* - '--format', 'stream-json' for JSONL streaming output
* - '-q' / '--quiet' to suppress spinner and interactive elements
* - '-c', '<cwd>' for working directory
* - '--format', 'json' for JSON streaming output
* - '--model', '<model>' for model selection (if specified)
* - '-' as final arg to read prompt from stdin
* - Message passed via stdin (no positional args needed)
*
* The prompt is NOT included in CLI args - it's passed via stdin to avoid
* shell escaping issues with special characters in content.
* The prompt is passed via stdin to avoid shell escaping issues.
* OpenCode will read from stdin when no positional message arguments are provided.
*
* @param options - Execution options containing model, cwd, etc.
* @returns Array of CLI arguments for opencode run
@@ -234,27 +240,18 @@ export class OpencodeProvider extends CliProvider {
buildCliArgs(options: ExecuteOptions): string[] {
const args: string[] = ['run'];
// Add streaming JSON output format for JSONL parsing
args.push('--format', 'stream-json');
// Suppress spinner and interactive elements for non-TTY usage
args.push('-q');
// Set working directory
if (options.cwd) {
args.push('-c', options.cwd);
}
// Add JSON output format for streaming
args.push('--format', 'json');
// Handle model selection
// Strip 'opencode-' prefix if present, OpenCode uses format like 'anthropic/claude-sonnet-4-5'
// Strip 'opencode-' prefix if present, OpenCode uses native format
if (options.model) {
const model = stripProviderPrefix(options.model);
args.push('--model', model);
}
// Use '-' to indicate reading prompt from stdin
// This avoids shell escaping issues with special characters
args.push('-');
// Note: Working directory is set via subprocess cwd option, not CLI args
// Note: Message is passed via stdin, OpenCode reads from stdin automatically
return args;
}
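// Example (illustrative; option values assumed):
//   buildCliArgs({ model: 'opencode-anthropic/claude-sonnet-4-5', cwd: '/repo' } as ExecuteOptions)
//   => ['run', '--format', 'json', '--model', 'anthropic/claude-sonnet-4-5']
// The prompt itself is written to stdin, and the working directory is applied via the
// subprocess cwd option rather than a CLI flag.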
@@ -314,14 +311,12 @@ export class OpencodeProvider extends CliProvider {
* Normalize a raw CLI event to ProviderMessage format
*
* Maps OpenCode event types to the standard ProviderMessage structure:
* - text-delta -> type: 'assistant', content with type: 'text'
* - text-end -> null (informational, no message needed)
* - tool-call -> type: 'assistant', content with type: 'tool_use'
* - tool-result -> type: 'assistant', content with type: 'tool_result'
* - tool-error -> type: 'error'
* - start-step -> null (informational, no message needed)
* - finish-step with success -> type: 'result', subtype: 'success'
* - finish-step with error -> type: 'error'
* - text -> type: 'assistant', content with type: 'text'
* - step_start -> null (informational, no message needed)
* - step_finish -> type: 'result', subtype: 'success' (or error if failed)
* - tool_call -> type: 'assistant', content with type: 'tool_use'
* - tool_result -> type: 'assistant', content with type: 'tool_result'
* - tool_error -> type: 'error'
*
* @param event - Raw event from OpenCode CLI JSONL output
* @returns Normalized ProviderMessage or null to skip the event
@@ -334,24 +329,24 @@ export class OpencodeProvider extends CliProvider {
const openCodeEvent = event as OpenCodeStreamEvent;
switch (openCodeEvent.type) {
case 'text-delta': {
const textEvent = openCodeEvent as OpenCodeTextDeltaEvent;
case 'text': {
const textEvent = openCodeEvent as OpenCodeTextEvent;
// Skip empty text deltas
if (!textEvent.text) {
// Skip if no text content
if (!textEvent.part?.text) {
return null;
}
const content: ContentBlock[] = [
{
type: 'text',
text: textEvent.text,
text: textEvent.part.text,
},
];
return {
type: 'assistant',
session_id: textEvent.session_id,
session_id: textEvent.sessionID,
message: {
role: 'assistant',
content,
@@ -359,90 +354,105 @@ export class OpencodeProvider extends CliProvider {
};
}
case 'text-end': {
// Text end is informational - no message needed
return null;
}
case 'tool-call': {
const toolEvent = openCodeEvent as OpenCodeToolCallEvent;
// Generate a tool use ID if not provided
const toolUseId = toolEvent.call_id || generateToolUseId();
const content: ContentBlock[] = [
{
type: 'tool_use',
name: toolEvent.name,
tool_use_id: toolUseId,
input: toolEvent.args,
},
];
return {
type: 'assistant',
session_id: toolEvent.session_id,
message: {
role: 'assistant',
content,
},
};
}
case 'tool-result': {
const resultEvent = openCodeEvent as OpenCodeToolResultEvent;
const content: ContentBlock[] = [
{
type: 'tool_result',
tool_use_id: resultEvent.call_id,
content: resultEvent.output,
},
];
return {
type: 'assistant',
session_id: resultEvent.session_id,
message: {
role: 'assistant',
content,
},
};
}
case 'tool-error': {
const errorEvent = openCodeEvent as OpenCodeToolErrorEvent;
return {
type: 'error',
session_id: errorEvent.session_id,
error: errorEvent.error || 'Tool execution failed',
};
}
case 'start-step': {
case 'step_start': {
// Start step is informational - no message needed
return null;
}
case 'finish-step': {
case 'step_finish': {
const finishEvent = openCodeEvent as OpenCodeFinishStepEvent;
// Check if the step failed
if (finishEvent.success === false || finishEvent.error) {
// Check if the step failed (either has error field or reason is 'error')
if (finishEvent.part?.error || finishEvent.part?.reason === 'error') {
return {
type: 'error',
session_id: finishEvent.session_id,
error: finishEvent.error || 'Step execution failed',
session_id: finishEvent.sessionID,
error: finishEvent.part?.error || 'Step execution failed',
};
}
// Successful completion
const result: { type: 'result'; subtype: 'success'; session_id?: string; result?: string } =
{
type: 'result',
subtype: 'success',
};
if (finishEvent.sessionID) {
result.session_id = finishEvent.sessionID;
}
// Safely handle arbitrary result payloads from CLI: ensure we assign a string.
const rawResult =
(finishEvent.part && (finishEvent.part as Record<string, unknown>).result) ?? undefined;
if (rawResult !== undefined) {
result.result = typeof rawResult === 'string' ? rawResult : JSON.stringify(rawResult);
}
return result;
}
case 'tool_call': {
const toolEvent = openCodeEvent as OpenCodeToolCallEvent;
if (!toolEvent.part) {
return null;
}
// Generate a tool use ID if not provided
const toolUseId = toolEvent.part.call_id || generateToolUseId();
const content: ContentBlock[] = [
{
type: 'tool_use',
name: toolEvent.part.name,
tool_use_id: toolUseId,
input: toolEvent.part.args,
},
];
return {
type: 'result',
subtype: 'success',
session_id: finishEvent.session_id,
result: finishEvent.result,
type: 'assistant',
session_id: toolEvent.sessionID,
message: {
role: 'assistant',
content,
},
};
}
case 'tool_result': {
const resultEvent = openCodeEvent as OpenCodeToolResultEvent;
if (!resultEvent.part) {
return null;
}
const content: ContentBlock[] = [
{
type: 'tool_result',
tool_use_id: resultEvent.part.call_id,
content: resultEvent.part.output,
},
];
return {
type: 'assistant',
session_id: resultEvent.sessionID,
message: {
role: 'assistant',
content,
},
};
}
case 'tool_error': {
const errorEvent = openCodeEvent as OpenCodeToolErrorEvent;
return {
type: 'error',
session_id: errorEvent.sessionID,
error: errorEvent.part?.error || 'Tool execution failed',
};
}

View File

@@ -6,26 +6,57 @@ import { createLogger } from '@automaker/utils';
const logger = createLogger('SpecRegeneration');
// Shared state for tracking generation status - private
let isRunning = false;
let currentAbortController: AbortController | null = null;
// Shared state for tracking generation status - scoped by project path
const runningProjects = new Map<string, boolean>();
const abortControllers = new Map<string, AbortController>();
/**
* Get the current running state
* Get the running state for a specific project
*/
export function getSpecRegenerationStatus(): {
export function getSpecRegenerationStatus(projectPath?: string): {
isRunning: boolean;
currentAbortController: AbortController | null;
projectPath?: string;
} {
return { isRunning, currentAbortController };
if (projectPath) {
return {
isRunning: runningProjects.get(projectPath) || false,
currentAbortController: abortControllers.get(projectPath) || null,
projectPath,
};
}
// Fallback: check if any project is running (for backward compatibility)
const isAnyRunning = Array.from(runningProjects.values()).some((running) => running);
return { isRunning: isAnyRunning, currentAbortController: null };
}
/**
* Set the running state and abort controller
* Get the project path that is currently running (if any)
*/
export function setRunningState(running: boolean, controller: AbortController | null = null): void {
isRunning = running;
currentAbortController = controller;
export function getRunningProjectPath(): string | null {
for (const [path, running] of runningProjects.entries()) {
if (running) return path;
}
return null;
}
/**
* Set the running state and abort controller for a specific project
*/
export function setRunningState(
projectPath: string,
running: boolean,
controller: AbortController | null = null
): void {
if (running) {
runningProjects.set(projectPath, true);
if (controller) {
abortControllers.set(projectPath, controller);
}
} else {
runningProjects.delete(projectPath);
abortControllers.delete(projectPath);
}
}
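// Example usage (illustrative; paths assumed) - state is scoped per project:
//   setRunningState('/path/to/project-a', true, new AbortController());
//   getSpecRegenerationStatus('/path/to/project-a'); // { isRunning: true, currentAbortController, projectPath }
//   getSpecRegenerationStatus('/path/to/project-b'); // { isRunning: false, currentAbortController: null, ... }
//   setRunningState('/path/to/project-a', false, null); // clears both maps for that project only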
/**

View File

@@ -47,17 +47,17 @@ export function createCreateHandler(events: EventEmitter) {
return;
}
const { isRunning } = getSpecRegenerationStatus();
const { isRunning } = getSpecRegenerationStatus(projectPath);
if (isRunning) {
logger.warn('Generation already running, rejecting request');
res.json({ success: false, error: 'Spec generation already running' });
logger.warn('Generation already running for project:', projectPath);
res.json({ success: false, error: 'Spec generation already running for this project' });
return;
}
logAuthStatus('Before starting generation');
const abortController = new AbortController();
setRunningState(true, abortController);
setRunningState(projectPath, true, abortController);
logger.info('Starting background generation task...');
// Start generation in background
@@ -80,7 +80,7 @@ export function createCreateHandler(events: EventEmitter) {
})
.finally(() => {
logger.info('Generation task finished (success or error)');
setRunningState(false, null);
setRunningState(projectPath, false, null);
});
logger.info('Returning success response (generation running in background)');

View File

@@ -40,17 +40,17 @@ export function createGenerateFeaturesHandler(
return;
}
const { isRunning } = getSpecRegenerationStatus();
const { isRunning } = getSpecRegenerationStatus(projectPath);
if (isRunning) {
logger.warn('Generation already running, rejecting request');
res.json({ success: false, error: 'Generation already running' });
logger.warn('Generation already running for project:', projectPath);
res.json({ success: false, error: 'Generation already running for this project' });
return;
}
logAuthStatus('Before starting feature generation');
const abortController = new AbortController();
setRunningState(true, abortController);
setRunningState(projectPath, true, abortController);
logger.info('Starting background feature generation task...');
generateFeaturesFromSpec(projectPath, events, abortController, maxFeatures, settingsService)
@@ -63,7 +63,7 @@ export function createGenerateFeaturesHandler(
})
.finally(() => {
logger.info('Feature generation task finished (success or error)');
setRunningState(false, null);
setRunningState(projectPath, false, null);
});
logger.info('Returning success response (generation running in background)');

View File

@@ -48,17 +48,17 @@ export function createGenerateHandler(events: EventEmitter, settingsService?: Se
return;
}
const { isRunning } = getSpecRegenerationStatus();
const { isRunning } = getSpecRegenerationStatus(projectPath);
if (isRunning) {
logger.warn('Generation already running, rejecting request');
res.json({ success: false, error: 'Spec generation already running' });
logger.warn('Generation already running for project:', projectPath);
res.json({ success: false, error: 'Spec generation already running for this project' });
return;
}
logAuthStatus('Before starting generation');
const abortController = new AbortController();
setRunningState(true, abortController);
setRunningState(projectPath, true, abortController);
logger.info('Starting background generation task...');
generateSpec(
@@ -81,7 +81,7 @@ export function createGenerateHandler(events: EventEmitter, settingsService?: Se
})
.finally(() => {
logger.info('Generation task finished (success or error)');
setRunningState(false, null);
setRunningState(projectPath, false, null);
});
logger.info('Returning success response (generation running in background)');

View File

@@ -6,10 +6,11 @@ import type { Request, Response } from 'express';
import { getSpecRegenerationStatus, getErrorMessage } from '../common.js';
export function createStatusHandler() {
return async (_req: Request, res: Response): Promise<void> => {
return async (req: Request, res: Response): Promise<void> => {
try {
const { isRunning } = getSpecRegenerationStatus();
res.json({ success: true, isRunning });
const projectPath = req.query.projectPath as string | undefined;
const { isRunning } = getSpecRegenerationStatus(projectPath);
res.json({ success: true, isRunning, projectPath });
} catch (error) {
res.status(500).json({ success: false, error: getErrorMessage(error) });
}

View File

@@ -6,13 +6,16 @@ import type { Request, Response } from 'express';
import { getSpecRegenerationStatus, setRunningState, getErrorMessage } from '../common.js';
export function createStopHandler() {
return async (_req: Request, res: Response): Promise<void> => {
return async (req: Request, res: Response): Promise<void> => {
try {
const { currentAbortController } = getSpecRegenerationStatus();
const { projectPath } = req.body as { projectPath?: string };
const { currentAbortController } = getSpecRegenerationStatus(projectPath);
if (currentAbortController) {
currentAbortController.abort();
}
setRunningState(false, null);
if (projectPath) {
setRunningState(projectPath, false, null);
}
res.json({ success: true });
} catch (error) {
res.status(500).json({ success: false, error: getErrorMessage(error) });

View File

@@ -17,6 +17,7 @@ import { createAnalyzeProjectHandler } from './routes/analyze-project.js';
import { createFollowUpFeatureHandler } from './routes/follow-up-feature.js';
import { createCommitFeatureHandler } from './routes/commit-feature.js';
import { createApprovePlanHandler } from './routes/approve-plan.js';
import { createResumeInterruptedHandler } from './routes/resume-interrupted.js';
export function createAutoModeRoutes(autoModeService: AutoModeService): Router {
const router = Router();
@@ -63,6 +64,11 @@ export function createAutoModeRoutes(autoModeService: AutoModeService): Router {
validatePathParams('projectPath'),
createApprovePlanHandler(autoModeService)
);
router.post(
'/resume-interrupted',
validatePathParams('projectPath'),
createResumeInterruptedHandler(autoModeService)
);
return router;
}

View File

@@ -0,0 +1,42 @@
/**
* Resume Interrupted Features Handler
*
* Checks for features that were interrupted (in pipeline steps or in_progress)
* when the server was restarted and resumes them.
*/
import type { Request, Response } from 'express';
import { createLogger } from '@automaker/utils';
import type { AutoModeService } from '../../../services/auto-mode-service.js';
const logger = createLogger('ResumeInterrupted');
interface ResumeInterruptedRequest {
projectPath: string;
}
export function createResumeInterruptedHandler(autoModeService: AutoModeService) {
return async (req: Request, res: Response): Promise<void> => {
const { projectPath } = req.body as ResumeInterruptedRequest;
if (!projectPath) {
res.status(400).json({ error: 'Project path is required' });
return;
}
logger.info(`Checking for interrupted features in ${projectPath}`);
try {
await autoModeService.resumeInterruptedFeatures(projectPath);
res.json({
success: true,
message: 'Resume check completed',
});
} catch (error) {
logger.error('Error resuming interrupted features:', error);
res.status(500).json({
error: error instanceof Error ? error.message : 'Unknown error',
});
}
};
}

View File

@@ -1,17 +1,21 @@
import { Router, Request, Response } from 'express';
import { CodexUsageService } from '../../services/codex-usage-service.js';
import { CodexModelCacheService } from '../../services/codex-model-cache-service.js';
import { createLogger } from '@automaker/utils';
const logger = createLogger('Codex');
export function createCodexRoutes(service: CodexUsageService): Router {
export function createCodexRoutes(
usageService: CodexUsageService,
modelCacheService: CodexModelCacheService
): Router {
const router = Router();
// Get current usage (attempts to fetch from Codex CLI)
router.get('/usage', async (req: Request, res: Response) => {
router.get('/usage', async (_req: Request, res: Response) => {
try {
// Check if Codex CLI is available first
const isAvailable = await service.isAvailable();
const isAvailable = await usageService.isAvailable();
if (!isAvailable) {
// IMPORTANT: This endpoint is behind Automaker session auth already.
// Use a 200 + error payload for Codex CLI issues so the UI doesn't
@@ -23,7 +27,7 @@ export function createCodexRoutes(service: CodexUsageService): Router {
return;
}
const usage = await service.fetchUsageData();
const usage = await usageService.fetchUsageData();
res.json(usage);
} catch (error) {
const message = error instanceof Error ? error.message : 'Unknown error';
@@ -52,5 +56,35 @@ export function createCodexRoutes(service: CodexUsageService): Router {
}
});
// Get available Codex models (cached)
router.get('/models', async (req: Request, res: Response) => {
try {
const forceRefresh = req.query.refresh === 'true';
const { models, cachedAt } = await modelCacheService.getModelsWithMetadata(forceRefresh);
if (models.length === 0) {
res.status(503).json({
success: false,
error: 'Codex CLI not available or not authenticated',
message: "Please install Codex CLI and run 'codex login' to authenticate",
});
return;
}
res.json({
success: true,
models,
cachedAt,
});
} catch (error) {
logger.error('Error fetching models:', error);
const message = error instanceof Error ? error.message : 'Unknown error';
res.status(500).json({
success: false,
error: message,
});
}
});
return router;
}
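// Example response from GET /api/codex/models (illustrative; field values assumed):
//   { "success": true, "models": [ /* CodexModel[] from the cache service */ ], "cachedAt": 1736610859000 }
// Pass ?refresh=true to bypass the cache; a 503 with { success: false, error, message }
// is returned when the Codex CLI is unavailable or unauthenticated.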

View File

@@ -188,6 +188,7 @@ export function createEnhanceHandler(
technical: prompts.enhancement.technicalSystemPrompt,
simplify: prompts.enhancement.simplifySystemPrompt,
acceptance: prompts.enhancement.acceptanceSystemPrompt,
'ux-reviewer': prompts.enhancement.uxReviewerSystemPrompt,
};
const systemPrompt = systemPromptMap[validMode];

View File

@@ -10,6 +10,7 @@ import { createGetHandler } from './routes/get.js';
import { createCreateHandler } from './routes/create.js';
import { createUpdateHandler } from './routes/update.js';
import { createBulkUpdateHandler } from './routes/bulk-update.js';
import { createBulkDeleteHandler } from './routes/bulk-delete.js';
import { createDeleteHandler } from './routes/delete.js';
import { createAgentOutputHandler, createRawOutputHandler } from './routes/agent-output.js';
import { createGenerateTitleHandler } from './routes/generate-title.js';
@@ -26,6 +27,11 @@ export function createFeaturesRoutes(featureLoader: FeatureLoader): Router {
validatePathParams('projectPath'),
createBulkUpdateHandler(featureLoader)
);
router.post(
'/bulk-delete',
validatePathParams('projectPath'),
createBulkDeleteHandler(featureLoader)
);
router.post('/delete', validatePathParams('projectPath'), createDeleteHandler(featureLoader));
router.post('/agent-output', createAgentOutputHandler(featureLoader));
router.post('/raw-output', createRawOutputHandler(featureLoader));

View File

@@ -0,0 +1,61 @@
/**
* POST /bulk-delete endpoint - Delete multiple features at once
*/
import type { Request, Response } from 'express';
import { FeatureLoader } from '../../../services/feature-loader.js';
import { getErrorMessage, logError } from '../common.js';
interface BulkDeleteRequest {
projectPath: string;
featureIds: string[];
}
interface BulkDeleteResult {
featureId: string;
success: boolean;
error?: string;
}
export function createBulkDeleteHandler(featureLoader: FeatureLoader) {
return async (req: Request, res: Response): Promise<void> => {
try {
const { projectPath, featureIds } = req.body as BulkDeleteRequest;
if (!projectPath || !featureIds || !Array.isArray(featureIds) || featureIds.length === 0) {
res.status(400).json({
success: false,
error: 'projectPath and featureIds (non-empty array) are required',
});
return;
}
const results = await Promise.all(
featureIds.map(async (featureId) => {
const success = await featureLoader.delete(projectPath, featureId);
if (success) {
return { featureId, success: true };
}
return {
featureId,
success: false,
error: 'Deletion failed. Check server logs for details.',
};
})
);
const successCount = results.reduce((count, r) => count + (r.success ? 1 : 0), 0);
const failureCount = results.length - successCount;
res.json({
success: failureCount === 0,
deletedCount: successCount,
failedCount: failureCount,
results,
});
} catch (error) {
logError(error, 'Bulk delete features failed');
res.status(500).json({ success: false, error: getErrorMessage(error) });
}
};
}
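// Example request/response for the bulk-delete handler (illustrative; IDs assumed):
//   request body:  { "projectPath": "/path/to/project", "featureIds": ["feat-1", "feat-2"] }
//   response body: { "success": true, "deletedCount": 2, "failedCount": 0,
//                    "results": [{ "featureId": "feat-1", "success": true }, { "featureId": "feat-2", "success": true }] }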

View File

@@ -10,14 +10,21 @@ import { getErrorMessage, logError } from '../common.js';
export function createUpdateHandler(featureLoader: FeatureLoader) {
return async (req: Request, res: Response): Promise<void> => {
try {
const { projectPath, featureId, updates, descriptionHistorySource, enhancementMode } =
req.body as {
projectPath: string;
featureId: string;
updates: Partial<Feature>;
descriptionHistorySource?: 'enhance' | 'edit';
enhancementMode?: 'improve' | 'technical' | 'simplify' | 'acceptance';
};
const {
projectPath,
featureId,
updates,
descriptionHistorySource,
enhancementMode,
preEnhancementDescription,
} = req.body as {
projectPath: string;
featureId: string;
updates: Partial<Feature>;
descriptionHistorySource?: 'enhance' | 'edit';
enhancementMode?: 'improve' | 'technical' | 'simplify' | 'acceptance' | 'ux-reviewer';
preEnhancementDescription?: string;
};
if (!projectPath || !featureId || !updates) {
res.status(400).json({
@@ -32,7 +39,8 @@ export function createUpdateHandler(featureLoader: FeatureLoader) {
featureId,
updates,
descriptionHistorySource,
enhancementMode
enhancementMode,
preEnhancementDescription
);
res.json({ success: true, feature: updated });
} catch (error) {

View File

@@ -2,18 +2,23 @@
* POST /list endpoint - List all git worktrees
*
* Returns actual git worktrees from `git worktree list`.
* Also scans .worktrees/ directory to discover worktrees that may have been
* created externally or whose git state was corrupted.
* Does NOT include tracked branches - only real worktrees with separate directories.
*/
import type { Request, Response } from 'express';
import { exec } from 'child_process';
import { promisify } from 'util';
import path from 'path';
import * as secureFs from '../../../lib/secure-fs.js';
import { isGitRepo } from '@automaker/git-utils';
import { getErrorMessage, logError, normalizePath } from '../common.js';
import { readAllWorktreeMetadata, type WorktreePRInfo } from '../../../lib/worktree-metadata.js';
import { createLogger } from '@automaker/utils';
const execAsync = promisify(exec);
const logger = createLogger('Worktree');
interface WorktreeInfo {
path: string;
@@ -35,6 +40,87 @@ async function getCurrentBranch(cwd: string): Promise<string> {
}
}
/**
* Scan the .worktrees directory to discover worktrees that may exist on disk
 * but are not registered with git (e.g., created externally or left with corrupted git state).
*/
async function scanWorktreesDirectory(
projectPath: string,
knownWorktreePaths: Set<string>
): Promise<Array<{ path: string; branch: string }>> {
const discovered: Array<{ path: string; branch: string }> = [];
const worktreesDir = path.join(projectPath, '.worktrees');
try {
// Check if .worktrees directory exists
await secureFs.access(worktreesDir);
} catch {
// .worktrees directory doesn't exist
return discovered;
}
try {
const entries = await secureFs.readdir(worktreesDir, { withFileTypes: true });
for (const entry of entries) {
if (!entry.isDirectory()) continue;
const worktreePath = path.join(worktreesDir, entry.name);
const normalizedPath = normalizePath(worktreePath);
// Skip if already known from git worktree list
if (knownWorktreePaths.has(normalizedPath)) continue;
// Check if this is a valid git repository
const gitPath = path.join(worktreePath, '.git');
try {
const gitStat = await secureFs.stat(gitPath);
// Git worktrees have a .git FILE (not directory) that points to the parent repo
// Regular repos have a .git DIRECTORY
if (gitStat.isFile() || gitStat.isDirectory()) {
// Try to get the branch name
const branch = await getCurrentBranch(worktreePath);
if (branch) {
logger.info(
`Discovered worktree in .worktrees/ not in git worktree list: ${entry.name} (branch: ${branch})`
);
discovered.push({
path: normalizedPath,
branch,
});
} else {
// Try to get branch from HEAD if branch --show-current fails (detached HEAD)
try {
const { stdout: headRef } = await execAsync('git rev-parse --abbrev-ref HEAD', {
cwd: worktreePath,
});
const headBranch = headRef.trim();
if (headBranch && headBranch !== 'HEAD') {
logger.info(
`Discovered worktree in .worktrees/ not in git worktree list: ${entry.name} (branch: ${headBranch})`
);
discovered.push({
path: normalizedPath,
branch: headBranch,
});
}
} catch {
// Can't determine branch, skip this directory
}
}
}
} catch {
// Not a git repo, skip
}
}
} catch (error) {
logger.warn(`Failed to scan .worktrees directory: ${getErrorMessage(error)}`);
}
return discovered;
}
export function createListHandler() {
return async (req: Request, res: Response): Promise<void> => {
try {
@@ -116,6 +202,22 @@ export function createListHandler() {
}
}
// Scan .worktrees directory to discover worktrees that exist on disk
// but are not registered with git (e.g., created externally)
const knownPaths = new Set(worktrees.map((w) => w.path));
const discoveredWorktrees = await scanWorktreesDirectory(projectPath, knownPaths);
// Add discovered worktrees to the list
for (const discovered of discoveredWorktrees) {
worktrees.push({
path: discovered.path,
branch: discovered.branch,
isMain: false,
isCurrent: discovered.branch === currentBranch,
hasWorktree: true,
});
}
// Read all worktree metadata to get PR info
const allMetadata = await readAllWorktreeMetadata(projectPath);

View File

@@ -31,7 +31,13 @@ import {
const logger = createLogger('AutoMode');
import { resolveModelString, resolvePhaseModel, DEFAULT_MODELS } from '@automaker/model-resolver';
import { resolveDependencies, areDependenciesSatisfied } from '@automaker/dependency-resolver';
import { getFeatureDir, getAutomakerDir, getFeaturesDir } from '@automaker/platform';
import {
getFeatureDir,
getAutomakerDir,
getFeaturesDir,
getExecutionStatePath,
ensureAutomakerDir,
} from '@automaker/platform';
import { exec } from 'child_process';
import { promisify } from 'util';
import path from 'path';
@@ -201,6 +207,29 @@ interface AutoModeConfig {
projectPath: string;
}
/**
* Execution state for recovery after server restart
* Tracks which features were running and auto-loop configuration
*/
interface ExecutionState {
version: 1;
autoLoopWasRunning: boolean;
maxConcurrency: number;
projectPath: string;
runningFeatureIds: string[];
savedAt: string;
}
// Default empty execution state
const DEFAULT_EXECUTION_STATE: ExecutionState = {
version: 1,
autoLoopWasRunning: false,
maxConcurrency: 3,
projectPath: '',
runningFeatureIds: [],
savedAt: '',
};
// Constants for consecutive failure tracking
const CONSECUTIVE_FAILURE_THRESHOLD = 3; // Pause after 3 consecutive failures
const FAILURE_WINDOW_MS = 60000; // Failures within 1 minute count as consecutive
@@ -322,6 +351,9 @@ export class AutoModeService {
projectPath,
});
// Save execution state for recovery after restart
await this.saveExecutionState(projectPath);
// Note: Memory folder initialization is now handled by loadContextFiles
// Run the loop in the background
@@ -390,17 +422,23 @@ export class AutoModeService {
*/
async stopAutoLoop(): Promise<number> {
const wasRunning = this.autoLoopRunning;
const projectPath = this.config?.projectPath;
this.autoLoopRunning = false;
if (this.autoLoopAbortController) {
this.autoLoopAbortController.abort();
this.autoLoopAbortController = null;
}
// Clear execution state when auto-loop is explicitly stopped
if (projectPath) {
await this.clearExecutionState(projectPath);
}
// Emit stop event immediately when user explicitly stops
if (wasRunning) {
this.emitAutoModeEvent('auto_mode_stopped', {
message: 'Auto mode stopped',
projectPath: this.config?.projectPath,
projectPath,
});
}
@@ -441,6 +479,11 @@ export class AutoModeService {
};
this.runningFeatures.set(featureId, tempRunningFeature);
// Save execution state when feature starts
if (isAutoMode) {
await this.saveExecutionState(projectPath);
}
try {
// Validate that project path is allowed using centralized validation
validateWorkingDirectory(projectPath);
@@ -695,6 +738,11 @@ export class AutoModeService {
`Pending approvals at cleanup: ${Array.from(this.pendingApprovals.keys()).join(', ') || 'none'}`
);
this.runningFeatures.delete(featureId);
// Update execution state after feature completes
if (this.autoLoopRunning && projectPath) {
await this.saveExecutionState(projectPath);
}
}
}
@@ -2950,6 +2998,149 @@ Begin implementing task ${task.id} now.`;
});
}
// ============================================================================
// Execution State Persistence - For recovery after server restart
// ============================================================================
/**
* Save execution state to disk for recovery after server restart
*/
private async saveExecutionState(projectPath: string): Promise<void> {
try {
await ensureAutomakerDir(projectPath);
const statePath = getExecutionStatePath(projectPath);
const state: ExecutionState = {
version: 1,
autoLoopWasRunning: this.autoLoopRunning,
maxConcurrency: this.config?.maxConcurrency ?? 3,
projectPath,
runningFeatureIds: Array.from(this.runningFeatures.keys()),
savedAt: new Date().toISOString(),
};
await secureFs.writeFile(statePath, JSON.stringify(state, null, 2), 'utf-8');
logger.info(`Saved execution state: ${state.runningFeatureIds.length} running features`);
} catch (error) {
logger.error('Failed to save execution state:', error);
}
}
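// Example of the persisted state file (illustrative; values assumed):
// {
//   "version": 1,
//   "autoLoopWasRunning": true,
//   "maxConcurrency": 3,
//   "projectPath": "/path/to/project",
//   "runningFeatureIds": ["feature-123"],
//   "savedAt": "2026-01-11T16:34:19.000Z"
// }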
/**
* Load execution state from disk
*/
private async loadExecutionState(projectPath: string): Promise<ExecutionState> {
try {
const statePath = getExecutionStatePath(projectPath);
const content = (await secureFs.readFile(statePath, 'utf-8')) as string;
const state = JSON.parse(content) as ExecutionState;
return state;
} catch (error) {
if ((error as NodeJS.ErrnoException).code !== 'ENOENT') {
logger.error('Failed to load execution state:', error);
}
return DEFAULT_EXECUTION_STATE;
}
}
/**
* Clear execution state (called on successful shutdown or when auto-loop stops)
*/
private async clearExecutionState(projectPath: string): Promise<void> {
try {
const statePath = getExecutionStatePath(projectPath);
await secureFs.unlink(statePath);
logger.info('Cleared execution state');
} catch (error) {
if ((error as NodeJS.ErrnoException).code !== 'ENOENT') {
logger.error('Failed to clear execution state:', error);
}
}
}
/**
* Check for and resume interrupted features after server restart
* This should be called during server initialization
*/
async resumeInterruptedFeatures(projectPath: string): Promise<void> {
logger.info('Checking for interrupted features to resume...');
// Load all features and find those that were interrupted
const featuresDir = getFeaturesDir(projectPath);
try {
const entries = await secureFs.readdir(featuresDir, { withFileTypes: true });
const interruptedFeatures: Feature[] = [];
for (const entry of entries) {
if (entry.isDirectory()) {
const featurePath = path.join(featuresDir, entry.name, 'feature.json');
try {
const data = (await secureFs.readFile(featurePath, 'utf-8')) as string;
const feature = JSON.parse(data) as Feature;
// Check if feature was interrupted (in_progress or pipeline_*)
if (
feature.status === 'in_progress' ||
(feature.status && feature.status.startsWith('pipeline_'))
) {
// Verify it has existing context (agent-output.md)
const featureDir = getFeatureDir(projectPath, feature.id);
const contextPath = path.join(featureDir, 'agent-output.md');
try {
await secureFs.access(contextPath);
interruptedFeatures.push(feature);
logger.info(
`Found interrupted feature: ${feature.id} (${feature.title}) - status: ${feature.status}`
);
} catch {
// No context file, skip this feature - it will be restarted fresh
logger.info(`Interrupted feature ${feature.id} has no context, will restart fresh`);
}
}
} catch {
// Skip invalid features
}
}
}
if (interruptedFeatures.length === 0) {
logger.info('No interrupted features found');
return;
}
logger.info(`Found ${interruptedFeatures.length} interrupted feature(s) to resume`);
// Emit event to notify UI
this.emitAutoModeEvent('auto_mode_resuming_features', {
message: `Resuming ${interruptedFeatures.length} interrupted feature(s) after server restart`,
projectPath,
featureIds: interruptedFeatures.map((f) => f.id),
features: interruptedFeatures.map((f) => ({
id: f.id,
title: f.title,
status: f.status,
})),
});
// Resume each interrupted feature
for (const feature of interruptedFeatures) {
try {
logger.info(`Resuming feature: ${feature.id} (${feature.title})`);
// Use resumeFeature which will detect the existing context and continue
await this.resumeFeature(projectPath, feature.id, true);
} catch (error) {
logger.error(`Failed to resume feature ${feature.id}:`, error);
// Continue with other features
}
}
} catch (error) {
if ((error as NodeJS.ErrnoException).code === 'ENOENT') {
logger.info('No features directory found, nothing to resume');
} else {
logger.error('Error checking for interrupted features:', error);
}
}
}
/**
* Extract and record learnings from a completed feature
* Uses a quick Claude call to identify important decisions and patterns

View File

@@ -2,6 +2,7 @@ import { spawn } from 'child_process';
import * as os from 'os';
import * as pty from 'node-pty';
import { ClaudeUsage } from '../routes/claude/types.js';
import { createLogger } from '@automaker/utils';
/**
* Claude Usage Service
@@ -14,6 +15,8 @@ import { ClaudeUsage } from '../routes/claude/types.js';
* - macOS: Uses 'expect' command for PTY
* - Windows/Linux: Uses node-pty for PTY
*/
const logger = createLogger('ClaudeUsage');
export class ClaudeUsageService {
private claudeBinary = 'claude';
private timeout = 30000; // 30 second timeout
@@ -164,21 +167,40 @@ export class ClaudeUsageService {
const shell = this.isWindows ? 'cmd.exe' : '/bin/sh';
const args = this.isWindows ? ['/c', 'claude', '/usage'] : ['-c', 'claude /usage'];
const ptyProcess = pty.spawn(shell, args, {
name: 'xterm-256color',
cols: 120,
rows: 30,
cwd: workingDirectory,
env: {
...process.env,
TERM: 'xterm-256color',
} as Record<string, string>,
});
let ptyProcess: any = null;
try {
ptyProcess = pty.spawn(shell, args, {
name: 'xterm-256color',
cols: 120,
rows: 30,
cwd: workingDirectory,
env: {
...process.env,
TERM: 'xterm-256color',
} as Record<string, string>,
});
} catch (spawnError) {
// pty.spawn() can throw synchronously if the native module fails to load
// or if PTY is not available in the current environment (e.g., containers without /dev/pts)
const errorMessage = spawnError instanceof Error ? spawnError.message : String(spawnError);
logger.error('[executeClaudeUsageCommandPty] Failed to spawn PTY:', errorMessage);
// Return a user-friendly error instead of crashing
reject(
new Error(
`Unable to access terminal: ${errorMessage}. Claude CLI may not be available or PTY support is limited in this environment.`
)
);
return;
}
const timeoutId = setTimeout(() => {
if (!settled) {
settled = true;
ptyProcess.kill();
if (ptyProcess && !ptyProcess.killed) {
ptyProcess.kill();
}
// Don't fail if we have data - return it instead
if (output.includes('Current session')) {
resolve(output);
@@ -188,7 +210,7 @@ export class ClaudeUsageService {
}
}, this.timeout);
ptyProcess.onData((data) => {
ptyProcess.onData((data: string) => {
output += data;
// Check if we've seen the usage data (look for "Current session")
@@ -196,12 +218,12 @@ export class ClaudeUsageService {
hasSeenUsageData = true;
// Wait for full output, then send escape to exit
setTimeout(() => {
if (!settled) {
if (!settled && ptyProcess && !ptyProcess.killed) {
ptyProcess.write('\x1b'); // Send escape key
// Fallback: if ESC doesn't exit (Linux), use SIGTERM after 2s
setTimeout(() => {
if (!settled) {
if (!settled && ptyProcess && !ptyProcess.killed) {
ptyProcess.kill('SIGTERM');
}
}, 2000);
@@ -212,14 +234,14 @@ export class ClaudeUsageService {
// Fallback: if we see "Esc to cancel" but haven't seen usage data yet
if (!hasSeenUsageData && output.includes('Esc to cancel')) {
setTimeout(() => {
if (!settled) {
if (!settled && ptyProcess && !ptyProcess.killed) {
ptyProcess.write('\x1b'); // Send escape key
}
}, 3000);
}
});
ptyProcess.onExit(({ exitCode }) => {
ptyProcess.onExit(({ exitCode }: { exitCode: number }) => {
clearTimeout(timeoutId);
if (settled) return;
settled = true;

View File

@@ -0,0 +1,212 @@
import { spawn, type ChildProcess } from 'child_process';
import readline from 'readline';
import { findCodexCliPath } from '@automaker/platform';
import { createLogger } from '@automaker/utils';
import type {
AppServerModelResponse,
AppServerAccountResponse,
AppServerRateLimitsResponse,
JsonRpcRequest,
} from '@automaker/types';
const logger = createLogger('CodexAppServer');
/**
* CodexAppServerService
*
* Centralized service for communicating with Codex CLI's app-server via JSON-RPC protocol.
* Handles process spawning, JSON-RPC messaging, and cleanup.
*
* Connection strategy: Spawn on-demand (new process for each method call)
*/
export class CodexAppServerService {
private cachedCliPath: string | null = null;
/**
* Check if Codex CLI is available on the system
*/
async isAvailable(): Promise<boolean> {
this.cachedCliPath = await findCodexCliPath();
return Boolean(this.cachedCliPath);
}
/**
* Fetch available models from app-server
*/
async getModels(): Promise<AppServerModelResponse | null> {
const result = await this.executeJsonRpc<AppServerModelResponse>((sendRequest) => {
return sendRequest('model/list', {});
});
if (result) {
logger.info(`[getModels] ✓ Fetched ${result.data.length} models`);
}
return result;
}
/**
* Fetch account information from app-server
*/
async getAccount(): Promise<AppServerAccountResponse | null> {
return this.executeJsonRpc<AppServerAccountResponse>((sendRequest) => {
return sendRequest('account/read', { refreshToken: false });
});
}
/**
* Fetch rate limits from app-server
*/
async getRateLimits(): Promise<AppServerRateLimitsResponse | null> {
return this.executeJsonRpc<AppServerRateLimitsResponse>((sendRequest) => {
return sendRequest('account/rateLimits/read', {});
});
}
/**
* Execute JSON-RPC requests via Codex app-server
*
* This method:
* 1. Spawns a new `codex app-server` process
* 2. Handles JSON-RPC initialization handshake
* 3. Executes user-provided requests
* 4. Cleans up the process
*
* @param requestFn - Function that receives sendRequest helper and returns a promise
* @returns Result of the JSON-RPC request or null on failure
*/
private async executeJsonRpc<T>(
requestFn: (sendRequest: <R>(method: string, params?: unknown) => Promise<R>) => Promise<T>
): Promise<T | null> {
let childProcess: ChildProcess | null = null;
try {
const cliPath = this.cachedCliPath || (await findCodexCliPath());
if (!cliPath) {
return null;
}
// On Windows, .cmd files must be run through shell
const needsShell = process.platform === 'win32' && cliPath.toLowerCase().endsWith('.cmd');
childProcess = spawn(cliPath, ['app-server'], {
cwd: process.cwd(),
env: {
...process.env,
TERM: 'dumb',
},
stdio: ['pipe', 'pipe', 'pipe'],
shell: needsShell,
});
if (!childProcess.stdin || !childProcess.stdout) {
throw new Error('Failed to create stdio pipes');
}
// Setup readline for reading JSONL responses
const rl = readline.createInterface({
input: childProcess.stdout,
crlfDelay: Infinity,
});
// Message ID counter for JSON-RPC
let messageId = 0;
const pendingRequests = new Map<
number,
{
resolve: (value: unknown) => void;
reject: (error: Error) => void;
timeout: NodeJS.Timeout;
}
>();
// Process incoming messages
rl.on('line', (line) => {
if (!line.trim()) return;
try {
const message = JSON.parse(line);
// Handle response to our request
if ('id' in message && message.id !== undefined) {
const pending = pendingRequests.get(message.id);
if (pending) {
clearTimeout(pending.timeout);
pendingRequests.delete(message.id);
if (message.error) {
pending.reject(new Error(message.error.message || 'Unknown error'));
} else {
pending.resolve(message.result);
}
}
}
// Ignore notifications (no id field)
} catch {
// Ignore parse errors for non-JSON lines
}
});
// Helper to send JSON-RPC request and wait for response
const sendRequest = <R>(method: string, params?: unknown): Promise<R> => {
return new Promise((resolve, reject) => {
const id = ++messageId;
const request: JsonRpcRequest = {
method,
id,
params: params ?? {},
};
// Set timeout for request (10 seconds)
const timeout = setTimeout(() => {
pendingRequests.delete(id);
reject(new Error(`Request timeout: ${method}`));
}, 10000);
pendingRequests.set(id, {
resolve: resolve as (value: unknown) => void,
reject,
timeout,
});
childProcess!.stdin!.write(JSON.stringify(request) + '\n');
});
};
// Helper to send notification (no response expected)
const sendNotification = (method: string, params?: unknown): void => {
const notification = params ? { method, params } : { method };
childProcess!.stdin!.write(JSON.stringify(notification) + '\n');
};
// 1. Initialize the app-server
await sendRequest('initialize', {
clientInfo: {
name: 'automaker',
title: 'AutoMaker',
version: '1.0.0',
},
});
// 2. Send initialized notification
sendNotification('initialized');
// 3. Execute user-provided requests
const result = await requestFn(sendRequest);
// Clean up
rl.close();
childProcess.kill('SIGTERM');
return result;
} catch (error) {
logger.error('[executeJsonRpc] Failed:', error);
return null;
} finally {
// Ensure process is killed
if (childProcess && !childProcess.killed) {
childProcess.kill('SIGTERM');
}
}
}
}
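// Usage sketch (illustrative only; assumes a no-argument constructor and that the
// Codex CLI is installed and on PATH). Each public call spawns a short-lived
// `codex app-server` process and resolves to null if the CLI is missing or the
// JSON-RPC exchange fails:
//
//   const appServer = new CodexAppServerService();
//   const models = await appServer.getModels();       // model/list
//   const account = await appServer.getAccount();     // account/read
//   const limits = await appServer.getRateLimits();   // account/rateLimits/read
//
// On the wire this is newline-delimited JSON-RPC over stdio, roughly:
//
//   → {"id":1,"method":"initialize","params":{"clientInfo":{"name":"automaker","title":"AutoMaker","version":"1.0.0"}}}
//   ← {"id":1,"result":{...}}
//   → {"method":"initialized"}
//   → {"id":2,"method":"model/list","params":{}}
//   ← {"id":2,"result":{"data":[...]}}
//
// Exact response payloads depend on the installed Codex CLI version.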

View File

@@ -0,0 +1,258 @@
import path from 'path';
import { secureFs } from '@automaker/platform';
import { createLogger } from '@automaker/utils';
import type { AppServerModel } from '@automaker/types';
import type { CodexAppServerService } from './codex-app-server-service.js';
const logger = createLogger('CodexModelCache');
/**
* Codex model with UI-compatible format
*/
export interface CodexModel {
id: string;
label: string;
description: string;
hasThinking: boolean;
supportsVision: boolean;
tier: 'premium' | 'standard' | 'basic';
isDefault: boolean;
}
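// Example of a transformed model as consumed by the UI (values are illustrative;
// real ids come from the app-server via transformModel below):
//
//   {
//     id: 'codex-example-model',      // app-server id with the 'codex-' prefix added
//     label: 'Example Model',
//     description: 'Hypothetical model for illustration',
//     hasThinking: true,              // derived from supportedReasoningEfforts
//     supportsVision: true,
//     tier: 'standard',
//     isDefault: false,
//   }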
/**
* Cache structure stored on disk
*/
interface CodexModelCache {
models: CodexModel[];
cachedAt: number;
ttl: number;
}
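// This is also the on-disk shape of codex-models-cache.json (illustrative values):
//
//   { "models": [ /* CodexModel[] */ ], "cachedAt": 1700000000000, "ttl": 3600000 }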
/**
* CodexModelCacheService
*
* Caches Codex models fetched from app-server with TTL-based invalidation and disk persistence.
*
* Features:
* - 1-hour TTL (configurable)
* - Atomic file writes (temp file + rename)
* - Request coalescing (concurrent refresh calls share a single in-flight fetch)
* - Intended to be bootstrapped at server startup (the constructor itself does no I/O)
* - Graceful fallback (returns empty array on errors)
*/
export class CodexModelCacheService {
private cacheFilePath: string;
private ttl: number;
private appServerService: CodexAppServerService;
private inFlightRefresh: Promise<CodexModel[]> | null = null;
constructor(
dataDir: string,
appServerService: CodexAppServerService,
ttl: number = 3600000 // 1 hour default
) {
this.cacheFilePath = path.join(dataDir, 'codex-models-cache.json');
this.ttl = ttl;
this.appServerService = appServerService;
}
/**
* Get models from cache or fetch if stale
*
* @param forceRefresh - If true, bypass cache and fetch fresh data
* @returns Array of Codex models (empty array if unavailable)
*/
async getModels(forceRefresh = false): Promise<CodexModel[]> {
// If force refresh, skip cache
if (forceRefresh) {
return this.refreshModels();
}
// Try to load from cache
const cached = await this.loadFromCache();
if (cached) {
const age = Date.now() - cached.cachedAt;
const isStale = age > cached.ttl;
if (!isStale) {
logger.info(
`[getModels] ✓ Using cached models (${cached.models.length} models, age: ${Math.round(age / 60000)}min)`
);
return cached.models;
}
}
// Cache is stale or missing, refresh
return this.refreshModels();
}
/**
* Get models with cache metadata
*
* @param forceRefresh - If true, bypass cache and fetch fresh data
* @returns Object containing models and cache timestamp
*/
async getModelsWithMetadata(
forceRefresh = false
): Promise<{ models: CodexModel[]; cachedAt: number }> {
const models = await this.getModels(forceRefresh);
// Try to get the actual cache timestamp
const cached = await this.loadFromCache();
const cachedAt = cached?.cachedAt ?? Date.now();
return { models, cachedAt };
}
/**
* Refresh models from app-server and update cache
*
* Concurrent calls are coalesced: they share a single in-flight refresh promise
*/
async refreshModels(): Promise<CodexModel[]> {
// Deduplicate concurrent refresh requests
if (this.inFlightRefresh) {
return this.inFlightRefresh;
}
// Start new refresh
this.inFlightRefresh = this.doRefresh();
try {
const models = await this.inFlightRefresh;
return models;
} finally {
this.inFlightRefresh = null;
}
}
/**
* Clear the cache file
*/
async clearCache(): Promise<void> {
logger.info('[clearCache] Clearing cache...');
try {
await secureFs.unlink(this.cacheFilePath);
logger.info('[clearCache] Cache cleared');
} catch (error) {
if ((error as NodeJS.ErrnoException).code !== 'ENOENT') {
logger.error('[clearCache] Failed to clear cache:', error);
}
}
}
/**
* Internal method to perform the actual refresh
*/
private async doRefresh(): Promise<CodexModel[]> {
try {
// Check if app-server is available
const isAvailable = await this.appServerService.isAvailable();
if (!isAvailable) {
return [];
}
// Fetch models from app-server
const response = await this.appServerService.getModels();
if (!response || !response.data) {
return [];
}
// Transform models to UI format
const models = response.data.map((model) => this.transformModel(model));
// Save to cache
await this.saveToCache(models);
logger.info(`[refreshModels] ✓ Fetched fresh models (${models.length} models)`);
return models;
} catch (error) {
logger.error('[doRefresh] Refresh failed:', error);
return [];
}
}
/**
* Transform app-server model to UI-compatible format
*/
private transformModel(appServerModel: AppServerModel): CodexModel {
return {
id: `codex-${appServerModel.id}`, // Add 'codex-' prefix for compatibility
label: appServerModel.displayName,
description: appServerModel.description,
hasThinking: appServerModel.supportedReasoningEfforts.length > 0,
supportsVision: true, // Assumed: the app-server model payload does not expose a vision capability flag
tier: this.inferTier(appServerModel.id),
isDefault: appServerModel.isDefault,
};
}
/**
* Infer tier from model ID
*/
private inferTier(modelId: string): 'premium' | 'standard' | 'basic' {
if (modelId.includes('max') || modelId.includes('gpt-5.2-codex')) {
return 'premium';
}
if (modelId.includes('mini')) {
return 'basic';
}
return 'standard';
}
/**
* Load cache from disk
*/
private async loadFromCache(): Promise<CodexModelCache | null> {
try {
const content = await secureFs.readFile(this.cacheFilePath, 'utf-8');
const cache = JSON.parse(content.toString()) as CodexModelCache;
// Validate cache structure
if (!Array.isArray(cache.models) || typeof cache.cachedAt !== 'number') {
logger.warn('[loadFromCache] Invalid cache structure, ignoring');
return null;
}
return cache;
} catch (error) {
if ((error as NodeJS.ErrnoException).code !== 'ENOENT') {
logger.warn('[loadFromCache] Failed to read cache:', error);
}
return null;
}
}
/**
* Save cache to disk (atomic write)
*/
private async saveToCache(models: CodexModel[]): Promise<void> {
const cache: CodexModelCache = {
models,
cachedAt: Date.now(),
ttl: this.ttl,
};
const tempPath = `${this.cacheFilePath}.tmp.${Date.now()}`;
try {
// Write to temp file
const content = JSON.stringify(cache, null, 2);
await secureFs.writeFile(tempPath, content, 'utf-8');
// Atomic rename
await secureFs.rename(tempPath, this.cacheFilePath);
} catch (error) {
logger.error('[saveToCache] Failed to save cache:', error);
// Clean up temp file
try {
await secureFs.unlink(tempPath);
} catch {
// Ignore cleanup errors
}
}
}
}
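// Usage sketch (illustrative only; the dataDir value and app-server construction are
// assumptions about how the service is wired up at startup):
//
//   const appServer = new CodexAppServerService();
//   const modelCache = new CodexModelCacheService('/path/to/data', appServer);
//   const models = await modelCache.getModels();       // served from cache while fresh (1h TTL)
//   const fresh = await modelCache.getModels(true);    // force refresh, bypasses the cache
//   await modelCache.clearCache();                     // removes codex-models-cache.json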

View File

@@ -1,11 +1,11 @@
import {
findCodexCliPath,
spawnProcess,
getCodexAuthPath,
systemPathExists,
systemPathReadFile,
} from '@automaker/platform';
import { createLogger } from '@automaker/utils';
import type { CodexAppServerService } from './codex-app-server-service.js';
const logger = createLogger('CodexUsage');
@@ -18,19 +18,12 @@ export interface CodexRateLimitWindow {
resetsAt: number;
}
export interface CodexCreditsSnapshot {
balance?: string;
unlimited?: boolean;
hasCredits?: boolean;
}
export type CodexPlanType = 'free' | 'plus' | 'pro' | 'team' | 'enterprise' | 'edu' | 'unknown';
export interface CodexUsageData {
rateLimits: {
primary?: CodexRateLimitWindow;
secondary?: CodexRateLimitWindow;
credits?: CodexCreditsSnapshot;
planType?: CodexPlanType;
} | null;
lastUpdated: string;
@@ -39,13 +32,24 @@ export interface CodexUsageData {
/**
* Codex Usage Service
*
* Attempts to fetch usage data from Codex CLI and OpenAI API.
* Codex CLI doesn't provide a direct usage command, but we can:
* 1. Parse usage info from error responses (rate limit errors contain plan info)
* 2. Check for OpenAI API usage if API key is available
* Fetches usage data from Codex CLI using the app-server JSON-RPC API.
* Falls back to auth file parsing if app-server is unavailable.
*/
export class CodexUsageService {
private cachedCliPath: string | null = null;
private appServerService: CodexAppServerService | null = null;
private accountPlanTypeArray: CodexPlanType[] = [
'free',
'plus',
'pro',
'team',
'enterprise',
'edu',
];
constructor(appServerService?: CodexAppServerService) {
this.appServerService = appServerService || null;
}
/**
* Check if Codex CLI is available on the system
@@ -58,60 +62,131 @@ export class CodexUsageService {
/**
* Attempt to fetch usage data
*
* Tries multiple approaches:
* 1. Always try to get plan type from auth file first (authoritative source)
* 2. Check for OpenAI API key in environment for API usage
* 3. Make a test request to capture rate limit headers from CLI
* 4. Combine results from auth file and CLI
* Priority order:
* 1. Codex app-server JSON-RPC API (most reliable, provides real-time data)
* 2. Auth file JWT parsing (fallback for plan type)
*/
async fetchUsageData(): Promise<CodexUsageData> {
logger.info('[fetchUsageData] Starting...');
const cliPath = this.cachedCliPath || (await findCodexCliPath());
if (!cliPath) {
logger.error('[fetchUsageData] Codex CLI not found');
throw new Error('Codex CLI not found. Please install it with: npm install -g @openai/codex');
}
// Always try to get plan type from auth file first - this is the authoritative source
const authPlanType = await this.getPlanTypeFromAuthFile();
logger.info(`[fetchUsageData] Using CLI path: ${cliPath}`);
// Check if user has an API key that we can use
const hasApiKey = !!process.env.OPENAI_API_KEY;
if (hasApiKey) {
// Try to get usage from OpenAI API
const openaiUsage = await this.fetchOpenAIUsage();
if (openaiUsage) {
// Merge with auth file plan type if available
if (authPlanType && openaiUsage.rateLimits) {
openaiUsage.rateLimits.planType = authPlanType;
}
return openaiUsage;
}
// Try to get usage from Codex app-server (most reliable method)
const appServerUsage = await this.fetchFromAppServer();
if (appServerUsage) {
logger.info('[fetchUsageData] ✓ Fetched usage from app-server');
return appServerUsage;
}
// Try to get usage from Codex CLI by making a simple request
const codexUsage = await this.fetchCodexUsage(cliPath, authPlanType);
if (codexUsage) {
return codexUsage;
}
logger.info('[fetchUsageData] App-server failed, trying auth file fallback...');
// Fallback: try to parse full usage from auth file
// Fallback: try to parse usage from auth file
const authUsage = await this.fetchFromAuthFile();
if (authUsage) {
logger.info('[fetchUsageData] ✓ Fetched usage from auth file');
return authUsage;
}
// If all else fails, return a message with helpful information
throw new Error(
'Codex usage statistics require additional configuration. ' +
'To enable usage tracking:\n\n' +
'1. Set your OpenAI API key in the environment:\n' +
' export OPENAI_API_KEY=sk-...\n\n' +
'2. Or check your usage at:\n' +
' https://platform.openai.com/usage\n\n' +
'Note: If using Codex CLI with ChatGPT OAuth authentication, ' +
'usage data must be queried through your OpenAI account.'
);
logger.info('[fetchUsageData] All methods failed, returning unknown');
// If all else fails, return unknown
return {
rateLimits: {
planType: 'unknown',
},
lastUpdated: new Date().toISOString(),
};
}
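// Example of the data returned when the app-server path succeeds (illustrative values;
// limit/used/remaining are always -1 because the API only reports percentages):
//
//   {
//     rateLimits: {
//       planType: 'plus',
//       primary: { limit: -1, used: -1, remaining: -1, usedPercent: 42, windowDurationMins: 300, resetsAt: 1700000000 },
//     },
//     lastUpdated: '2026-01-11T00:00:00.000Z',
//   }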
/**
* Fetch usage data from Codex app-server using JSON-RPC API
* This is the most reliable method as it gets real-time data from OpenAI
*/
private async fetchFromAppServer(): Promise<CodexUsageData | null> {
try {
// Use CodexAppServerService if available
if (!this.appServerService) {
return null;
}
// Fetch account and rate limits in parallel
const [accountResult, rateLimitsResult] = await Promise.all([
this.appServerService.getAccount(),
this.appServerService.getRateLimits(),
]);
if (!accountResult) {
return null;
}
// Build response
// Prefer planType from rateLimits (more accurate/current) over account (can be stale)
let planType: CodexPlanType = 'unknown';
// First try rate limits planType (most accurate)
const rateLimitsPlanType = rateLimitsResult?.rateLimits?.planType;
if (rateLimitsPlanType) {
const normalizedType = rateLimitsPlanType.toLowerCase() as CodexPlanType;
if (this.accountPlanTypeArray.includes(normalizedType)) {
planType = normalizedType;
}
}
// Fall back to account planType if rate limits didn't have it
if (planType === 'unknown' && accountResult.account?.planType) {
const normalizedType = accountResult.account.planType.toLowerCase() as CodexPlanType;
if (this.accountPlanTypeArray.includes(normalizedType)) {
planType = normalizedType;
}
}
const result: CodexUsageData = {
rateLimits: {
planType,
},
lastUpdated: new Date().toISOString(),
};
// Add rate limit info if available
if (rateLimitsResult?.rateLimits?.primary) {
const primary = rateLimitsResult.rateLimits.primary;
result.rateLimits!.primary = {
limit: -1, // Not provided by API
used: -1, // Not provided by API
remaining: -1, // Not provided by API
usedPercent: primary.usedPercent,
windowDurationMins: primary.windowDurationMins,
resetsAt: primary.resetsAt,
};
}
// Add secondary rate limit if available
if (rateLimitsResult?.rateLimits?.secondary) {
const secondary = rateLimitsResult.rateLimits.secondary;
result.rateLimits!.secondary = {
limit: -1, // Not provided by API
used: -1, // Not provided by API
remaining: -1, // Not provided by API
usedPercent: secondary.usedPercent,
windowDurationMins: secondary.windowDurationMins,
resetsAt: secondary.resetsAt,
};
}
logger.info(
`[fetchFromAppServer] ✓ Plan: ${planType}, Primary: ${result.rateLimits?.primary?.usedPercent ?? 'N/A'}%, Secondary: ${result.rateLimits?.secondary?.usedPercent ?? 'N/A'}%`
);
return result;
} catch (error) {
logger.error('[fetchFromAppServer] Failed:', error);
return null;
}
}
/**
@@ -121,9 +196,11 @@ export class CodexUsageService {
private async getPlanTypeFromAuthFile(): Promise<CodexPlanType> {
try {
const authFilePath = getCodexAuthPath();
const exists = await systemPathExists(authFilePath);
logger.info(`[getPlanTypeFromAuthFile] Auth file path: ${authFilePath}`);
const exists = systemPathExists(authFilePath);
if (!exists) {
logger.warn('[getPlanTypeFromAuthFile] Auth file does not exist');
return 'unknown';
}
@@ -131,16 +208,24 @@ export class CodexUsageService {
const authData = JSON.parse(authContent);
if (!authData.tokens?.id_token) {
logger.info('[getPlanTypeFromAuthFile] No id_token in auth file');
return 'unknown';
}
const claims = this.parseJwt(authData.tokens.id_token);
if (!claims) {
logger.info('[getPlanTypeFromAuthFile] Failed to parse JWT');
return 'unknown';
}
logger.info('[getPlanTypeFromAuthFile] JWT claims keys:', Object.keys(claims));
// Extract plan type from nested OpenAI auth object with type validation
const openaiAuthClaim = claims['https://api.openai.com/auth'];
logger.info(
'[getPlanTypeFromAuthFile] OpenAI auth claim:',
JSON.stringify(openaiAuthClaim, null, 2)
);
let accountType: string | undefined;
let isSubscriptionExpired = false;
@@ -188,154 +273,23 @@ export class CodexUsageService {
}
if (accountType) {
const normalizedType = accountType.toLowerCase();
if (['free', 'plus', 'pro', 'team', 'enterprise', 'edu'].includes(normalizedType)) {
return normalizedType as CodexPlanType;
}
}
} catch (error) {
logger.error('Failed to get plan type from auth file:', error);
}
return 'unknown';
}
/**
* Try to fetch usage from OpenAI API using the API key
*/
private async fetchOpenAIUsage(): Promise<CodexUsageData | null> {
const apiKey = process.env.OPENAI_API_KEY;
if (!apiKey) {
return null;
}
try {
const endTime = Math.floor(Date.now() / 1000);
const startTime = endTime - 7 * 24 * 60 * 60; // Last 7 days
const response = await fetch(
`https://api.openai.com/v1/organization/usage/completions?start_time=${startTime}&end_time=${endTime}&limit=1`,
{
headers: {
Authorization: `Bearer ${apiKey}`,
'Content-Type': 'application/json',
},
}
);
if (response.ok) {
const data = await response.json();
return this.parseOpenAIUsage(data);
}
} catch (error) {
logger.error('Failed to fetch from OpenAI API:', error);
}
return null;
}
/**
* Parse OpenAI usage API response
*/
private parseOpenAIUsage(data: any): CodexUsageData {
let totalInputTokens = 0;
let totalOutputTokens = 0;
if (data.data && Array.isArray(data.data)) {
for (const bucket of data.data) {
if (bucket.results && Array.isArray(bucket.results)) {
for (const result of bucket.results) {
totalInputTokens += result.input_tokens || 0;
totalOutputTokens += result.output_tokens || 0;
}
}
}
}
return {
rateLimits: {
planType: 'unknown',
credits: {
hasCredits: true,
},
},
lastUpdated: new Date().toISOString(),
};
}
/**
* Try to fetch usage by making a test request to Codex CLI
* and parsing rate limit information from the response
*/
private async fetchCodexUsage(
cliPath: string,
authPlanType: CodexPlanType
): Promise<CodexUsageData | null> {
try {
// Make a simple request to trigger rate limit info if at limit
const result = await spawnProcess({
command: cliPath,
args: ['exec', '--', 'echo', 'test'],
cwd: process.cwd(),
env: {
...process.env,
TERM: 'dumb',
},
timeout: 10000,
});
// Parse the output for rate limit information
const combinedOutput = (result.stdout + result.stderr).toLowerCase();
// Check if we got a rate limit error
const rateLimitMatch = combinedOutput.match(
/usage_limit_reached.*?"plan_type":"([^"]+)".*?"resets_at":(\d+).*?"resets_in_seconds":(\d+)/
);
if (rateLimitMatch) {
// Rate limit error contains the plan type - use that as it's the most authoritative
const planType = rateLimitMatch[1] as CodexPlanType;
const resetsAt = parseInt(rateLimitMatch[2], 10);
const resetsInSeconds = parseInt(rateLimitMatch[3], 10);
const normalizedType = accountType.toLowerCase() as CodexPlanType;
logger.info(
`Rate limit hit - plan: ${planType}, resets in ${Math.ceil(resetsInSeconds / 60)} mins`
`[getPlanTypeFromAuthFile] Account type: "${accountType}", normalized: "${normalizedType}"`
);
return {
rateLimits: {
planType,
primary: {
limit: 0,
used: 0,
remaining: 0,
usedPercent: 100,
windowDurationMins: Math.ceil(resetsInSeconds / 60),
resetsAt,
},
},
lastUpdated: new Date().toISOString(),
};
if (this.accountPlanTypeArray.includes(normalizedType)) {
logger.info(`[getPlanTypeFromAuthFile] Returning plan type: ${normalizedType}`);
return normalizedType;
}
} else {
logger.info('[getPlanTypeFromAuthFile] No account type found in claims');
}
// No rate limit error - use the plan type from auth file
const isFreePlan = authPlanType === 'free';
return {
rateLimits: {
planType: authPlanType,
credits: {
hasCredits: true,
unlimited: !isFreePlan && authPlanType !== 'unknown',
},
},
lastUpdated: new Date().toISOString(),
};
} catch (error) {
logger.error('Failed to fetch from Codex CLI:', error);
logger.error('[getPlanTypeFromAuthFile] Failed to get plan type from auth file:', error);
}
return null;
logger.info('[getPlanTypeFromAuthFile] Returning unknown');
return 'unknown';
}
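// For reference, the auth file parsed above is roughly shaped like the following
// (the field names nested under the OpenAI auth claim are an assumption for
// illustration and may differ between Codex CLI versions):
//
//   {
//     "tokens": {
//       "id_token": "<JWT whose payload carries a 'https://api.openai.com/auth' claim
//                     containing the account/plan type, e.g. 'plus' or 'pro'>"
//     }
//   }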
/**
@@ -343,27 +297,27 @@ export class CodexUsageService {
* Reuses getPlanTypeFromAuthFile to avoid code duplication
*/
private async fetchFromAuthFile(): Promise<CodexUsageData | null> {
logger.info('[fetchFromAuthFile] Starting...');
try {
const planType = await this.getPlanTypeFromAuthFile();
logger.info(`[fetchFromAuthFile] Got plan type: ${planType}`);
if (planType === 'unknown') {
logger.info('[fetchFromAuthFile] Plan type unknown, returning null');
return null;
}
const isFreePlan = planType === 'free';
return {
const result: CodexUsageData = {
rateLimits: {
planType,
credits: {
hasCredits: true,
unlimited: !isFreePlan,
},
},
lastUpdated: new Date().toISOString(),
};
logger.info('[fetchFromAuthFile] Returning result:', JSON.stringify(result, null, 2));
return result;
} catch (error) {
logger.error('Failed to parse auth file:', error);
logger.error('[fetchFromAuthFile] Failed to parse auth file:', error);
}
return null;
@@ -372,7 +326,7 @@ export class CodexUsageService {
/**
* Parse JWT token to extract claims
*/
private parseJwt(token: string): any {
private parseJwt(token: string): Record<string, unknown> | null {
try {
const parts = token.split('.');
@@ -383,18 +337,8 @@ export class CodexUsageService {
const base64Url = parts[1];
const base64 = base64Url.replace(/-/g, '+').replace(/_/g, '/');
// Use Buffer for Node.js environment instead of atob
let jsonPayload: string;
if (typeof Buffer !== 'undefined') {
jsonPayload = Buffer.from(base64, 'base64').toString('utf-8');
} else {
jsonPayload = decodeURIComponent(
atob(base64)
.split('')
.map((c) => '%' + ('00' + c.charCodeAt(0).toString(16)).slice(-2))
.join('')
);
}
// Use Buffer for Node.js environment
const jsonPayload = Buffer.from(base64, 'base64').toString('utf-8');
return JSON.parse(jsonPayload);
} catch {

View File

@@ -308,13 +308,15 @@ export class FeatureLoader {
* @param updates - Partial feature updates
* @param descriptionHistorySource - Source of description change ('enhance' or 'edit')
* @param enhancementMode - Enhancement mode if source is 'enhance'
* @param preEnhancementDescription - Description before enhancement (for restoring original)
*/
async update(
projectPath: string,
featureId: string,
updates: Partial<Feature>,
descriptionHistorySource?: 'enhance' | 'edit',
enhancementMode?: 'improve' | 'technical' | 'simplify' | 'acceptance'
enhancementMode?: 'improve' | 'technical' | 'simplify' | 'acceptance' | 'ux-reviewer',
preEnhancementDescription?: string
): Promise<Feature> {
const feature = await this.get(projectPath, featureId);
if (!feature) {
@@ -338,9 +340,31 @@ export class FeatureLoader {
updates.description !== feature.description &&
updates.description.trim()
) {
const timestamp = new Date().toISOString();
// If this is an enhancement and we have the pre-enhancement description,
// add the original text to history first (so the user can restore to it)
if (
descriptionHistorySource === 'enhance' &&
preEnhancementDescription &&
preEnhancementDescription.trim()
) {
// Check if this pre-enhancement text is different from the last history entry
const lastEntry = updatedHistory[updatedHistory.length - 1];
if (!lastEntry || lastEntry.description !== preEnhancementDescription) {
const preEnhanceEntry: DescriptionHistoryEntry = {
description: preEnhancementDescription,
timestamp,
source: updatedHistory.length === 0 ? 'initial' : 'edit',
};
updatedHistory = [...updatedHistory, preEnhanceEntry];
}
}
// Add the new/enhanced description to history
const historyEntry: DescriptionHistoryEntry = {
description: updates.description,
timestamp: new Date().toISOString(),
timestamp,
source: descriptionHistorySource || 'edit',
...(descriptionHistorySource === 'enhance' && enhancementMode ? { enhancementMode } : {}),
};

View File

@@ -22,7 +22,6 @@ import type {
Credentials,
ProjectSettings,
KeyboardShortcuts,
AIProfile,
ProjectRef,
TrashedProjectRef,
BoardBackgroundSettings,
@@ -299,7 +298,6 @@ export class SettingsService {
ignoreEmptyArrayOverwrite('trashedProjects');
ignoreEmptyArrayOverwrite('projectHistory');
ignoreEmptyArrayOverwrite('recentFolders');
ignoreEmptyArrayOverwrite('aiProfiles');
ignoreEmptyArrayOverwrite('mcpServers');
ignoreEmptyArrayOverwrite('enabledCursorModels');
@@ -602,8 +600,6 @@ export class SettingsService {
theme: (appState.theme as GlobalSettings['theme']) || 'dark',
sidebarOpen: appState.sidebarOpen !== undefined ? (appState.sidebarOpen as boolean) : true,
chatHistoryOpen: (appState.chatHistoryOpen as boolean) || false,
kanbanCardDetailLevel:
(appState.kanbanCardDetailLevel as GlobalSettings['kanbanCardDetailLevel']) || 'standard',
maxConcurrency: (appState.maxConcurrency as number) || 3,
defaultSkipTests:
appState.defaultSkipTests !== undefined ? (appState.defaultSkipTests as boolean) : true,
@@ -617,18 +613,15 @@ export class SettingsService {
: false,
useWorktrees:
appState.useWorktrees !== undefined ? (appState.useWorktrees as boolean) : true,
showProfilesOnly: (appState.showProfilesOnly as boolean) || false,
defaultPlanningMode:
(appState.defaultPlanningMode as GlobalSettings['defaultPlanningMode']) || 'skip',
defaultRequirePlanApproval: (appState.defaultRequirePlanApproval as boolean) || false,
defaultAIProfileId: (appState.defaultAIProfileId as string | null) || null,
muteDoneSound: (appState.muteDoneSound as boolean) || false,
enhancementModel:
(appState.enhancementModel as GlobalSettings['enhancementModel']) || 'sonnet',
keyboardShortcuts:
(appState.keyboardShortcuts as KeyboardShortcuts) ||
DEFAULT_GLOBAL_SETTINGS.keyboardShortcuts,
aiProfiles: (appState.aiProfiles as AIProfile[]) || [],
projects: (appState.projects as ProjectRef[]) || [],
trashedProjects: (appState.trashedProjects as TrashedProjectRef[]) || [],
projectHistory: (appState.projectHistory as string[]) || [],

View File

@@ -7,13 +7,11 @@
export type {
ThemeMode,
KanbanCardDetailLevel,
ModelAlias,
PlanningMode,
ThinkingLevel,
ModelProvider,
KeyboardShortcuts,
AIProfile,
ProjectRef,
TrashedProjectRef,
ChatSessionRef,