Mirror of https://github.com/AutoMaker-Org/automaker.git (synced 2026-03-21 11:23:07 +00:00)

Compare commits: 47bd7a76cf ... fix/icon-p (23 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 7609968b81 | |
| | 6408f514a4 | |
| | 6b97219f55 | |
| | 09a4d3f15a | |
| | 51e9a23ba1 | |
| | 0330c70261 | |
| | e7504b247f | |
| | 9305ecc242 | |
| | 2f071a1ba3 | |
| | 1d732916f1 | |
| | 629fd24d9f | |
| | 72cb942788 | |
| | 91bff21d58 | |
| | dfa719079f | |
| | 28becb177b | |
| | f785f1204b | |
| | f3edfbf24e | |
| | 3ddf26f666 | |
| | c81ea768a7 | |
| | 0e020f7e4a | |
| | 0a5540c9a2 | |
| | 7df2182818 | |
| | ee52333636 | |
5
.automaker-lock
Normal file
@@ -0,0 +1,5 @@
{
  "pid": 95678,
  "featureId": "feature-1769860192729-yjxrtx35yh",
  "startedAt": "2026-03-05T18:26:48.876Z"
}
18
.github/workflows/e2e-tests.yml
vendored
@@ -46,7 +46,8 @@ jobs:
echo "SERVER_PID=$SERVER_PID" >> $GITHUB_ENV
env:
PORT: 3008
PORT: 3108
TEST_SERVER_PORT: 3108
NODE_ENV: test
# Use a deterministic API key so Playwright can log in reliably
AUTOMAKER_API_KEY: test-api-key-for-e2e-tests

@@ -81,13 +82,13 @@ jobs:

# Wait for health endpoint
for i in {1..60}; do
if curl -s -f http://localhost:3008/api/health > /dev/null 2>&1; then
if curl -s -f http://localhost:3108/api/health > /dev/null 2>&1; then
echo "Backend server is ready!"
echo "=== Backend logs ==="
cat backend.log
echo ""
echo "Health check response:"
curl -s http://localhost:3008/api/health | jq . 2>/dev/null || echo "Health check: $(curl -s http://localhost:3008/api/health 2>/dev/null || echo 'No response')"
curl -s http://localhost:3108/api/health | jq . 2>/dev/null || echo "Health check: $(curl -s http://localhost:3108/api/health 2>/dev/null || echo 'No response')"
exit 0
fi

@@ -111,11 +112,11 @@ jobs:
ps aux | grep -E "(node|tsx)" | grep -v grep || echo "No node processes found"
echo ""
echo "=== Port status ==="
netstat -tlnp 2>/dev/null | grep :3008 || echo "Port 3008 not listening"
lsof -i :3008 2>/dev/null || echo "lsof not available or port not in use"
netstat -tlnp 2>/dev/null | grep :3108 || echo "Port 3108 not listening"
lsof -i :3108 2>/dev/null || echo "lsof not available or port not in use"
echo ""
echo "=== Health endpoint test ==="
curl -v http://localhost:3008/api/health 2>&1 || echo "Health endpoint failed"
curl -v http://localhost:3108/api/health 2>&1 || echo "Health endpoint failed"

# Kill the server process if it's still hanging
if kill -0 $SERVER_PID 2>/dev/null; then

@@ -132,7 +133,8 @@ jobs:
run: npm run test --workspace=apps/ui
env:
CI: true
VITE_SERVER_URL: http://localhost:3008
VITE_SERVER_URL: http://localhost:3108
SERVER_URL: http://localhost:3108
VITE_SKIP_SETUP: 'true'
# Keep UI-side login/defaults consistent
AUTOMAKER_API_KEY: test-api-key-for-e2e-tests

@@ -147,7 +149,7 @@ jobs:
ps aux | grep -E "(node|tsx)" | grep -v grep || echo "No node processes found"
echo ""
echo "=== Port status ==="
netstat -tlnp 2>/dev/null | grep :3008 || echo "Port 3008 not listening"
netstat -tlnp 2>/dev/null | grep :3108 || echo "Port 3108 not listening"

- name: Upload Playwright report
uses: actions/upload-artifact@v4
@@ -209,9 +209,10 @@ COPY libs ./libs
COPY apps/ui ./apps/ui

# Build packages in dependency order, then build UI
# VITE_SERVER_URL tells the UI where to find the API server
# Use ARG to allow overriding at build time: --build-arg VITE_SERVER_URL=http://api.example.com
ARG VITE_SERVER_URL=http://localhost:3008
# When VITE_SERVER_URL is empty, the UI uses relative URLs (e.g., /api/...) which nginx proxies
# to the server container. This avoids CORS issues entirely in Docker Compose setups.
# Override at build time if needed: --build-arg VITE_SERVER_URL=http://api.example.com
ARG VITE_SERVER_URL=
ENV VITE_SKIP_ELECTRON=true
ENV VITE_SERVER_URL=${VITE_SERVER_URL}
RUN npm run build:packages && npm run build --workspace=apps/ui
2
OPENCODE_CONFIG_CONTENT
Normal file
@@ -0,0 +1,2 @@
{
"$schema": "https://opencode.ai/config.json",}
@@ -52,6 +52,12 @@ HOST=0.0.0.0
# Port to run the server on
PORT=3008

# Port to run the server on for testing
TEST_SERVER_PORT=3108

# Port to run the UI on for testing
TEST_PORT=3107

# Data directory for sessions and metadata
DATA_DIR=./data
@@ -1,6 +1,6 @@
{
"name": "@automaker/server",
"version": "0.13.0",
"version": "0.15.0",
"description": "Backend server for Automaker - provides API for both web and Electron modes",
"author": "AutoMaker Team",
"license": "SEE LICENSE IN LICENSE",
@@ -267,6 +267,26 @@ app.use(
// CORS configuration
// When using credentials (cookies), origin cannot be '*'
// We dynamically allow the requesting origin for local development

// Check if origin is a local/private network address
function isLocalOrigin(origin: string): boolean {
try {
const url = new URL(origin);
const hostname = url.hostname;
return (
hostname === 'localhost' ||
hostname === '127.0.0.1' ||
hostname === '[::1]' ||
hostname === '0.0.0.0' ||
hostname.startsWith('192.168.') ||
hostname.startsWith('10.') ||
/^172\.(1[6-9]|2[0-9]|3[0-1])\./.test(hostname)
);
} catch {
return false;
}
}

app.use(
cors({
origin: (origin, callback) => {

@@ -277,35 +297,25 @@ app.use(
}

// If CORS_ORIGIN is set, use it (can be comma-separated list)
const allowedOrigins = process.env.CORS_ORIGIN?.split(',').map((o) => o.trim());
if (allowedOrigins && allowedOrigins.length > 0 && allowedOrigins[0] !== '*') {
if (allowedOrigins.includes(origin)) {
callback(null, origin);
} else {
callback(new Error('Not allowed by CORS'));
const allowedOrigins = process.env.CORS_ORIGIN?.split(',')
.map((o) => o.trim())
.filter(Boolean);
if (allowedOrigins && allowedOrigins.length > 0) {
if (allowedOrigins.includes('*')) {
callback(null, true);
return;
}
return;
}

// For local development, allow all localhost/loopback origins (any port)
try {
const url = new URL(origin);
const hostname = url.hostname;

if (
hostname === 'localhost' ||
hostname === '127.0.0.1' ||
hostname === '::1' ||
hostname === '0.0.0.0' ||
hostname.startsWith('192.168.') ||
hostname.startsWith('10.') ||
hostname.startsWith('172.')
) {
if (allowedOrigins.includes(origin)) {
callback(null, origin);
return;
}
} catch {
// Ignore URL parsing errors
// Fall through to local network check below
}

// Allow all localhost/loopback/private network origins (any port)
if (isLocalOrigin(origin)) {
callback(null, origin);
return;
}

// Reject other origins by default for security
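Taken together, the new origin handler behaves roughly like the condensed sketch below (illustration only; the exact rejection path when a configured CORS_ORIGIN list does not contain the requesting origin is not visible in this hunk, so that branch is an assumption):

```ts
// Illustrative only — condenses the diff above into one callback.
function corsOriginCallback(
  origin: string,
  callback: (err: Error | null, allow?: string | boolean) => void
): void {
  const allowedOrigins = process.env.CORS_ORIGIN?.split(',')
    .map((o) => o.trim())
    .filter(Boolean);

  // Explicit allowlist takes precedence; '*' means allow everyone.
  if (allowedOrigins && allowedOrigins.length > 0) {
    if (allowedOrigins.includes('*')) return callback(null, true);
    if (allowedOrigins.includes(origin)) return callback(null, origin);
    return callback(new Error('Not allowed by CORS')); // assumed rejection path
  }

  // No allowlist configured: allow localhost/loopback/private-network origins only.
  if (isLocalOrigin(origin)) return callback(null, origin);

  // Reject other origins by default for security.
  callback(new Error('Not allowed by CORS'));
}
```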
37
apps/server/src/lib/exec-utils.ts
Normal file
@@ -0,0 +1,37 @@
/**
 * Shared execution utilities
 *
 * Common helpers for spawning child processes with the correct environment.
 * Used by both route handlers and service layers.
 */

import { createLogger } from '@automaker/utils';

const logger = createLogger('ExecUtils');

// Extended PATH to include common tool installation locations
export const extendedPath = [
  process.env.PATH,
  '/opt/homebrew/bin',
  '/usr/local/bin',
  '/home/linuxbrew/.linuxbrew/bin',
  `${process.env.HOME}/.local/bin`,
]
  .filter(Boolean)
  .join(':');

export const execEnv = {
  ...process.env,
  PATH: extendedPath,
};

export function getErrorMessage(error: unknown): string {
  if (error instanceof Error) {
    return error.message;
  }
  return String(error);
}

export function logError(error: unknown, context: string): void {
  logger.error(`${context}:`, error);
}
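A minimal usage sketch for these helpers (illustrative; the actual call sites are in route handlers and services not shown in this diff):

```ts
import { exec } from 'node:child_process';
import { execEnv, getErrorMessage, logError } from './exec-utils.js';

// Run a CLI that may be installed under Homebrew or ~/.local/bin —
// execEnv's extended PATH makes it discoverable from the server process.
exec('claude --version', { env: execEnv }, (error, stdout) => {
  if (error) {
    logError(error, 'claude --version failed'); // structured log via the shared logger
    console.log(getErrorMessage(error));        // normalized message, safe to surface to clients
    return;
  }
  console.log(stdout.trim());
});
```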
@@ -133,12 +133,16 @@ export const TOOL_PRESETS = {
'Read',
'Write',
'Edit',
'MultiEdit',
'Glob',
'Grep',
'LS',
'Bash',
'WebSearch',
'WebFetch',
'TodoWrite',
'Task',
'Skill',
] as const,

/** Tools for chat/interactive mode */

@@ -146,12 +150,16 @@ export const TOOL_PRESETS = {
'Read',
'Write',
'Edit',
'MultiEdit',
'Glob',
'Grep',
'LS',
'Bash',
'WebSearch',
'WebFetch',
'TodoWrite',
'Task',
'Skill',
] as const,
} as const;
|
||||
@@ -282,11 +290,15 @@ function buildThinkingOptions(thinkingLevel?: ThinkingLevel): Partial<Options> {
}

/**
 * Build system prompt configuration based on autoLoadClaudeMd setting.
 * When autoLoadClaudeMd is true:
 * - Uses preset mode with 'claude_code' to enable CLAUDE.md auto-loading
 * - If there's a custom systemPrompt, appends it to the preset
 * - Sets settingSources to ['project'] for SDK to load CLAUDE.md files
 * Build system prompt and settingSources based on two independent settings:
 * - useClaudeCodeSystemPrompt: controls whether to use the 'claude_code' preset as the base prompt
 * - autoLoadClaudeMd: controls whether to add settingSources for SDK to load CLAUDE.md files
 *
 * These combine independently (4 possible states):
 * 1. Both ON: preset + settingSources (full Claude Code experience)
 * 2. useClaudeCodeSystemPrompt ON, autoLoadClaudeMd OFF: preset only (no CLAUDE.md auto-loading)
 * 3. useClaudeCodeSystemPrompt OFF, autoLoadClaudeMd ON: plain string + settingSources
 * 4. Both OFF: plain string only
 *
 * @param config - The SDK options config
 * @returns Object with systemPrompt and settingSources for SDK options

@@ -295,27 +307,34 @@ function buildClaudeMdOptions(config: CreateSdkOptionsConfig): {
systemPrompt?: string | SystemPromptConfig;
settingSources?: Array<'user' | 'project' | 'local'>;
} {
if (!config.autoLoadClaudeMd) {
// Standard mode - just pass through the system prompt as-is
return config.systemPrompt ? { systemPrompt: config.systemPrompt } : {};
}

// Auto-load CLAUDE.md mode - use preset with settingSources
const result: {
systemPrompt: SystemPromptConfig;
settingSources: Array<'user' | 'project' | 'local'>;
} = {
systemPrompt: {
systemPrompt?: string | SystemPromptConfig;
settingSources?: Array<'user' | 'project' | 'local'>;
} = {};

// Determine system prompt format based on useClaudeCodeSystemPrompt
if (config.useClaudeCodeSystemPrompt) {
// Use Claude Code's built-in system prompt as the base
const presetConfig: SystemPromptConfig = {
type: 'preset',
preset: 'claude_code',
},
// Load both user (~/.claude/CLAUDE.md) and project (.claude/CLAUDE.md) settings
settingSources: ['user', 'project'],
};
};
// If there's a custom system prompt, append it to the preset
if (config.systemPrompt) {
presetConfig.append = config.systemPrompt;
}
result.systemPrompt = presetConfig;
} else {
// Standard mode - just pass through the system prompt as-is
if (config.systemPrompt) {
result.systemPrompt = config.systemPrompt;
}
}

// If there's a custom system prompt, append it to the preset
if (config.systemPrompt) {
result.systemPrompt.append = config.systemPrompt;
// Determine settingSources based on autoLoadClaudeMd
if (config.autoLoadClaudeMd) {
// Load both user (~/.claude/CLAUDE.md) and project (.claude/CLAUDE.md) settings
result.settingSources = ['user', 'project'];
}

return result;
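For reference, the four combinations described in the new doc comment produce return values shaped like this (inferred from the hunk above; `custom` stands for an optional config.systemPrompt string, and fields are simply omitted when it is undefined):

```ts
// useClaudeCodeSystemPrompt: true,  autoLoadClaudeMd: true
//   { systemPrompt: { type: 'preset', preset: 'claude_code', append: custom }, settingSources: ['user', 'project'] }
// useClaudeCodeSystemPrompt: true,  autoLoadClaudeMd: false
//   { systemPrompt: { type: 'preset', preset: 'claude_code', append: custom } }
// useClaudeCodeSystemPrompt: false, autoLoadClaudeMd: true
//   { systemPrompt: custom, settingSources: ['user', 'project'] }
// useClaudeCodeSystemPrompt: false, autoLoadClaudeMd: false
//   { systemPrompt: custom }
```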
@@ -323,12 +342,14 @@ function buildClaudeMdOptions(config: CreateSdkOptionsConfig): {
|
||||
|
||||
/**
|
||||
* System prompt configuration for SDK options
|
||||
* When using preset mode with claude_code, CLAUDE.md files are automatically loaded
|
||||
* The 'claude_code' preset provides the system prompt only — it does NOT auto-load
|
||||
* CLAUDE.md files. CLAUDE.md auto-loading is controlled independently by
|
||||
* settingSources (set via autoLoadClaudeMd). These two settings are orthogonal.
|
||||
*/
|
||||
export interface SystemPromptConfig {
|
||||
/** Use preset mode with claude_code to enable CLAUDE.md auto-loading */
|
||||
/** Use preset mode to select the base system prompt */
|
||||
type: 'preset';
|
||||
/** The preset to use - 'claude_code' enables CLAUDE.md loading */
|
||||
/** The preset to use - 'claude_code' uses the Claude Code system prompt */
|
||||
preset: 'claude_code';
|
||||
/** Optional additional prompt to append to the preset */
|
||||
append?: string;
|
||||
@@ -362,11 +383,19 @@ export interface CreateSdkOptionsConfig {
|
||||
/** Enable auto-loading of CLAUDE.md files via SDK's settingSources */
|
||||
autoLoadClaudeMd?: boolean;
|
||||
|
||||
/** Use Claude Code's built-in system prompt (claude_code preset) as the base prompt */
|
||||
useClaudeCodeSystemPrompt?: boolean;
|
||||
|
||||
/** MCP servers to make available to the agent */
|
||||
mcpServers?: Record<string, McpServerConfig>;
|
||||
|
||||
/** Extended thinking level for Claude models */
|
||||
thinkingLevel?: ThinkingLevel;
|
||||
|
||||
/** Optional user-configured max turns override (from settings).
|
||||
* When provided, overrides the preset MAX_TURNS for the use case.
|
||||
* Range: 1-2000. */
|
||||
maxTurns?: number;
|
||||
}
|
||||
|
||||
// Re-export MCP types from @automaker/types for convenience
|
||||
@@ -403,7 +432,7 @@ export function createSpecGenerationOptions(config: CreateSdkOptionsConfig): Opt
|
||||
// See: https://github.com/AutoMaker-Org/automaker/issues/149
|
||||
permissionMode: 'default',
|
||||
model: getModelForUseCase('spec', config.model),
|
||||
maxTurns: MAX_TURNS.maximum,
|
||||
maxTurns: config.maxTurns ?? MAX_TURNS.maximum,
|
||||
cwd: config.cwd,
|
||||
allowedTools: [...TOOL_PRESETS.specGeneration],
|
||||
...claudeMdOptions,
|
||||
@@ -437,7 +466,7 @@ export function createFeatureGenerationOptions(config: CreateSdkOptionsConfig):
|
||||
// Override permissionMode - feature generation only needs read-only tools
|
||||
permissionMode: 'default',
|
||||
model: getModelForUseCase('features', config.model),
|
||||
maxTurns: MAX_TURNS.quick,
|
||||
maxTurns: config.maxTurns ?? MAX_TURNS.quick,
|
||||
cwd: config.cwd,
|
||||
allowedTools: [...TOOL_PRESETS.readOnly],
|
||||
...claudeMdOptions,
|
||||
@@ -468,7 +497,7 @@ export function createSuggestionsOptions(config: CreateSdkOptionsConfig): Option
|
||||
return {
|
||||
...getBaseOptions(),
|
||||
model: getModelForUseCase('suggestions', config.model),
|
||||
maxTurns: MAX_TURNS.extended,
|
||||
maxTurns: config.maxTurns ?? MAX_TURNS.extended,
|
||||
cwd: config.cwd,
|
||||
allowedTools: [...TOOL_PRESETS.readOnly],
|
||||
...claudeMdOptions,
|
||||
@@ -506,7 +535,7 @@ export function createChatOptions(config: CreateSdkOptionsConfig): Options {
|
||||
return {
|
||||
...getBaseOptions(),
|
||||
model: getModelForUseCase('chat', effectiveModel),
|
||||
maxTurns: MAX_TURNS.standard,
|
||||
maxTurns: config.maxTurns ?? MAX_TURNS.standard,
|
||||
cwd: config.cwd,
|
||||
allowedTools: [...TOOL_PRESETS.chat],
|
||||
...claudeMdOptions,
|
||||
@@ -541,7 +570,7 @@ export function createAutoModeOptions(config: CreateSdkOptionsConfig): Options {
|
||||
return {
|
||||
...getBaseOptions(),
|
||||
model: getModelForUseCase('auto', config.model),
|
||||
maxTurns: MAX_TURNS.maximum,
|
||||
maxTurns: config.maxTurns ?? MAX_TURNS.maximum,
|
||||
cwd: config.cwd,
|
||||
allowedTools: [...TOOL_PRESETS.fullAccess],
|
||||
...claudeMdOptions,
|
||||
|
||||
@@ -33,9 +33,16 @@ import {
|
||||
|
||||
const logger = createLogger('SettingsHelper');
|
||||
|
||||
/** Default number of agent turns used when no value is configured. */
|
||||
export const DEFAULT_MAX_TURNS = 10000;
|
||||
|
||||
/** Upper bound for the max-turns clamp; values above this are capped here. */
|
||||
export const MAX_ALLOWED_TURNS = 10000;
|
||||
|
||||
/**
|
||||
* Get the autoLoadClaudeMd setting, with project settings taking precedence over global.
|
||||
* Returns false if settings service is not available.
|
||||
* Falls back to global settings and defaults to true when unset.
|
||||
* Returns true if settings service is not available.
|
||||
*
|
||||
* @param projectPath - Path to the project
|
||||
* @param settingsService - Optional settings service instance
|
||||
@@ -48,8 +55,8 @@ export async function getAutoLoadClaudeMdSetting(
|
||||
logPrefix = '[SettingsHelper]'
|
||||
): Promise<boolean> {
|
||||
if (!settingsService) {
|
||||
logger.info(`${logPrefix} SettingsService not available, autoLoadClaudeMd disabled`);
|
||||
return false;
|
||||
logger.info(`${logPrefix} SettingsService not available, autoLoadClaudeMd defaulting to true`);
|
||||
return true;
|
||||
}
|
||||
|
||||
try {
|
||||
@@ -64,7 +71,7 @@ export async function getAutoLoadClaudeMdSetting(
|
||||
|
||||
// Fall back to global settings
|
||||
const globalSettings = await settingsService.getGlobalSettings();
|
||||
const result = globalSettings.autoLoadClaudeMd ?? false;
|
||||
const result = globalSettings.autoLoadClaudeMd ?? true;
|
||||
logger.info(`${logPrefix} autoLoadClaudeMd from global settings: ${result}`);
|
||||
return result;
|
||||
} catch (error) {
|
||||
@@ -73,6 +80,84 @@ export async function getAutoLoadClaudeMdSetting(
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the useClaudeCodeSystemPrompt setting, with project settings taking precedence over global.
|
||||
* Falls back to global settings and defaults to true when unset.
|
||||
* Returns true if settings service is not available.
|
||||
*
|
||||
* @param projectPath - Path to the project
|
||||
* @param settingsService - Optional settings service instance
|
||||
* @param logPrefix - Prefix for log messages (e.g., '[AgentService]')
|
||||
* @returns Promise resolving to the useClaudeCodeSystemPrompt setting value
|
||||
*/
|
||||
export async function getUseClaudeCodeSystemPromptSetting(
|
||||
projectPath: string,
|
||||
settingsService?: SettingsService | null,
|
||||
logPrefix = '[SettingsHelper]'
|
||||
): Promise<boolean> {
|
||||
if (!settingsService) {
|
||||
logger.info(
|
||||
`${logPrefix} SettingsService not available, useClaudeCodeSystemPrompt defaulting to true`
|
||||
);
|
||||
return true;
|
||||
}
|
||||
|
||||
try {
|
||||
// Check project settings first (takes precedence)
|
||||
const projectSettings = await settingsService.getProjectSettings(projectPath);
|
||||
if (projectSettings.useClaudeCodeSystemPrompt !== undefined) {
|
||||
logger.info(
|
||||
`${logPrefix} useClaudeCodeSystemPrompt from project settings: ${projectSettings.useClaudeCodeSystemPrompt}`
|
||||
);
|
||||
return projectSettings.useClaudeCodeSystemPrompt;
|
||||
}
|
||||
|
||||
// Fall back to global settings
|
||||
const globalSettings = await settingsService.getGlobalSettings();
|
||||
const result = globalSettings.useClaudeCodeSystemPrompt ?? true;
|
||||
logger.info(`${logPrefix} useClaudeCodeSystemPrompt from global settings: ${result}`);
|
||||
return result;
|
||||
} catch (error) {
|
||||
logger.error(`${logPrefix} Failed to load useClaudeCodeSystemPrompt setting:`, error);
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the default max turns setting from global settings.
|
||||
*
|
||||
* Reads the user's configured `defaultMaxTurns` setting, which controls the maximum
|
||||
* number of agent turns (tool-call round-trips) for feature execution.
|
||||
*
|
||||
* @param settingsService - Settings service instance (may be null)
|
||||
* @param logPrefix - Logging prefix for debugging
|
||||
* @returns The user's configured max turns, or {@link DEFAULT_MAX_TURNS} as default
|
||||
*/
|
||||
export async function getDefaultMaxTurnsSetting(
|
||||
settingsService?: SettingsService | null,
|
||||
logPrefix = '[SettingsHelper]'
|
||||
): Promise<number> {
|
||||
if (!settingsService) {
|
||||
logger.info(
|
||||
`${logPrefix} SettingsService not available, using default maxTurns=${DEFAULT_MAX_TURNS}`
|
||||
);
|
||||
return DEFAULT_MAX_TURNS;
|
||||
}
|
||||
|
||||
try {
|
||||
const globalSettings = await settingsService.getGlobalSettings();
|
||||
const raw = globalSettings.defaultMaxTurns;
|
||||
const result = Number.isFinite(raw) ? (raw as number) : DEFAULT_MAX_TURNS;
|
||||
// Clamp to valid range
|
||||
const clamped = Math.max(1, Math.min(MAX_ALLOWED_TURNS, Math.floor(result)));
|
||||
logger.debug(`${logPrefix} defaultMaxTurns from global settings: ${clamped}`);
|
||||
return clamped;
|
||||
} catch (error) {
|
||||
logger.error(`${logPrefix} Failed to load defaultMaxTurns setting:`, error);
|
||||
return DEFAULT_MAX_TURNS;
|
||||
}
|
||||
}
|
||||
|
||||
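The clamp in getDefaultMaxTurnsSetting resolves to concrete values as follows (worked examples using the constants defined above):

```ts
// clamped = Math.max(1, Math.min(MAX_ALLOWED_TURNS, Math.floor(result)))
// defaultMaxTurns: 250        -> 250
// defaultMaxTurns: 0          -> 1      (raised to the minimum of one turn)
// defaultMaxTurns: 50000      -> 10000  (capped at MAX_ALLOWED_TURNS)
// defaultMaxTurns: undefined  -> 10000  (falls back to DEFAULT_MAX_TURNS before clamping)
```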
/**
|
||||
* Filters out CLAUDE.md from context files when autoLoadClaudeMd is enabled
|
||||
* and rebuilds the formatted prompt without it.
|
||||
|
||||
@@ -33,8 +33,23 @@ const logger = createLogger('ClaudeProvider');
 */
type ProviderConfig = ClaudeApiProfile | ClaudeCompatibleProvider;

// System vars are always passed from process.env regardless of profile
const SYSTEM_ENV_VARS = ['PATH', 'HOME', 'SHELL', 'TERM', 'USER', 'LANG', 'LC_ALL'];
// System vars are always passed from process.env regardless of profile.
// Includes filesystem, locale, and temp directory vars that the Claude CLI
// needs internally for config resolution and temp file creation.
const SYSTEM_ENV_VARS = [
  'PATH',
  'HOME',
  'SHELL',
  'TERM',
  'USER',
  'LANG',
  'LC_ALL',
  'TMPDIR',
  'XDG_CONFIG_HOME',
  'XDG_DATA_HOME',
  'XDG_CACHE_HOME',
  'XDG_STATE_HOME',
];
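A sketch of how an env builder can consume this allowlist (the buildEnv helper referenced later in this diff is not shown, so its real shape is an assumption):

```ts
// Hypothetical helper: copy only the allowlisted system vars, then layer
// profile-specific variables (API keys, base URLs, ...) on top.
function buildSystemEnv(profileEnv: Record<string, string> = {}): Record<string, string> {
  const env: Record<string, string> = {};
  for (const key of SYSTEM_ENV_VARS) {
    const value = process.env[key];
    if (value !== undefined) env[key] = value; // skip unset vars rather than writing "undefined"
  }
  return { ...env, ...profileEnv };
}
```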
/**
|
||||
* Check if the config is a ClaudeCompatibleProvider (new system)
|
||||
@@ -180,7 +195,7 @@ export class ClaudeProvider extends BaseProvider {
|
||||
model,
|
||||
cwd,
|
||||
systemPrompt,
|
||||
maxTurns = 100,
|
||||
maxTurns = 1000,
|
||||
allowedTools,
|
||||
abortController,
|
||||
conversationHistory,
|
||||
@@ -213,6 +228,8 @@ export class ClaudeProvider extends BaseProvider {
|
||||
env: buildEnv(providerConfig, credentials),
|
||||
// Pass through allowedTools if provided by caller (decided by sdk-options.ts)
|
||||
...(allowedTools && { allowedTools }),
|
||||
// Restrict available built-in tools if specified (tools: [] disables all tools)
|
||||
...(options.tools && { tools: options.tools }),
|
||||
// AUTONOMOUS MODE: Always bypass permissions for fully autonomous operation
|
||||
permissionMode: 'bypassPermissions',
|
||||
allowDangerouslySkipPermissions: true,
|
||||
|
||||
@@ -33,7 +33,6 @@ import {
|
||||
supportsReasoningEffort,
|
||||
validateBareModelId,
|
||||
calculateReasoningTimeout,
|
||||
DEFAULT_TIMEOUT_MS,
|
||||
type CodexApprovalPolicy,
|
||||
type CodexSandboxMode,
|
||||
type CodexAuthStatus,
|
||||
@@ -52,6 +51,7 @@ import { CODEX_MODELS } from './codex-models.js';
|
||||
|
||||
const CODEX_COMMAND = 'codex';
|
||||
const CODEX_EXEC_SUBCOMMAND = 'exec';
|
||||
const CODEX_RESUME_SUBCOMMAND = 'resume';
|
||||
const CODEX_JSON_FLAG = '--json';
|
||||
const CODEX_MODEL_FLAG = '--model';
|
||||
const CODEX_VERSION_FLAG = '--version';
|
||||
@@ -98,7 +98,7 @@ const TEXT_ENCODING = 'utf-8';
|
||||
*
|
||||
* @see calculateReasoningTimeout from @automaker/types
|
||||
*/
|
||||
const CODEX_CLI_TIMEOUT_MS = DEFAULT_TIMEOUT_MS;
|
||||
const CODEX_CLI_TIMEOUT_MS = 120000; // 2 minutes — matches CLI provider base timeout
|
||||
const CODEX_FEATURE_GENERATION_BASE_TIMEOUT_MS = 300000; // 5 minutes for feature generation
|
||||
const SYSTEM_PROMPT_SEPARATOR = '\n\n';
|
||||
const CODEX_INSTRUCTIONS_DIR = '.codex';
|
||||
@@ -127,11 +127,16 @@ const DEFAULT_ALLOWED_TOOLS = [
|
||||
'Read',
|
||||
'Write',
|
||||
'Edit',
|
||||
'MultiEdit',
|
||||
'Glob',
|
||||
'Grep',
|
||||
'LS',
|
||||
'Bash',
|
||||
'WebSearch',
|
||||
'WebFetch',
|
||||
'TodoWrite',
|
||||
'Task',
|
||||
'Skill',
|
||||
] as const;
|
||||
const SEARCH_TOOL_NAMES = new Set(['WebSearch', 'WebFetch']);
|
||||
const MIN_MAX_TURNS = 1;
|
||||
@@ -356,9 +361,14 @@ function resolveSystemPrompt(systemPrompt?: unknown): string | null {
|
||||
return null;
|
||||
}
|
||||
|
||||
function buildPromptText(options: ExecuteOptions): string {
|
||||
return typeof options.prompt === 'string'
|
||||
? options.prompt
|
||||
: extractTextFromContent(options.prompt);
|
||||
}
|
||||
|
||||
function buildCombinedPrompt(options: ExecuteOptions, systemPromptText?: string | null): string {
|
||||
const promptText =
|
||||
typeof options.prompt === 'string' ? options.prompt : extractTextFromContent(options.prompt);
|
||||
const promptText = buildPromptText(options);
|
||||
const historyText = options.conversationHistory
|
||||
? formatHistoryAsText(options.conversationHistory)
|
||||
: '';
|
||||
@@ -371,6 +381,11 @@ function buildCombinedPrompt(options: ExecuteOptions, systemPromptText?: string
|
||||
return `${historyText}${systemSection}${HISTORY_HEADER}${promptText}`;
|
||||
}
|
||||
|
||||
function buildResumePrompt(options: ExecuteOptions): string {
|
||||
const promptText = buildPromptText(options);
|
||||
return `${HISTORY_HEADER}${promptText}`;
|
||||
}
|
||||
|
||||
function formatConfigValue(value: string | number | boolean): string {
|
||||
return String(value);
|
||||
}
|
||||
@@ -738,6 +753,16 @@ export class CodexProvider extends BaseProvider {
|
||||
);
|
||||
const baseSystemPrompt = resolveSystemPrompt(options.systemPrompt);
|
||||
const resolvedMaxTurns = resolveMaxTurns(options.maxTurns);
|
||||
if (resolvedMaxTurns === null && options.maxTurns === undefined) {
|
||||
logger.warn(
|
||||
`[executeQuery] maxTurns not provided — Codex CLI will use its internal default. ` +
|
||||
`This may cause premature completion. Model: ${options.model}`
|
||||
);
|
||||
} else {
|
||||
logger.info(
|
||||
`[executeQuery] maxTurns: requested=${options.maxTurns}, resolved=${resolvedMaxTurns}, model=${options.model}`
|
||||
);
|
||||
}
|
||||
const resolvedAllowedTools = options.allowedTools ?? Array.from(DEFAULT_ALLOWED_TOOLS);
|
||||
const restrictTools = !hasMcpServers || options.mcpUnrestrictedTools === false;
|
||||
const wantsOutputSchema = Boolean(
|
||||
@@ -784,16 +809,22 @@ export class CodexProvider extends BaseProvider {
|
||||
}
|
||||
const searchEnabled =
|
||||
codexSettings.enableWebSearch || resolveSearchEnabled(resolvedAllowedTools, restrictTools);
|
||||
const schemaPath = await writeOutputSchemaFile(options.cwd, options.outputFormat);
|
||||
const imageBlocks = codexSettings.enableImages ? extractImageBlocks(options.prompt) : [];
|
||||
const imagePaths = await writeImageFiles(options.cwd, imageBlocks);
|
||||
const isResumeQuery = Boolean(options.sdkSessionId);
|
||||
const schemaPath = isResumeQuery
|
||||
? null
|
||||
: await writeOutputSchemaFile(options.cwd, options.outputFormat);
|
||||
const imageBlocks =
|
||||
!isResumeQuery && codexSettings.enableImages ? extractImageBlocks(options.prompt) : [];
|
||||
const imagePaths = isResumeQuery ? [] : await writeImageFiles(options.cwd, imageBlocks);
|
||||
const approvalPolicy =
|
||||
hasMcpServers && options.mcpAutoApproveTools !== undefined
|
||||
? options.mcpAutoApproveTools
|
||||
? 'never'
|
||||
: 'on-request'
|
||||
: codexSettings.approvalPolicy;
|
||||
const promptText = buildCombinedPrompt(options, combinedSystemPrompt);
|
||||
const promptText = isResumeQuery
|
||||
? buildResumePrompt(options)
|
||||
: buildCombinedPrompt(options, combinedSystemPrompt);
|
||||
const commandPath = executionPlan.cliPath || CODEX_COMMAND;
|
||||
|
||||
// Build config overrides for max turns and reasoning effort
|
||||
@@ -823,21 +854,30 @@ export class CodexProvider extends BaseProvider {
|
||||
const preExecArgs: string[] = [];
|
||||
|
||||
// Add additional directories with write access
|
||||
if (codexSettings.additionalDirs && codexSettings.additionalDirs.length > 0) {
|
||||
if (
|
||||
!isResumeQuery &&
|
||||
codexSettings.additionalDirs &&
|
||||
codexSettings.additionalDirs.length > 0
|
||||
) {
|
||||
for (const dir of codexSettings.additionalDirs) {
|
||||
preExecArgs.push(CODEX_ADD_DIR_FLAG, dir);
|
||||
}
|
||||
}
|
||||
|
||||
// If images were written to disk, add the image directory so the CLI can access them
|
||||
// If images were written to disk, add the image directory so the CLI can access them.
|
||||
// Note: imagePaths is set to [] when isResumeQuery is true, so this check is sufficient.
|
||||
if (imagePaths.length > 0) {
|
||||
const imageDir = path.join(options.cwd, CODEX_INSTRUCTIONS_DIR, IMAGE_TEMP_DIR);
|
||||
preExecArgs.push(CODEX_ADD_DIR_FLAG, imageDir);
|
||||
}
|
||||
|
||||
// Model is already bare (no prefix) - validated by executeQuery
|
||||
const codexCommand = isResumeQuery
|
||||
? [CODEX_EXEC_SUBCOMMAND, CODEX_RESUME_SUBCOMMAND]
|
||||
: [CODEX_EXEC_SUBCOMMAND];
|
||||
|
||||
const args = [
|
||||
CODEX_EXEC_SUBCOMMAND,
|
||||
...codexCommand,
|
||||
CODEX_YOLO_FLAG,
|
||||
CODEX_SKIP_GIT_REPO_CHECK_FLAG,
|
||||
...preExecArgs,
|
||||
@@ -846,6 +886,7 @@ export class CodexProvider extends BaseProvider {
|
||||
CODEX_JSON_FLAG,
|
||||
...configOverrideArgs,
|
||||
...(schemaPath ? [CODEX_OUTPUT_SCHEMA_FLAG, schemaPath] : []),
|
||||
...(options.sdkSessionId ? [options.sdkSessionId] : []),
|
||||
'-', // Read prompt from stdin to avoid shell escaping issues
|
||||
];
|
||||
|
||||
|
||||
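In outline, the resulting argument lists differ between fresh and resumed runs roughly as follows (expressed with the flag constants defined earlier in the file; element order is approximate and some elements are elided in this hunk):

```ts
// Fresh query:
//   [CODEX_EXEC_SUBCOMMAND, CODEX_YOLO_FLAG, CODEX_SKIP_GIT_REPO_CHECK_FLAG, ...preExecArgs,
//    CODEX_MODEL_FLAG, model, CODEX_JSON_FLAG, ...configOverrideArgs,
//    CODEX_OUTPUT_SCHEMA_FLAG, schemaPath, '-']
// Resumed query (options.sdkSessionId set):
//   [CODEX_EXEC_SUBCOMMAND, CODEX_RESUME_SUBCOMMAND, CODEX_YOLO_FLAG, CODEX_SKIP_GIT_REPO_CHECK_FLAG,
//    CODEX_MODEL_FLAG, model, CODEX_JSON_FLAG, ...configOverrideArgs, sdkSessionId, '-']
// In both cases the prompt is piped via stdin ('-'), avoiding shell escaping and argv size limits;
// on resume, schema, image, and additional-directory arguments are not re-sent.
```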
@@ -30,6 +30,7 @@ import {
|
||||
type CopilotRuntimeModel,
|
||||
} from '@automaker/types';
|
||||
import { createLogger, isAbortError } from '@automaker/utils';
|
||||
import { resolveModelString } from '@automaker/model-resolver';
|
||||
import { CopilotClient, type PermissionRequest } from '@github/copilot-sdk';
|
||||
import {
|
||||
normalizeTodos,
|
||||
@@ -116,6 +117,12 @@ export interface CopilotError extends Error {
|
||||
suggestion?: string;
|
||||
}
|
||||
|
||||
type CopilotSession = Awaited<ReturnType<CopilotClient['createSession']>>;
|
||||
type CopilotSessionOptions = Parameters<CopilotClient['createSession']>[0];
|
||||
type ResumableCopilotClient = CopilotClient & {
|
||||
resumeSession?: (sessionId: string, options: CopilotSessionOptions) => Promise<CopilotSession>;
|
||||
};
|
||||
|
||||
// =============================================================================
|
||||
// Tool Name Normalization
|
||||
// =============================================================================
|
||||
@@ -382,9 +389,14 @@ export class CopilotProvider extends CliProvider {
|
||||
|
||||
case 'session.error': {
|
||||
const errorEvent = sdkEvent as SdkSessionErrorEvent;
|
||||
const enrichedError =
|
||||
errorEvent.data.message ||
|
||||
(errorEvent.data.code
|
||||
? `Copilot agent error (code: ${errorEvent.data.code})`
|
||||
: 'Copilot agent error');
|
||||
return {
|
||||
type: 'error',
|
||||
error: errorEvent.data.message || 'Unknown error',
|
||||
error: enrichedError,
|
||||
};
|
||||
}
|
||||
|
||||
@@ -516,7 +528,11 @@ export class CopilotProvider extends CliProvider {
|
||||
}
|
||||
|
||||
const promptText = this.extractPromptText(options);
|
||||
const bareModel = options.model || DEFAULT_BARE_MODEL;
|
||||
// resolveModelString may return dash-separated canonical names (e.g. "claude-sonnet-4-6"),
|
||||
// but the Copilot SDK expects dot-separated version suffixes (e.g. "claude-sonnet-4.6").
|
||||
// Normalize by converting the last dash-separated numeric pair to dot notation.
|
||||
const resolvedModel = resolveModelString(options.model || DEFAULT_BARE_MODEL);
|
||||
const bareModel = resolvedModel.replace(/-(\d+)-(\d+)$/, '-$1.$2');
|
||||
const workingDirectory = options.cwd || process.cwd();
|
||||
|
||||
logger.debug(
|
||||
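The normalization regex rewrites only a trailing dash-separated numeric pair, for example (illustrative model ids):

```ts
'claude-sonnet-4-6'.replace(/-(\d+)-(\d+)$/, '-$1.$2'); // -> 'claude-sonnet-4.6'
'gpt-5'.replace(/-(\d+)-(\d+)$/, '-$1.$2');             // -> 'gpt-5' (no trailing pair, left unchanged)
```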
@@ -554,12 +570,14 @@ export class CopilotProvider extends CliProvider {
|
||||
});
|
||||
};
|
||||
|
||||
// Declare session outside try so it's accessible in the catch block for cleanup.
|
||||
let session: CopilotSession | undefined;
|
||||
|
||||
try {
|
||||
await client.start();
|
||||
logger.debug(`CopilotClient started with cwd: ${workingDirectory}`);
|
||||
|
||||
// Create session with streaming enabled for real-time events
|
||||
const session = await client.createSession({
|
||||
const sessionOptions: CopilotSessionOptions = {
|
||||
model: bareModel,
|
||||
streaming: true,
|
||||
// AUTONOMOUS MODE: Auto-approve all permission requests.
|
||||
@@ -572,13 +590,33 @@ export class CopilotProvider extends CliProvider {
|
||||
logger.debug(`Permission request: ${request.kind}`);
|
||||
return { kind: 'approved' };
|
||||
},
|
||||
});
|
||||
};
|
||||
|
||||
const sessionId = session.sessionId;
|
||||
logger.debug(`Session created: ${sessionId}`);
|
||||
// Resume the previous Copilot session when possible; otherwise create a fresh one.
|
||||
const resumableClient = client as ResumableCopilotClient;
|
||||
let sessionResumed = false;
|
||||
if (options.sdkSessionId && typeof resumableClient.resumeSession === 'function') {
|
||||
try {
|
||||
session = await resumableClient.resumeSession(options.sdkSessionId, sessionOptions);
|
||||
sessionResumed = true;
|
||||
logger.debug(`Resumed Copilot session: ${session.sessionId}`);
|
||||
} catch (resumeError) {
|
||||
logger.warn(
|
||||
`Failed to resume Copilot session "${options.sdkSessionId}", creating a new session: ${resumeError}`
|
||||
);
|
||||
session = await client.createSession(sessionOptions);
|
||||
}
|
||||
} else {
|
||||
session = await client.createSession(sessionOptions);
|
||||
}
|
||||
|
||||
// session is always assigned by this point (both branches above assign it)
|
||||
const activeSession = session!;
|
||||
const sessionId = activeSession.sessionId;
|
||||
logger.debug(`Session ${sessionResumed ? 'resumed' : 'created'}: ${sessionId}`);
|
||||
|
||||
// Set up event handler to push events to queue
|
||||
session.on((event: SdkEvent) => {
|
||||
activeSession.on((event: SdkEvent) => {
|
||||
logger.debug(`SDK event: ${event.type}`);
|
||||
|
||||
if (event.type === 'session.idle') {
|
||||
@@ -596,7 +634,7 @@ export class CopilotProvider extends CliProvider {
|
||||
});
|
||||
|
||||
// Send the prompt (non-blocking)
|
||||
await session.send({ prompt: promptText });
|
||||
await activeSession.send({ prompt: promptText });
|
||||
|
||||
// Process events as they arrive
|
||||
while (!sessionComplete || eventQueue.length > 0) {
|
||||
@@ -604,7 +642,7 @@ export class CopilotProvider extends CliProvider {
|
||||
|
||||
// Check for errors first (before processing events to avoid race condition)
|
||||
if (sessionError) {
|
||||
await session.destroy();
|
||||
await activeSession.destroy();
|
||||
await client.stop();
|
||||
throw sessionError;
|
||||
}
|
||||
@@ -624,11 +662,19 @@ export class CopilotProvider extends CliProvider {
|
||||
}
|
||||
|
||||
// Cleanup
|
||||
await session.destroy();
|
||||
await activeSession.destroy();
|
||||
await client.stop();
|
||||
logger.debug('CopilotClient stopped successfully');
|
||||
} catch (error) {
|
||||
// Ensure client is stopped on error
|
||||
// Ensure session is destroyed and client is stopped on error to prevent leaks.
|
||||
// The session may have been created/resumed before the error occurred.
|
||||
if (session) {
|
||||
try {
|
||||
await session.destroy();
|
||||
} catch (sessionCleanupError) {
|
||||
logger.debug(`Failed to destroy session during cleanup: ${sessionCleanupError}`);
|
||||
}
|
||||
}
|
||||
try {
|
||||
await client.stop();
|
||||
} catch (cleanupError) {
|
||||
|
||||
@@ -450,6 +450,11 @@ export class CursorProvider extends CliProvider {
|
||||
cliArgs.push('--model', model);
|
||||
}
|
||||
|
||||
// Resume an existing chat when a provider session ID is available
|
||||
if (options.sdkSessionId) {
|
||||
cliArgs.push('--resume', options.sdkSessionId);
|
||||
}
|
||||
|
||||
// Use '-' to indicate reading prompt from stdin
|
||||
cliArgs.push('-');
|
||||
|
||||
@@ -557,10 +562,14 @@ export class CursorProvider extends CliProvider {
|
||||
const resultEvent = cursorEvent as CursorResultEvent;
|
||||
|
||||
if (resultEvent.is_error) {
|
||||
const errorText = resultEvent.error || resultEvent.result || '';
|
||||
const enrichedError =
|
||||
errorText ||
|
||||
`Cursor agent failed (duration: ${resultEvent.duration_ms}ms, subtype: ${resultEvent.subtype}, session: ${resultEvent.session_id ?? 'none'})`;
|
||||
return {
|
||||
type: 'error',
|
||||
session_id: resultEvent.session_id,
|
||||
error: resultEvent.error || resultEvent.result || 'Unknown error',
|
||||
error: enrichedError,
|
||||
};
|
||||
}
|
||||
|
||||
|
||||
@@ -24,7 +24,7 @@ import type {
|
||||
import { validateBareModelId } from '@automaker/types';
|
||||
import { GEMINI_MODEL_MAP, type GeminiAuthStatus } from '@automaker/types';
|
||||
import { createLogger, isAbortError } from '@automaker/utils';
|
||||
import { spawnJSONLProcess } from '@automaker/platform';
|
||||
import { spawnJSONLProcess, type SubprocessOptions } from '@automaker/platform';
|
||||
import { normalizeTodos } from './tool-normalization.js';
|
||||
|
||||
// Create logger for this module
|
||||
@@ -263,6 +263,14 @@ export class GeminiProvider extends CliProvider {
|
||||
// Use explicit approval-mode for clearer semantics
|
||||
cliArgs.push('--approval-mode', 'yolo');
|
||||
|
||||
// Force headless (non-interactive) mode with --prompt flag.
|
||||
// The actual prompt content is passed via stdin (see buildSubprocessOptions()),
|
||||
// but we MUST include -p to trigger headless mode. Without it, Gemini CLI
|
||||
// starts in interactive mode which adds significant startup overhead
|
||||
// (interactive REPL setup, extra context loading, etc.).
|
||||
// Per Gemini CLI docs: stdin content is "appended to" the -p value.
|
||||
cliArgs.push('--prompt', '');
|
||||
|
||||
// Explicitly include the working directory in allowed workspace directories
|
||||
// This ensures Gemini CLI allows file operations in the project directory,
|
||||
// even if it has a different workspace cached from a previous session
|
||||
@@ -270,13 +278,15 @@ export class GeminiProvider extends CliProvider {
|
||||
cliArgs.push('--include-directories', options.cwd);
|
||||
}
|
||||
|
||||
// Resume an existing Gemini session when one is available
|
||||
if (options.sdkSessionId) {
|
||||
cliArgs.push('--resume', options.sdkSessionId);
|
||||
}
|
||||
|
||||
// Note: Gemini CLI doesn't have a --thinking-level flag.
|
||||
// Thinking capabilities are determined by the model selection (e.g., gemini-2.5-pro).
|
||||
// The model handles thinking internally based on the task complexity.
|
||||
|
||||
// The prompt will be passed as the last positional argument
|
||||
// We'll append it in executeQuery after extracting the text
|
||||
|
||||
return cliArgs;
|
||||
}
|
||||
|
||||
@@ -371,10 +381,13 @@ export class GeminiProvider extends CliProvider {
|
||||
const resultEvent = geminiEvent as GeminiResultEvent;
|
||||
|
||||
if (resultEvent.status === 'error') {
|
||||
const enrichedError =
|
||||
resultEvent.error ||
|
||||
`Gemini agent failed (duration: ${resultEvent.stats?.duration_ms ?? 'unknown'}ms, session: ${resultEvent.session_id ?? 'none'})`;
|
||||
return {
|
||||
type: 'error',
|
||||
session_id: resultEvent.session_id,
|
||||
error: resultEvent.error || 'Unknown error',
|
||||
error: enrichedError,
|
||||
};
|
||||
}
|
||||
|
||||
@@ -391,10 +404,12 @@ export class GeminiProvider extends CliProvider {
|
||||
|
||||
case 'error': {
|
||||
const errorEvent = geminiEvent as GeminiResultEvent;
|
||||
const enrichedError =
|
||||
errorEvent.error || `Gemini agent failed (session: ${errorEvent.session_id ?? 'none'})`;
|
||||
return {
|
||||
type: 'error',
|
||||
session_id: errorEvent.session_id,
|
||||
error: errorEvent.error || 'Unknown error',
|
||||
error: enrichedError,
|
||||
};
|
||||
}
|
||||
|
||||
@@ -408,6 +423,32 @@ export class GeminiProvider extends CliProvider {
|
||||
// CliProvider Overrides
|
||||
// ==========================================================================
|
||||
|
||||
/**
|
||||
* Build subprocess options with stdin data for prompt and speed-optimized env vars.
|
||||
*
|
||||
* Passes the prompt via stdin instead of --prompt CLI arg to:
|
||||
* - Avoid shell argument size limits with large prompts (system prompt + context)
|
||||
* - Avoid shell escaping issues with special characters in prompts
|
||||
* - Match the pattern used by Cursor, OpenCode, and Codex providers
|
||||
*
|
||||
* Also injects environment variables to reduce Gemini CLI startup overhead:
|
||||
* - GEMINI_TELEMETRY_ENABLED=false: Disables OpenTelemetry collection
|
||||
*/
|
||||
protected buildSubprocessOptions(options: ExecuteOptions, cliArgs: string[]): SubprocessOptions {
|
||||
const subprocessOptions = super.buildSubprocessOptions(options, cliArgs);
|
||||
|
||||
// Pass prompt via stdin to avoid shell interpretation of special characters
|
||||
// and shell argument size limits with large system prompts + context files
|
||||
subprocessOptions.stdinData = this.extractPromptText(options);
|
||||
|
||||
// Disable telemetry to reduce startup overhead
|
||||
if (subprocessOptions.env) {
|
||||
subprocessOptions.env['GEMINI_TELEMETRY_ENABLED'] = 'false';
|
||||
}
|
||||
|
||||
return subprocessOptions;
|
||||
}
|
||||
|
||||
/**
|
||||
* Override error mapping for Gemini-specific error codes
|
||||
*/
|
||||
@@ -517,14 +558,21 @@ export class GeminiProvider extends CliProvider {
|
||||
);
|
||||
}
|
||||
|
||||
// Extract prompt text to pass as positional argument
|
||||
const promptText = this.extractPromptText(options);
|
||||
// Ensure .geminiignore exists in the working directory to prevent Gemini CLI
|
||||
// from scanning .git and node_modules directories during startup. This reduces
|
||||
// startup time significantly (reported: 35s → 11s) by skipping large directories
|
||||
// that Gemini CLI would otherwise traverse for context discovery.
|
||||
await this.ensureGeminiIgnore(options.cwd || process.cwd());
|
||||
|
||||
// Build CLI args and append the prompt as the last positional argument
|
||||
const cliArgs = this.buildCliArgs(options);
|
||||
cliArgs.push(promptText); // Gemini CLI uses positional args for the prompt
|
||||
// Embed system prompt into the user prompt so Gemini CLI receives
|
||||
// project context (CLAUDE.md, CODE_QUALITY.md, etc.) that would
|
||||
// otherwise be silently dropped since Gemini CLI has no --system-prompt flag.
|
||||
const effectiveOptions = this.embedSystemPromptIntoPrompt(options);
|
||||
|
||||
const subprocessOptions = this.buildSubprocessOptions(options, cliArgs);
|
||||
// Build CLI args for headless execution.
|
||||
const cliArgs = this.buildCliArgs(effectiveOptions);
|
||||
|
||||
const subprocessOptions = this.buildSubprocessOptions(effectiveOptions, cliArgs);
|
||||
|
||||
let sessionId: string | undefined;
|
||||
|
||||
@@ -577,6 +625,49 @@ export class GeminiProvider extends CliProvider {
|
||||
// Gemini-Specific Methods
|
||||
// ==========================================================================
|
||||
|
||||
/**
|
||||
* Ensure a .geminiignore file exists in the working directory.
|
||||
*
|
||||
* Gemini CLI scans the working directory for context discovery during startup.
|
||||
* Excluding .git and node_modules dramatically reduces startup time by preventing
|
||||
* traversal of large directories (reported improvement: 35s → 11s).
|
||||
*
|
||||
* Only creates the file if it doesn't already exist to avoid overwriting user config.
|
||||
*/
|
||||
private async ensureGeminiIgnore(cwd: string): Promise<void> {
|
||||
const ignorePath = path.join(cwd, '.geminiignore');
|
||||
const content = [
|
||||
'# Auto-generated by Automaker to speed up Gemini CLI startup',
|
||||
'# Prevents Gemini CLI from scanning large directories during context discovery',
|
||||
'.git',
|
||||
'node_modules',
|
||||
'dist',
|
||||
'build',
|
||||
'.next',
|
||||
'.nuxt',
|
||||
'coverage',
|
||||
'.automaker',
|
||||
'.worktrees',
|
||||
'.vscode',
|
||||
'.idea',
|
||||
'*.lock',
|
||||
'',
|
||||
].join('\n');
|
||||
try {
|
||||
// Use 'wx' flag for atomic creation - fails if file exists (EEXIST)
|
||||
await fs.writeFile(ignorePath, content, { encoding: 'utf-8', flag: 'wx' });
|
||||
logger.debug(`Created .geminiignore at ${ignorePath}`);
|
||||
} catch (writeError) {
|
||||
// EEXIST means file already exists - that's fine, preserve user's file
|
||||
if ((writeError as NodeJS.ErrnoException).code === 'EEXIST') {
|
||||
logger.debug(`.geminiignore already exists at ${ignorePath}, preserving existing file`);
|
||||
return;
|
||||
}
|
||||
// Non-fatal: startup will just be slower without the ignore file
|
||||
logger.debug(`Failed to create .geminiignore: ${writeError}`);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a GeminiError with details
|
||||
*/
|
||||
|
||||
@@ -19,10 +19,11 @@ export function createAnalyzeProjectHandler(autoModeService: AutoModeServiceComp
|
||||
return;
|
||||
}
|
||||
|
||||
// Start analysis in background
|
||||
autoModeService.analyzeProject(projectPath).catch((error) => {
|
||||
logger.error(`[AutoMode] Project analysis error:`, error);
|
||||
});
|
||||
// Kick off analysis in the background; attach a rejection handler so
|
||||
// unhandled-promise warnings don't surface and errors are at least logged.
|
||||
// Synchronous throws (e.g. "not implemented") still propagate here.
|
||||
const analysisPromise = autoModeService.analyzeProject(projectPath);
|
||||
analysisPromise.catch((err) => logError(err, 'Background analyzeProject failed'));
|
||||
|
||||
res.json({ success: true, message: 'Project analysis started' });
|
||||
} catch (error) {
|
||||
|
||||
@@ -26,23 +26,9 @@ export function createRunFeatureHandler(autoModeService: AutoModeServiceCompat)
|
||||
return;
|
||||
}
|
||||
|
||||
// Check per-worktree capacity before starting
|
||||
const capacity = await autoModeService.checkWorktreeCapacity(projectPath, featureId);
|
||||
if (!capacity.hasCapacity) {
|
||||
const worktreeDesc = capacity.branchName
|
||||
? `worktree "${capacity.branchName}"`
|
||||
: 'main worktree';
|
||||
res.status(429).json({
|
||||
success: false,
|
||||
error: `Agent limit reached for ${worktreeDesc} (${capacity.currentAgents}/${capacity.maxAgents}). Wait for running tasks to complete or increase the limit.`,
|
||||
details: {
|
||||
currentAgents: capacity.currentAgents,
|
||||
maxAgents: capacity.maxAgents,
|
||||
branchName: capacity.branchName,
|
||||
},
|
||||
});
|
||||
return;
|
||||
}
|
||||
// Note: No concurrency limit check here. Manual feature starts always run
|
||||
// immediately and bypass the concurrency limit. Their presence IS counted
|
||||
// by the auto-loop coordinator when deciding whether to dispatch new auto-mode tasks.
|
||||
|
||||
// Start execution in background
|
||||
// executeFeature derives workDir from feature.branchName
|
||||
|
||||
@@ -114,9 +114,20 @@ export function mapBacklogPlanError(rawMessage: string): string {
|
||||
return 'Claude CLI could not be launched. Make sure the Claude CLI is installed and available in PATH, or check that Node.js is correctly installed. Try running "which claude" or "claude --version" in your terminal to verify.';
|
||||
}
|
||||
|
||||
// Claude Code process crash
|
||||
// Claude Code process crash - extract exit code for diagnostics
|
||||
if (rawMessage.includes('Claude Code process exited')) {
|
||||
return 'Claude exited unexpectedly. Try again. If it keeps happening, re-run `claude login` or update your API key in Setup.';
|
||||
const exitCodeMatch = rawMessage.match(/exited with code (\d+)/);
|
||||
const exitCode = exitCodeMatch ? exitCodeMatch[1] : 'unknown';
|
||||
logger.error(`[BacklogPlan] Claude process exit code: ${exitCode}`);
|
||||
return `Claude exited unexpectedly (exit code: ${exitCode}). This is usually a transient issue. Try again. If it keeps happening, re-run \`claude login\` or update your API key in Setup.`;
|
||||
}
|
||||
|
||||
// Claude Code process killed by signal
|
||||
if (rawMessage.includes('Claude Code process terminated by signal')) {
|
||||
const signalMatch = rawMessage.match(/terminated by signal (\w+)/);
|
||||
const signal = signalMatch ? signalMatch[1] : 'unknown';
|
||||
logger.error(`[BacklogPlan] Claude process terminated by signal: ${signal}`);
|
||||
return `Claude was terminated by signal ${signal}. This may indicate a resource issue. Try again.`;
|
||||
}
|
||||
|
||||
// Rate limiting
|
||||
|
||||
@@ -3,6 +3,9 @@
|
||||
*
|
||||
* Model is configurable via phaseModels.backlogPlanningModel in settings
|
||||
* (defaults to Sonnet). Can be overridden per-call via model parameter.
|
||||
*
|
||||
* Includes automatic retry for transient CLI failures (e.g., "Claude Code
|
||||
* process exited unexpectedly") to improve reliability.
|
||||
*/
|
||||
|
||||
import type { EventEmitter } from '../../lib/events.js';
|
||||
@@ -12,8 +15,10 @@ import {
|
||||
isCursorModel,
|
||||
stripProviderPrefix,
|
||||
type ThinkingLevel,
|
||||
type SystemPromptPreset,
|
||||
} from '@automaker/types';
|
||||
import { resolvePhaseModel } from '@automaker/model-resolver';
|
||||
import { getCurrentBranch } from '@automaker/git-utils';
|
||||
import { FeatureLoader } from '../../services/feature-loader.js';
|
||||
import { ProviderFactory } from '../../providers/provider-factory.js';
|
||||
import { extractJsonWithArray } from '../../lib/json-extractor.js';
|
||||
@@ -27,10 +32,28 @@ import {
|
||||
import type { SettingsService } from '../../services/settings-service.js';
|
||||
import {
|
||||
getAutoLoadClaudeMdSetting,
|
||||
getUseClaudeCodeSystemPromptSetting,
|
||||
getPromptCustomization,
|
||||
getPhaseModelWithOverrides,
|
||||
getProviderByModelId,
|
||||
} from '../../lib/settings-helpers.js';
|
||||
|
||||
/** Maximum number of retry attempts for transient CLI failures */
|
||||
const MAX_RETRIES = 2;
|
||||
/** Delay between retries in milliseconds */
|
||||
const RETRY_DELAY_MS = 2000;
|
||||
|
||||
/**
|
||||
* Check if an error is retryable (transient CLI process failure)
|
||||
*/
|
||||
function isRetryableError(error: unknown): boolean {
|
||||
const message = error instanceof Error ? error.message : String(error);
|
||||
return (
|
||||
message.includes('Claude Code process exited') ||
|
||||
message.includes('Claude Code process terminated by signal')
|
||||
);
|
||||
}
|
||||
|
||||
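The retry loop that consumes these constants sits further down in the execution path, outside this excerpt; a minimal sketch of how MAX_RETRIES, RETRY_DELAY_MS and isRetryableError would typically combine (an assumption, not the verbatim implementation):

```ts
async function runWithRetry<T>(attempt: () => Promise<T>): Promise<T> {
  for (let tryNumber = 0; ; tryNumber++) {
    try {
      return await attempt();
    } catch (error) {
      // Only transient CLI failures are retried, and only up to MAX_RETRIES times.
      if (!isRetryableError(error) || tryNumber >= MAX_RETRIES) throw error;
      logger.warn(`[BacklogPlan] Transient CLI failure, retrying (${tryNumber + 1}/${MAX_RETRIES})`);
      await new Promise((resolve) => setTimeout(resolve, RETRY_DELAY_MS));
    }
  }
}
```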
const featureLoader = new FeatureLoader();
|
||||
|
||||
/**
|
||||
@@ -84,6 +107,53 @@ function parsePlanResponse(response: string): BacklogPlanResult {
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Try to parse a valid plan response without fallback behavior.
|
||||
* Returns null if parsing fails.
|
||||
*/
|
||||
function tryParsePlanResponse(response: string): BacklogPlanResult | null {
|
||||
if (!response || response.trim().length === 0) {
|
||||
return null;
|
||||
}
|
||||
return extractJsonWithArray<BacklogPlanResult>(response, 'changes', { logger });
|
||||
}
|
||||
|
||||
/**
|
||||
* Choose the most reliable response text between streamed assistant chunks
|
||||
* and provider final result payload.
|
||||
*/
|
||||
function selectBestResponseText(accumulatedText: string, providerResultText: string): string {
|
||||
const hasAccumulated = accumulatedText.trim().length > 0;
|
||||
const hasProviderResult = providerResultText.trim().length > 0;
|
||||
|
||||
if (!hasProviderResult) {
|
||||
return accumulatedText;
|
||||
}
|
||||
if (!hasAccumulated) {
|
||||
return providerResultText;
|
||||
}
|
||||
|
||||
const accumulatedParsed = tryParsePlanResponse(accumulatedText);
|
||||
const providerParsed = tryParsePlanResponse(providerResultText);
|
||||
|
||||
if (providerParsed && !accumulatedParsed) {
|
||||
logger.info('[BacklogPlan] Using provider result (parseable JSON)');
|
||||
return providerResultText;
|
||||
}
|
||||
if (accumulatedParsed && !providerParsed) {
|
||||
logger.info('[BacklogPlan] Keeping accumulated text (parseable JSON)');
|
||||
return accumulatedText;
|
||||
}
|
||||
|
||||
if (providerResultText.length > accumulatedText.length) {
|
||||
logger.info('[BacklogPlan] Using provider result (longer content)');
|
||||
return providerResultText;
|
||||
}
|
||||
|
||||
logger.info('[BacklogPlan] Keeping accumulated text (longer content)');
|
||||
return accumulatedText;
|
||||
}
|
||||
|
||||
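How selectBestResponseText is intended to be applied once streaming finishes (illustrative; the variable names below are placeholders and the real call site is later in the execution loop, outside this excerpt):

```ts
// accumulatedText: concatenated assistant text chunks from the stream
// providerResultText: text payload from the provider's final result event
const responseText = selectBestResponseText(accumulatedText, providerResultText);
const plan = tryParsePlanResponse(responseText) ?? parsePlanResponse(responseText); // lenient fallback
```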
/**
|
||||
* Generate a backlog modification plan based on user prompt
|
||||
*/
|
||||
@@ -93,11 +163,40 @@ export async function generateBacklogPlan(
|
||||
events: EventEmitter,
|
||||
abortController: AbortController,
|
||||
settingsService?: SettingsService,
|
||||
model?: string
|
||||
model?: string,
|
||||
branchName?: string
|
||||
): Promise<BacklogPlanResult> {
|
||||
try {
|
||||
// Load current features
|
||||
const features = await featureLoader.getAll(projectPath);
|
||||
const allFeatures = await featureLoader.getAll(projectPath);
|
||||
|
||||
// Filter features by branch if specified (worktree-scoped backlog)
|
||||
let features: Feature[];
|
||||
if (branchName) {
|
||||
// Determine the primary branch so unassigned features show for the main worktree
|
||||
let primaryBranch: string | null = null;
|
||||
try {
|
||||
primaryBranch = await getCurrentBranch(projectPath);
|
||||
} catch {
|
||||
// If git fails, fall back to 'main' so unassigned features are visible
|
||||
// when branchName matches a common default branch name
|
||||
primaryBranch = 'main';
|
||||
}
|
||||
const isMainBranch = branchName === primaryBranch;
|
||||
|
||||
features = allFeatures.filter((f) => {
|
||||
if (!f.branchName) {
|
||||
// Unassigned features belong to the main/primary worktree
|
||||
return isMainBranch;
|
||||
}
|
||||
return f.branchName === branchName;
|
||||
});
|
||||
logger.info(
|
||||
`[BacklogPlan] Filtered to ${features.length}/${allFeatures.length} features for branch: ${branchName}`
|
||||
);
|
||||
} else {
|
||||
features = allFeatures;
|
||||
}
|
||||
|
||||
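A quick worked example of the worktree-scoped filtering above (hypothetical feature records):

```ts
// Suppose primaryBranch === 'main' and the backlog holds:
//   { id: 'a' }                               // no branchName -> belongs to the main worktree
//   { id: 'b', branchName: 'main' }
//   { id: 'c', branchName: 'feature/login' }
// branchName === 'main'          -> features 'a' and 'b'
// branchName === 'feature/login' -> feature 'c' only
// branchName undefined           -> all three (no filtering)
```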
events.emit('backlog-plan:event', {
|
||||
type: 'backlog_plan_progress',
|
||||
@@ -133,6 +232,35 @@ export async function generateBacklogPlan(
effectiveModel = resolved.model;
thinkingLevel = resolved.thinkingLevel;
credentials = await settingsService?.getCredentials();
// Resolve Claude-compatible provider when client sends a model (e.g. MiniMax, GLM)
if (settingsService) {
const providerResult = await getProviderByModelId(
effectiveModel,
settingsService,
'[BacklogPlan]'
);
if (providerResult.provider) {
claudeCompatibleProvider = providerResult.provider;
if (providerResult.credentials) {
credentials = providerResult.credentials;
}
}
// Fallback: use phase settings provider if model lookup found nothing (e.g. model
// string format differs from provider's model id, but backlog planning phase has providerId).
if (!claudeCompatibleProvider) {
const phaseResult = await getPhaseModelWithOverrides(
'backlogPlanningModel',
settingsService,
projectPath,
'[BacklogPlan]'
);
const phaseResolved = resolvePhaseModel(phaseResult.phaseModel);
if (phaseResult.provider && phaseResolved.model === effectiveModel) {
claudeCompatibleProvider = phaseResult.provider;
credentials = phaseResult.credentials ?? credentials;
}
}
}
} else if (settingsService) {
// Use settings-based model with provider info
const phaseResult = await getPhaseModelWithOverrides(
@@ -162,17 +290,23 @@ export async function generateBacklogPlan(
// Strip provider prefix - providers expect bare model IDs
const bareModel = stripProviderPrefix(effectiveModel);

// Get autoLoadClaudeMd setting
// Get autoLoadClaudeMd and useClaudeCodeSystemPrompt settings
const autoLoadClaudeMd = await getAutoLoadClaudeMdSetting(
projectPath,
settingsService,
'[BacklogPlan]'
);
const useClaudeCodeSystemPrompt = await getUseClaudeCodeSystemPromptSetting(
projectPath,
settingsService,
'[BacklogPlan]'
);

// For Cursor models, we need to combine prompts with explicit instructions
// because Cursor doesn't support systemPrompt separation like Claude SDK
let finalPrompt = userPrompt;
let finalSystemPrompt: string | undefined = systemPrompt;
let finalSystemPrompt: string | SystemPromptPreset | undefined = systemPrompt;
let finalSettingSources: Array<'user' | 'project' | 'local'> | undefined;

if (isCursorModel(effectiveModel)) {
logger.info('[BacklogPlan] Using Cursor model - adding explicit no-file-write instructions');
@@ -187,54 +321,145 @@ CRITICAL INSTRUCTIONS:

${userPrompt}`;
finalSystemPrompt = undefined; // System prompt is now embedded in the user prompt
} else if (claudeCompatibleProvider) {
// Claude-compatible providers (MiniMax, GLM, etc.) use a plain API; do not use
// the claude_code preset (which is for Claude CLI/subprocess and can break the request).
finalSystemPrompt = systemPrompt;
} else if (useClaudeCodeSystemPrompt) {
// Use claude_code preset for native Claude so the SDK subprocess
// authenticates via CLI OAuth or API key the same way all other SDK calls do.
finalSystemPrompt = {
type: 'preset',
preset: 'claude_code',
append: systemPrompt,
};
}
// Include settingSources when autoLoadClaudeMd is enabled
if (autoLoadClaudeMd) {
finalSettingSources = ['user', 'project'];
}

// Execute the query
const stream = provider.executeQuery({
// Execute the query with retry logic for transient CLI failures
const queryOptions = {
prompt: finalPrompt,
model: bareModel,
cwd: projectPath,
systemPrompt: finalSystemPrompt,
maxTurns: 1,
allowedTools: [], // No tools needed for this
tools: [] as string[], // Disable all built-in tools - plan generation only needs text output
abortController,
settingSources: autoLoadClaudeMd ? ['user', 'project'] : undefined,
readOnly: true, // Plan generation only generates text, doesn't write files
settingSources: finalSettingSources,
thinkingLevel, // Pass thinking level for extended thinking
claudeCompatibleProvider, // Pass provider for alternative endpoint configuration
credentials, // Pass credentials for resolving 'credentials' apiKeySource
});
};

let responseText = '';
let bestResponseText = ''; // Preserve best response across all retry attempts
let recoveredResult: BacklogPlanResult | null = null;
let lastError: unknown = null;

for await (const msg of stream) {
for (let attempt = 0; attempt <= MAX_RETRIES; attempt++) {
if (abortController.signal.aborted) {
throw new Error('Generation aborted');
}

if (msg.type === 'assistant') {
if (msg.message?.content) {
for (const block of msg.message.content) {
if (block.type === 'text') {
responseText += block.text;
if (attempt > 0) {
logger.info(
`[BacklogPlan] Retry attempt ${attempt}/${MAX_RETRIES} after transient failure`
);
events.emit('backlog-plan:event', {
type: 'backlog_plan_progress',
content: `Retrying... (attempt ${attempt + 1}/${MAX_RETRIES + 1})`,
});
await new Promise((resolve) => setTimeout(resolve, RETRY_DELAY_MS));
}

let accumulatedText = '';
let providerResultText = '';

try {
const stream = provider.executeQuery(queryOptions);

for await (const msg of stream) {
if (abortController.signal.aborted) {
throw new Error('Generation aborted');
}

if (msg.type === 'assistant') {
if (msg.message?.content) {
for (const block of msg.message.content) {
if (block.type === 'text') {
accumulatedText += block.text;
}
}
}
} else if (msg.type === 'result' && msg.subtype === 'success' && msg.result) {
providerResultText = msg.result;
logger.info(
'[BacklogPlan] Received result from provider, length:',
providerResultText.length
);
logger.info('[BacklogPlan] Accumulated response length:', accumulatedText.length);
}
}
} else if (msg.type === 'result' && msg.subtype === 'success' && msg.result) {
// Use result if it's a final accumulated message (from Cursor provider)
logger.info('[BacklogPlan] Received result from Cursor, length:', msg.result.length);
logger.info('[BacklogPlan] Previous responseText length:', responseText.length);
if (msg.result.length > responseText.length) {
logger.info('[BacklogPlan] Using Cursor result (longer than accumulated text)');
responseText = msg.result;
} else {
logger.info('[BacklogPlan] Keeping accumulated text (longer than Cursor result)');

responseText = selectBestResponseText(accumulatedText, providerResultText);

// If we got here, the stream completed successfully
lastError = null;
break;
} catch (error) {
lastError = error;
const errorMessage = error instanceof Error ? error.message : String(error);
responseText = selectBestResponseText(accumulatedText, providerResultText);

// Preserve the best response text across all attempts so that if a retry
// crashes immediately (empty response), we can still recover from an earlier attempt
bestResponseText = selectBestResponseText(bestResponseText, responseText);

// Claude SDK can occasionally exit non-zero after emitting a complete response.
// If we already have valid JSON, recover instead of failing the entire planning flow.
if (isRetryableError(error)) {
const parsed = tryParsePlanResponse(bestResponseText);
if (parsed) {
logger.warn(
'[BacklogPlan] Recovered from transient CLI exit using accumulated valid response'
);
recoveredResult = parsed;
lastError = null;
break;
}

// On final retryable failure, degrade gracefully if we have text from any attempt.
if (attempt >= MAX_RETRIES && bestResponseText.trim().length > 0) {
logger.warn(
'[BacklogPlan] Final retryable CLI failure with non-empty response, attempting fallback parse'
);
recoveredResult = parsePlanResponse(bestResponseText);
lastError = null;
break;
}
}

// Only retry on transient CLI failures, not on user aborts or other errors
if (!isRetryableError(error) || attempt >= MAX_RETRIES) {
throw error;
}

logger.warn(
`[BacklogPlan] Transient CLI failure (attempt ${attempt + 1}/${MAX_RETRIES + 1}): ${errorMessage}`
);
}
}

// If we exhausted retries, throw the last error
if (lastError) {
throw lastError;
}

// Parse the response
const result = parsePlanResponse(responseText);
const result = recoveredResult ?? parsePlanResponse(responseText);

await saveBacklogPlan(projectPath, {
savedAt: new Date().toISOString(),
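
The retry loop above streams the provider response, keeps the best text seen across attempts, and recovers when a transient CLI exit happens after a complete JSON response was already emitted. A minimal, self-contained sketch of that pattern follows; the helper names (runQuery, isTransient, tryParse) and the MAX_RETRIES / RETRY_DELAY_MS values are illustrative stand-ins, not the repository's actual exports.

// Sketch only: generic retry-with-recovery around a streaming text query.
// All names below are hypothetical; the real implementation is in the diff above.
const MAX_RETRIES = 2; // assumed value
const RETRY_DELAY_MS = 1_000; // assumed value

async function generateWithRecovery(
  runQuery: (onText: (chunk: string) => void) => Promise<void>,
  isTransient: (err: unknown) => boolean,
  tryParse: (text: string) => object | null
): Promise<object> {
  let best = '';
  let lastError: unknown = null;

  for (let attempt = 0; attempt <= MAX_RETRIES; attempt++) {
    if (attempt > 0) await new Promise((r) => setTimeout(r, RETRY_DELAY_MS));

    let accumulated = '';
    try {
      await runQuery((chunk) => (accumulated += chunk));
      if (accumulated.length > best.length) best = accumulated;
      lastError = null;
      break;
    } catch (err) {
      lastError = err;
      if (accumulated.length > best.length) best = accumulated;

      // A transient CLI exit can follow a complete response; if what we
      // already have parses, recover instead of failing the whole flow.
      if (isTransient(err)) {
        const recovered = tryParse(best);
        if (recovered) return recovered;
      }
      if (!isTransient(err) || attempt >= MAX_RETRIES) throw err;
    }
  }

  if (lastError) throw lastError;
  const parsed = tryParse(best);
  if (!parsed) throw new Error('Response was not valid JSON');
  return parsed;
}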
@@ -17,10 +17,11 @@ import type { SettingsService } from '../../../services/settings-service.js';
export function createGenerateHandler(events: EventEmitter, settingsService?: SettingsService) {
return async (req: Request, res: Response): Promise<void> => {
try {
const { projectPath, prompt, model } = req.body as {
const { projectPath, prompt, model, branchName } = req.body as {
projectPath: string;
prompt: string;
model?: string;
branchName?: string;
};

if (!projectPath) {
@@ -42,28 +43,30 @@ export function createGenerateHandler(events: EventEmitter, settingsService?: Se
return;
}

setRunningState(true);
const abortController = new AbortController();
setRunningState(true, abortController);
setRunningDetails({
projectPath,
prompt,
model,
startedAt: new Date().toISOString(),
});
const abortController = new AbortController();
setRunningState(true, abortController);

// Start generation in background
// Note: generateBacklogPlan handles its own error event emission,
// so we only log here to avoid duplicate error toasts
generateBacklogPlan(projectPath, prompt, events, abortController, settingsService, model)
.catch((error) => {
// Just log - error event already emitted by generateBacklogPlan
logError(error, 'Generate backlog plan failed (background)');
})
.finally(() => {
setRunningState(false, null);
setRunningDetails(null);
});
// Note: generateBacklogPlan handles its own error event emission
// and state cleanup in its finally block, so we only log here
generateBacklogPlan(
projectPath,
prompt,
events,
abortController,
settingsService,
model,
branchName
).catch((error) => {
// Just log - error event already emitted by generateBacklogPlan
logError(error, 'Generate backlog plan failed (background)');
});

res.json({ success: true });
} catch (error) {
@@ -142,11 +142,33 @@ function mapDescribeImageError(rawMessage: string | undefined): {

if (!rawMessage) return baseResponse;

if (rawMessage.includes('Claude Code process exited')) {
if (
rawMessage.includes('Claude Code process exited') ||
rawMessage.includes('Claude Code process terminated by signal')
) {
const exitCodeMatch = rawMessage.match(/exited with code (\d+)/);
const signalMatch = rawMessage.match(/terminated by signal (\w+)/);
const detail = exitCodeMatch
? ` (exit code: ${exitCodeMatch[1]})`
: signalMatch
? ` (signal: ${signalMatch[1]})`
: '';

// Crash/OS-kill signals suggest a process crash, not an auth failure —
// omit auth recovery advice and suggest retry/reporting instead.
const crashSignals = ['SIGSEGV', 'SIGABRT', 'SIGKILL', 'SIGBUS', 'SIGTRAP'];
const isCrashSignal = signalMatch ? crashSignals.includes(signalMatch[1]) : false;

if (isCrashSignal) {
return {
statusCode: 503,
userMessage: `Claude crashed unexpectedly${detail} while describing the image. This may be a transient condition. Please try again. If the problem persists, collect logs and report the issue.`,
};
}

return {
statusCode: 503,
userMessage:
'Claude exited unexpectedly while describing the image. Try again. If it keeps happening, re-run `claude login` or update your API key in Setup so Claude can restart cleanly.',
userMessage: `Claude exited unexpectedly${detail} while describing the image. This is usually a transient issue. Try again. If it keeps happening, re-run \`claude login\` or update your API key in Setup.`,
};
}

@@ -33,13 +33,22 @@ export function createFeaturesRoutes(
validatePathParams('projectPath'),
createListHandler(featureLoader, autoModeService)
);
router.get(
'/list',
validatePathParams('projectPath'),
createListHandler(featureLoader, autoModeService)
);
router.post('/get', validatePathParams('projectPath'), createGetHandler(featureLoader));
router.post(
'/create',
validatePathParams('projectPath'),
createCreateHandler(featureLoader, events)
);
router.post('/update', validatePathParams('projectPath'), createUpdateHandler(featureLoader));
router.post(
'/update',
validatePathParams('projectPath'),
createUpdateHandler(featureLoader, events)
);
router.post(
'/bulk-update',
validatePathParams('projectPath'),

@@ -1,5 +1,7 @@
/**
* POST /list endpoint - List all features for a project
* POST/GET /list endpoint - List all features for a project
*
* projectPath may come from req.body (POST) or req.query (GET fallback).
*
* Also performs orphan detection when a project is loaded to identify
* features whose branches no longer exist. This runs on every project load/switch.
@@ -19,7 +21,17 @@ export function createListHandler(
) {
return async (req: Request, res: Response): Promise<void> => {
try {
const { projectPath } = req.body as { projectPath: string };
const bodyProjectPath =
typeof req.body === 'object' && req.body !== null
? (req.body as { projectPath?: unknown }).projectPath
: undefined;
const queryProjectPath = req.query.projectPath;
const projectPath =
typeof bodyProjectPath === 'string'
? bodyProjectPath
: typeof queryProjectPath === 'string'
? queryProjectPath
: undefined;

if (!projectPath) {
res.status(400).json({ success: false, error: 'projectPath is required' });
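
For reference, both request shapes the updated list handler accepts, as a hedged client-side sketch; the /api/features mount path and the base URL are assumptions, not confirmed by this diff.

// Sketch only: exercising the POST body form and the GET query fallback.
const base = 'http://localhost:3000'; // placeholder base URL
const projectPath = '/path/to/project';

// POST form: projectPath in the JSON body (original behaviour)
await fetch(`${base}/api/features/list`, {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({ projectPath }),
});

// GET fallback: projectPath in the query string (new behaviour)
await fetch(`${base}/api/features/list?projectPath=${encodeURIComponent(projectPath)}`);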
@@ -5,6 +5,7 @@
import type { Request, Response } from 'express';
import { FeatureLoader } from '../../../services/feature-loader.js';
import type { Feature, FeatureStatus } from '@automaker/types';
import type { EventEmitter } from '../../../lib/events.js';
import { getErrorMessage, logError } from '../common.js';
import { createLogger } from '@automaker/utils';

@@ -13,7 +14,7 @@ const logger = createLogger('features/update');
// Statuses that should trigger syncing to app_spec.txt
const SYNC_TRIGGER_STATUSES: FeatureStatus[] = ['verified', 'completed'];

export function createUpdateHandler(featureLoader: FeatureLoader) {
export function createUpdateHandler(featureLoader: FeatureLoader, events?: EventEmitter) {
return async (req: Request, res: Response): Promise<void> => {
try {
const {
@@ -54,8 +55,18 @@ export function createUpdateHandler(featureLoader: FeatureLoader) {
preEnhancementDescription
);

// Trigger sync to app_spec.txt when status changes to verified or completed
// Emit completion event and sync to app_spec.txt when status transitions to verified/completed
if (newStatus && SYNC_TRIGGER_STATUSES.includes(newStatus) && previousStatus !== newStatus) {
events?.emit('feature:completed', {
featureId,
featureName: updated.title,
projectPath,
passes: true,
message:
newStatus === 'verified' ? 'Feature verified manually' : 'Feature completed manually',
executionMode: 'manual',
});

try {
const synced = await featureLoader.syncFeatureToAppSpec(projectPath, updated);
if (synced) {
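
A small sketch of a consumer for the new 'feature:completed' event; the payload shape is copied from the emit above, while the Node 'events' import stands in for the server's own EventEmitter from lib/events.js.

import type { EventEmitter } from 'events'; // stand-in for ../lib/events.js

interface FeatureCompletedEvent {
  featureId: string;
  featureName: string;
  projectPath: string;
  passes: boolean;
  message: string;
  executionMode: string;
}

// Sketch only: log manual verified/completed transitions as they are emitted.
export function logManualCompletions(events: EventEmitter): void {
  events.on('feature:completed', (payload: FeatureCompletedEvent) => {
    console.log(`[Features] ${payload.featureName} (${payload.featureId}): ${payload.message}`);
  });
}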
@@ -20,6 +20,9 @@ import { createImageHandler } from './routes/image.js';
import { createSaveBoardBackgroundHandler } from './routes/save-board-background.js';
import { createDeleteBoardBackgroundHandler } from './routes/delete-board-background.js';
import { createBrowseProjectFilesHandler } from './routes/browse-project-files.js';
import { createCopyHandler } from './routes/copy.js';
import { createMoveHandler } from './routes/move.js';
import { createDownloadHandler } from './routes/download.js';

export function createFsRoutes(_events: EventEmitter): Router {
const router = Router();
@@ -39,6 +42,9 @@ export function createFsRoutes(_events: EventEmitter): Router {
router.post('/save-board-background', createSaveBoardBackgroundHandler());
router.post('/delete-board-background', createDeleteBoardBackgroundHandler());
router.post('/browse-project-files', createBrowseProjectFilesHandler());
router.post('/copy', createCopyHandler());
router.post('/move', createMoveHandler());
router.post('/download', createDownloadHandler());

return router;
}
apps/server/src/routes/fs/routes/copy.ts (new file, 99 lines)
@@ -0,0 +1,99 @@
/**
* POST /copy endpoint - Copy file or directory to a new location
*/

import type { Request, Response } from 'express';
import * as secureFs from '../../../lib/secure-fs.js';
import path from 'path';
import { PathNotAllowedError } from '@automaker/platform';
import { mkdirSafe } from '@automaker/utils';
import { getErrorMessage, logError } from '../common.js';

/**
* Recursively copy a directory and its contents
*/
async function copyDirectoryRecursive(src: string, dest: string): Promise<void> {
await mkdirSafe(dest);
const entries = await secureFs.readdir(src, { withFileTypes: true });

for (const entry of entries) {
const srcPath = path.join(src, entry.name);
const destPath = path.join(dest, entry.name);

if (entry.isDirectory()) {
await copyDirectoryRecursive(srcPath, destPath);
} else {
await secureFs.copyFile(srcPath, destPath);
}
}
}

export function createCopyHandler() {
return async (req: Request, res: Response): Promise<void> => {
try {
const { sourcePath, destinationPath, overwrite } = req.body as {
sourcePath: string;
destinationPath: string;
overwrite?: boolean;
};

if (!sourcePath || !destinationPath) {
res
.status(400)
.json({ success: false, error: 'sourcePath and destinationPath are required' });
return;
}

// Prevent copying a folder into itself or its own descendant (infinite recursion)
const resolvedSrc = path.resolve(sourcePath);
const resolvedDest = path.resolve(destinationPath);
if (resolvedDest === resolvedSrc || resolvedDest.startsWith(resolvedSrc + path.sep)) {
res.status(400).json({
success: false,
error: 'Cannot copy a folder into itself or one of its own descendants',
});
return;
}

// Check if destination already exists
try {
await secureFs.stat(destinationPath);
// Destination exists
if (!overwrite) {
res.status(409).json({
success: false,
error: 'Destination already exists',
exists: true,
});
return;
}
// If overwrite is true, remove the existing destination first to avoid merging
await secureFs.rm(destinationPath, { recursive: true });
} catch {
// Destination doesn't exist - good to proceed
}

// Ensure parent directory exists
await mkdirSafe(path.dirname(path.resolve(destinationPath)));

// Check if source is a directory
const stats = await secureFs.stat(sourcePath);

if (stats.isDirectory()) {
await copyDirectoryRecursive(sourcePath, destinationPath);
} else {
await secureFs.copyFile(sourcePath, destinationPath);
}

res.json({ success: true });
} catch (error) {
if (error instanceof PathNotAllowedError) {
res.status(403).json({ success: false, error: getErrorMessage(error) });
return;
}

logError(error, 'Copy file failed');
res.status(500).json({ success: false, error: getErrorMessage(error) });
}
};
}
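
The handler's guard against copying a folder into itself or one of its descendants relies on resolving both paths and comparing with a trailing path separator; a standalone sketch of just that check follows, with illustrative outcomes.

import path from 'path';

// Sketch only: same self/descendant check as the handler above.
function isSelfOrDescendant(sourcePath: string, destinationPath: string): boolean {
  const src = path.resolve(sourcePath);
  const dest = path.resolve(destinationPath);
  // Appending path.sep prevents false positives on sibling names that
  // merely share a prefix (e.g. /proj/docs vs /proj/docs-backup).
  return dest === src || dest.startsWith(src + path.sep);
}

isSelfOrDescendant('/proj/docs', '/proj/docs');         // true  - same folder
isSelfOrDescendant('/proj/docs', '/proj/docs/archive'); // true  - own descendant
isSelfOrDescendant('/proj/docs', '/proj/docs-backup');  // false - sibling with shared prefix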
apps/server/src/routes/fs/routes/download.ts (new file, 142 lines)
@@ -0,0 +1,142 @@
|
||||
/**
|
||||
* POST /download endpoint - Download a file, or GET /download for streaming
|
||||
* For folders, creates a zip archive on the fly
|
||||
*/
|
||||
|
||||
import type { Request, Response } from 'express';
|
||||
import * as secureFs from '../../../lib/secure-fs.js';
|
||||
import path from 'path';
|
||||
import { PathNotAllowedError } from '@automaker/platform';
|
||||
import { getErrorMessage, logError } from '../common.js';
|
||||
import { createReadStream } from 'fs';
|
||||
import { execFile } from 'child_process';
|
||||
import { promisify } from 'util';
|
||||
import { tmpdir } from 'os';
|
||||
|
||||
const execFileAsync = promisify(execFile);
|
||||
|
||||
/**
|
||||
* Get total size of a directory recursively
|
||||
*/
|
||||
async function getDirectorySize(dirPath: string): Promise<number> {
|
||||
let totalSize = 0;
|
||||
const entries = await secureFs.readdir(dirPath, { withFileTypes: true });
|
||||
|
||||
for (const entry of entries) {
|
||||
const entryPath = path.join(dirPath, entry.name);
|
||||
if (entry.isDirectory()) {
|
||||
totalSize += await getDirectorySize(entryPath);
|
||||
} else {
|
||||
const stats = await secureFs.stat(entryPath);
|
||||
totalSize += Number(stats.size);
|
||||
}
|
||||
}
|
||||
|
||||
return totalSize;
|
||||
}
|
||||
|
||||
export function createDownloadHandler() {
|
||||
return async (req: Request, res: Response): Promise<void> => {
|
||||
try {
|
||||
const { filePath } = req.body as { filePath: string };
|
||||
|
||||
if (!filePath) {
|
||||
res.status(400).json({ success: false, error: 'filePath is required' });
|
||||
return;
|
||||
}
|
||||
|
||||
const stats = await secureFs.stat(filePath);
|
||||
const fileName = path.basename(filePath);
|
||||
|
||||
if (stats.isDirectory()) {
|
||||
// For directories, create a zip archive
|
||||
const dirSize = await getDirectorySize(filePath);
|
||||
const MAX_DIR_SIZE = 100 * 1024 * 1024; // 100MB limit
|
||||
|
||||
if (dirSize > MAX_DIR_SIZE) {
|
||||
res.status(413).json({
|
||||
success: false,
|
||||
error: `Directory is too large to download (${(dirSize / (1024 * 1024)).toFixed(1)}MB). Maximum size is ${MAX_DIR_SIZE / (1024 * 1024)}MB.`,
|
||||
size: dirSize,
|
||||
});
|
||||
return;
|
||||
}
|
||||
|
||||
// Create a temporary zip file
|
||||
const zipFileName = `${fileName}.zip`;
|
||||
const tmpZipPath = path.join(tmpdir(), `automaker-download-${Date.now()}-${zipFileName}`);
|
||||
|
||||
try {
|
||||
// Use system zip command (available on macOS and Linux)
|
||||
// Use execFile to avoid shell injection via user-provided paths
|
||||
await execFileAsync('zip', ['-r', tmpZipPath, fileName], {
|
||||
cwd: path.dirname(filePath),
|
||||
maxBuffer: 50 * 1024 * 1024,
|
||||
});
|
||||
|
||||
const zipStats = await secureFs.stat(tmpZipPath);
|
||||
|
||||
res.setHeader('Content-Type', 'application/zip');
|
||||
res.setHeader('Content-Disposition', `attachment; filename="${zipFileName}"`);
|
||||
res.setHeader('Content-Length', zipStats.size.toString());
|
||||
res.setHeader('X-Directory-Size', dirSize.toString());
|
||||
|
||||
const stream = createReadStream(tmpZipPath);
|
||||
stream.pipe(res);
|
||||
|
||||
stream.on('end', async () => {
|
||||
// Cleanup temp file
|
||||
try {
|
||||
await secureFs.rm(tmpZipPath);
|
||||
} catch {
|
||||
// Ignore cleanup errors
|
||||
}
|
||||
});
|
||||
|
||||
stream.on('error', async (err) => {
|
||||
logError(err, 'Download stream error');
|
||||
try {
|
||||
await secureFs.rm(tmpZipPath);
|
||||
} catch {
|
||||
// Ignore cleanup errors
|
||||
}
|
||||
if (!res.headersSent) {
|
||||
res.status(500).json({ success: false, error: 'Stream error during download' });
|
||||
}
|
||||
});
|
||||
} catch (zipError) {
|
||||
// Cleanup on zip failure
|
||||
try {
|
||||
await secureFs.rm(tmpZipPath);
|
||||
} catch {
|
||||
// Ignore
|
||||
}
|
||||
throw zipError;
|
||||
}
|
||||
} else {
|
||||
// For individual files, stream directly
|
||||
res.setHeader('Content-Type', 'application/octet-stream');
|
||||
res.setHeader('Content-Disposition', `attachment; filename="${fileName}"`);
|
||||
res.setHeader('Content-Length', stats.size.toString());
|
||||
|
||||
const stream = createReadStream(filePath);
|
||||
stream.pipe(res);
|
||||
|
||||
stream.on('error', (err) => {
|
||||
logError(err, 'Download stream error');
|
||||
if (!res.headersSent) {
|
||||
res.status(500).json({ success: false, error: 'Stream error during download' });
|
||||
}
|
||||
});
|
||||
}
|
||||
} catch (error) {
|
||||
if (error instanceof PathNotAllowedError) {
|
||||
res.status(403).json({ success: false, error: getErrorMessage(error) });
|
||||
return;
|
||||
}
|
||||
|
||||
logError(error, 'Download failed');
|
||||
res.status(500).json({ success: false, error: getErrorMessage(error) });
|
||||
}
|
||||
};
|
||||
}
|
||||
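
A hedged browser-side consumer of the /download endpoint added above; the /api/fs mount path is an assumption, and the snippet simply saves whatever the server streams (a raw file, or a zip archive for directories).

// Sketch only: POST the path, then save the streamed blob under the
// name suggested by Content-Disposition (file.ext or folder.zip).
async function downloadPath(filePath: string): Promise<void> {
  const res = await fetch('/api/fs/download', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ filePath }),
  });
  if (!res.ok) throw new Error(`Download failed with status ${res.status}`);

  const disposition = res.headers.get('Content-Disposition') ?? '';
  const name = /filename="([^"]+)"/.exec(disposition)?.[1] ?? 'download';

  const url = URL.createObjectURL(await res.blob());
  const a = document.createElement('a');
  a.href = url;
  a.download = name;
  a.click();
  URL.revokeObjectURL(url);
}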
apps/server/src/routes/fs/routes/move.ts (new file, 79 lines)
@@ -0,0 +1,79 @@
|
||||
/**
|
||||
* POST /move endpoint - Move (rename) file or directory to a new location
|
||||
*/
|
||||
|
||||
import type { Request, Response } from 'express';
|
||||
import * as secureFs from '../../../lib/secure-fs.js';
|
||||
import path from 'path';
|
||||
import { PathNotAllowedError } from '@automaker/platform';
|
||||
import { mkdirSafe } from '@automaker/utils';
|
||||
import { getErrorMessage, logError } from '../common.js';
|
||||
|
||||
export function createMoveHandler() {
|
||||
return async (req: Request, res: Response): Promise<void> => {
|
||||
try {
|
||||
const { sourcePath, destinationPath, overwrite } = req.body as {
|
||||
sourcePath: string;
|
||||
destinationPath: string;
|
||||
overwrite?: boolean;
|
||||
};
|
||||
|
||||
if (!sourcePath || !destinationPath) {
|
||||
res
|
||||
.status(400)
|
||||
.json({ success: false, error: 'sourcePath and destinationPath are required' });
|
||||
return;
|
||||
}
|
||||
|
||||
// Prevent moving to same location or into its own descendant
|
||||
const resolvedSrc = path.resolve(sourcePath);
|
||||
const resolvedDest = path.resolve(destinationPath);
|
||||
if (resolvedDest === resolvedSrc) {
|
||||
// No-op: source and destination are the same
|
||||
res.json({ success: true });
|
||||
return;
|
||||
}
|
||||
if (resolvedDest.startsWith(resolvedSrc + path.sep)) {
|
||||
res.status(400).json({
|
||||
success: false,
|
||||
error: 'Cannot move a folder into one of its own descendants',
|
||||
});
|
||||
return;
|
||||
}
|
||||
|
||||
// Check if destination already exists
|
||||
try {
|
||||
await secureFs.stat(destinationPath);
|
||||
// Destination exists
|
||||
if (!overwrite) {
|
||||
res.status(409).json({
|
||||
success: false,
|
||||
error: 'Destination already exists',
|
||||
exists: true,
|
||||
});
|
||||
return;
|
||||
}
|
||||
// If overwrite is true, remove the existing destination first
|
||||
await secureFs.rm(destinationPath, { recursive: true });
|
||||
} catch {
|
||||
// Destination doesn't exist - good to proceed
|
||||
}
|
||||
|
||||
// Ensure parent directory exists
|
||||
await mkdirSafe(path.dirname(path.resolve(destinationPath)));
|
||||
|
||||
// Use rename for the move operation
|
||||
await secureFs.rename(sourcePath, destinationPath);
|
||||
|
||||
res.json({ success: true });
|
||||
} catch (error) {
|
||||
if (error instanceof PathNotAllowedError) {
|
||||
res.status(403).json({ success: false, error: getErrorMessage(error) });
|
||||
return;
|
||||
}
|
||||
|
||||
logError(error, 'Move file failed');
|
||||
res.status(500).json({ success: false, error: getErrorMessage(error) });
|
||||
}
|
||||
};
|
||||
}
|
||||
@@ -24,7 +24,9 @@ export function createWriteHandler() {

// Ensure parent directory exists (symlink-safe)
await mkdirSafe(path.dirname(path.resolve(filePath)));
await secureFs.writeFile(filePath, content, 'utf-8');
// Default content to empty string if undefined/null to prevent writing
// "undefined" as literal text (e.g. when content field is missing from request)
await secureFs.writeFile(filePath, content ?? '', 'utf-8');

res.json({ success: true });
} catch (error) {
@@ -7,6 +7,8 @@ import { validatePathParams } from '../../middleware/validate-paths.js';
|
||||
import { createDiffsHandler } from './routes/diffs.js';
|
||||
import { createFileDiffHandler } from './routes/file-diff.js';
|
||||
import { createStageFilesHandler } from './routes/stage-files.js';
|
||||
import { createDetailsHandler } from './routes/details.js';
|
||||
import { createEnhancedStatusHandler } from './routes/enhanced-status.js';
|
||||
|
||||
export function createGitRoutes(): Router {
|
||||
const router = Router();
|
||||
@@ -18,6 +20,8 @@ export function createGitRoutes(): Router {
|
||||
validatePathParams('projectPath', 'files[]'),
|
||||
createStageFilesHandler()
|
||||
);
|
||||
router.post('/details', validatePathParams('projectPath', 'filePath?'), createDetailsHandler());
|
||||
router.post('/enhanced-status', validatePathParams('projectPath'), createEnhancedStatusHandler());
|
||||
|
||||
return router;
|
||||
}
|
||||
|
||||
apps/server/src/routes/git/routes/details.ts (new file, 248 lines)
@@ -0,0 +1,248 @@
|
||||
/**
|
||||
* POST /details endpoint - Get detailed git info for a file or project
|
||||
* Returns branch, last commit info, diff stats, and conflict status
|
||||
*/
|
||||
|
||||
import type { Request, Response } from 'express';
|
||||
import { exec, execFile } from 'child_process';
|
||||
import { promisify } from 'util';
|
||||
import * as secureFs from '../../../lib/secure-fs.js';
|
||||
import { getErrorMessage, logError } from '../common.js';
|
||||
|
||||
const execAsync = promisify(exec);
|
||||
const execFileAsync = promisify(execFile);
|
||||
|
||||
interface GitFileDetails {
|
||||
branch: string;
|
||||
lastCommitHash: string;
|
||||
lastCommitMessage: string;
|
||||
lastCommitAuthor: string;
|
||||
lastCommitTimestamp: string;
|
||||
linesAdded: number;
|
||||
linesRemoved: number;
|
||||
isConflicted: boolean;
|
||||
isStaged: boolean;
|
||||
isUnstaged: boolean;
|
||||
statusLabel: string;
|
||||
}
|
||||
|
||||
export function createDetailsHandler() {
|
||||
return async (req: Request, res: Response): Promise<void> => {
|
||||
try {
|
||||
const { projectPath, filePath } = req.body as {
|
||||
projectPath: string;
|
||||
filePath?: string;
|
||||
};
|
||||
|
||||
if (!projectPath) {
|
||||
res.status(400).json({ success: false, error: 'projectPath required' });
|
||||
return;
|
||||
}
|
||||
|
||||
try {
|
||||
// Get current branch
|
||||
const { stdout: branchRaw } = await execAsync('git rev-parse --abbrev-ref HEAD', {
|
||||
cwd: projectPath,
|
||||
});
|
||||
const branch = branchRaw.trim();
|
||||
|
||||
if (!filePath) {
|
||||
// Project-level details - just return branch info
|
||||
res.json({
|
||||
success: true,
|
||||
details: { branch },
|
||||
});
|
||||
return;
|
||||
}
|
||||
|
||||
// Get last commit info for this file
|
||||
let lastCommitHash = '';
|
||||
let lastCommitMessage = '';
|
||||
let lastCommitAuthor = '';
|
||||
let lastCommitTimestamp = '';
|
||||
|
||||
try {
|
||||
const { stdout: logOutput } = await execFileAsync(
|
||||
'git',
|
||||
['log', '-1', '--format=%H|%s|%an|%aI', '--', filePath],
|
||||
{ cwd: projectPath }
|
||||
);
|
||||
|
||||
if (logOutput.trim()) {
|
||||
const parts = logOutput.trim().split('|');
|
||||
lastCommitHash = parts[0] || '';
|
||||
lastCommitMessage = parts[1] || '';
|
||||
lastCommitAuthor = parts[2] || '';
|
||||
lastCommitTimestamp = parts[3] || '';
|
||||
}
|
||||
} catch {
|
||||
// File may not have any commits yet
|
||||
}
|
||||
|
||||
// Get diff stats (lines added/removed)
|
||||
let linesAdded = 0;
|
||||
let linesRemoved = 0;
|
||||
|
||||
try {
|
||||
// Check if file is untracked first
|
||||
const { stdout: statusLine } = await execFileAsync(
|
||||
'git',
|
||||
['status', '--porcelain', '--', filePath],
|
||||
{ cwd: projectPath }
|
||||
);
|
||||
|
||||
if (statusLine.trim().startsWith('??')) {
|
||||
// Untracked file - count all lines as added using Node.js instead of shell
|
||||
try {
|
||||
const fileContent = (await secureFs.readFile(filePath, 'utf-8')).toString();
|
||||
const lines = fileContent.split('\n');
|
||||
// Don't count trailing empty line from final newline
|
||||
linesAdded =
|
||||
lines.length > 0 && lines[lines.length - 1] === ''
|
||||
? lines.length - 1
|
||||
: lines.length;
|
||||
} catch {
|
||||
// Ignore
|
||||
}
|
||||
} else {
|
||||
const { stdout: diffStatRaw } = await execFileAsync(
|
||||
'git',
|
||||
['diff', '--numstat', 'HEAD', '--', filePath],
|
||||
{ cwd: projectPath }
|
||||
);
|
||||
|
||||
if (diffStatRaw.trim()) {
|
||||
const parts = diffStatRaw.trim().split('\t');
|
||||
linesAdded = parseInt(parts[0], 10) || 0;
|
||||
linesRemoved = parseInt(parts[1], 10) || 0;
|
||||
}
|
||||
|
||||
// Also check staged diff stats
|
||||
const { stdout: stagedDiffStatRaw } = await execFileAsync(
|
||||
'git',
|
||||
['diff', '--numstat', '--cached', '--', filePath],
|
||||
{ cwd: projectPath }
|
||||
);
|
||||
|
||||
if (stagedDiffStatRaw.trim()) {
|
||||
const parts = stagedDiffStatRaw.trim().split('\t');
|
||||
linesAdded += parseInt(parts[0], 10) || 0;
|
||||
linesRemoved += parseInt(parts[1], 10) || 0;
|
||||
}
|
||||
}
|
||||
} catch {
|
||||
// Diff might not be available
|
||||
}
|
||||
|
||||
// Get conflict and staging status
|
||||
let isConflicted = false;
|
||||
let isStaged = false;
|
||||
let isUnstaged = false;
|
||||
let statusLabel = '';
|
||||
|
||||
try {
|
||||
const { stdout: statusOutput } = await execFileAsync(
|
||||
'git',
|
||||
['status', '--porcelain', '--', filePath],
|
||||
{ cwd: projectPath }
|
||||
);
|
||||
|
||||
if (statusOutput.trim()) {
|
||||
const indexStatus = statusOutput[0];
|
||||
const workTreeStatus = statusOutput[1];
|
||||
|
||||
// Check for conflicts (both modified, unmerged states)
|
||||
if (
|
||||
indexStatus === 'U' ||
|
||||
workTreeStatus === 'U' ||
|
||||
(indexStatus === 'A' && workTreeStatus === 'A') ||
|
||||
(indexStatus === 'D' && workTreeStatus === 'D')
|
||||
) {
|
||||
isConflicted = true;
|
||||
statusLabel = 'Conflicted';
|
||||
} else {
|
||||
// Staged changes (index has a status)
|
||||
if (indexStatus !== ' ' && indexStatus !== '?') {
|
||||
isStaged = true;
|
||||
}
|
||||
// Unstaged changes (work tree has a status)
|
||||
if (workTreeStatus !== ' ' && workTreeStatus !== '?') {
|
||||
isUnstaged = true;
|
||||
}
|
||||
|
||||
// Build status label
|
||||
if (isStaged && isUnstaged) {
|
||||
statusLabel = 'Staged + Modified';
|
||||
} else if (isStaged) {
|
||||
statusLabel = 'Staged';
|
||||
} else {
|
||||
const statusChar = workTreeStatus !== ' ' ? workTreeStatus : indexStatus;
|
||||
switch (statusChar) {
|
||||
case 'M':
|
||||
statusLabel = 'Modified';
|
||||
break;
|
||||
case 'A':
|
||||
statusLabel = 'Added';
|
||||
break;
|
||||
case 'D':
|
||||
statusLabel = 'Deleted';
|
||||
break;
|
||||
case 'R':
|
||||
statusLabel = 'Renamed';
|
||||
break;
|
||||
case 'C':
|
||||
statusLabel = 'Copied';
|
||||
break;
|
||||
case '?':
|
||||
statusLabel = 'Untracked';
|
||||
break;
|
||||
default:
|
||||
statusLabel = statusChar || '';
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
} catch {
|
||||
// Status might not be available
|
||||
}
|
||||
|
||||
const details: GitFileDetails = {
|
||||
branch,
|
||||
lastCommitHash,
|
||||
lastCommitMessage,
|
||||
lastCommitAuthor,
|
||||
lastCommitTimestamp,
|
||||
linesAdded,
|
||||
linesRemoved,
|
||||
isConflicted,
|
||||
isStaged,
|
||||
isUnstaged,
|
||||
statusLabel,
|
||||
};
|
||||
|
||||
res.json({ success: true, details });
|
||||
} catch (innerError) {
|
||||
logError(innerError, 'Git details failed');
|
||||
res.json({
|
||||
success: true,
|
||||
details: {
|
||||
branch: '',
|
||||
lastCommitHash: '',
|
||||
lastCommitMessage: '',
|
||||
lastCommitAuthor: '',
|
||||
lastCommitTimestamp: '',
|
||||
linesAdded: 0,
|
||||
linesRemoved: 0,
|
||||
isConflicted: false,
|
||||
isStaged: false,
|
||||
isUnstaged: false,
|
||||
statusLabel: '',
|
||||
},
|
||||
});
|
||||
}
|
||||
} catch (error) {
|
||||
logError(error, 'Get git details failed');
|
||||
res.status(500).json({ success: false, error: getErrorMessage(error) });
|
||||
}
|
||||
};
|
||||
}
|
||||
@@ -23,6 +23,7 @@ export function createDiffsHandler() {
|
||||
diff: result.diff,
|
||||
files: result.files,
|
||||
hasChanges: result.hasChanges,
|
||||
...(result.mergeState ? { mergeState: result.mergeState } : {}),
|
||||
});
|
||||
} catch (innerError) {
|
||||
logError(innerError, 'Git diff failed');
|
||||
|
||||
apps/server/src/routes/git/routes/enhanced-status.ts (new file, 176 lines)
@@ -0,0 +1,176 @@
|
||||
/**
|
||||
* POST /enhanced-status endpoint - Get enhanced git status with diff stats per file
|
||||
* Returns per-file status with lines added/removed and staged/unstaged differentiation
|
||||
*/
|
||||
|
||||
import type { Request, Response } from 'express';
|
||||
import { exec } from 'child_process';
|
||||
import { promisify } from 'util';
|
||||
import { getErrorMessage, logError } from '../common.js';
|
||||
|
||||
const execAsync = promisify(exec);
|
||||
|
||||
interface EnhancedFileStatus {
|
||||
path: string;
|
||||
indexStatus: string;
|
||||
workTreeStatus: string;
|
||||
isConflicted: boolean;
|
||||
isStaged: boolean;
|
||||
isUnstaged: boolean;
|
||||
linesAdded: number;
|
||||
linesRemoved: number;
|
||||
statusLabel: string;
|
||||
}
|
||||
|
||||
function getStatusLabel(indexStatus: string, workTreeStatus: string): string {
|
||||
// Check for conflicts
|
||||
if (
|
||||
indexStatus === 'U' ||
|
||||
workTreeStatus === 'U' ||
|
||||
(indexStatus === 'A' && workTreeStatus === 'A') ||
|
||||
(indexStatus === 'D' && workTreeStatus === 'D')
|
||||
) {
|
||||
return 'Conflicted';
|
||||
}
|
||||
|
||||
const hasStaged = indexStatus !== ' ' && indexStatus !== '?';
|
||||
const hasUnstaged = workTreeStatus !== ' ' && workTreeStatus !== '?';
|
||||
|
||||
if (hasStaged && hasUnstaged) return 'Staged + Modified';
|
||||
if (hasStaged) return 'Staged';
|
||||
|
||||
const statusChar = workTreeStatus !== ' ' ? workTreeStatus : indexStatus;
|
||||
switch (statusChar) {
|
||||
case 'M':
|
||||
return 'Modified';
|
||||
case 'A':
|
||||
return 'Added';
|
||||
case 'D':
|
||||
return 'Deleted';
|
||||
case 'R':
|
||||
return 'Renamed';
|
||||
case 'C':
|
||||
return 'Copied';
|
||||
case '?':
|
||||
return 'Untracked';
|
||||
default:
|
||||
return statusChar || '';
|
||||
}
|
||||
}
|
||||
|
||||
export function createEnhancedStatusHandler() {
|
||||
return async (req: Request, res: Response): Promise<void> => {
|
||||
try {
|
||||
const { projectPath } = req.body as { projectPath: string };
|
||||
|
||||
if (!projectPath) {
|
||||
res.status(400).json({ success: false, error: 'projectPath required' });
|
||||
return;
|
||||
}
|
||||
|
||||
try {
|
||||
// Get current branch
|
||||
const { stdout: branchRaw } = await execAsync('git rev-parse --abbrev-ref HEAD', {
|
||||
cwd: projectPath,
|
||||
});
|
||||
const branch = branchRaw.trim();
|
||||
|
||||
// Get porcelain status for all files
|
||||
const { stdout: statusOutput } = await execAsync('git status --porcelain', {
|
||||
cwd: projectPath,
|
||||
});
|
||||
|
||||
// Get diff numstat for working tree changes
|
||||
let workTreeStats: Record<string, { added: number; removed: number }> = {};
|
||||
try {
|
||||
const { stdout: numstatRaw } = await execAsync('git diff --numstat', {
|
||||
cwd: projectPath,
|
||||
maxBuffer: 10 * 1024 * 1024,
|
||||
});
|
||||
for (const line of numstatRaw.trim().split('\n').filter(Boolean)) {
|
||||
const parts = line.split('\t');
|
||||
if (parts.length >= 3) {
|
||||
const added = parseInt(parts[0], 10) || 0;
|
||||
const removed = parseInt(parts[1], 10) || 0;
|
||||
workTreeStats[parts[2]] = { added, removed };
|
||||
}
|
||||
}
|
||||
} catch {
|
||||
// Ignore
|
||||
}
|
||||
|
||||
// Get diff numstat for staged changes
|
||||
let stagedStats: Record<string, { added: number; removed: number }> = {};
|
||||
try {
|
||||
const { stdout: stagedNumstatRaw } = await execAsync('git diff --numstat --cached', {
|
||||
cwd: projectPath,
|
||||
maxBuffer: 10 * 1024 * 1024,
|
||||
});
|
||||
for (const line of stagedNumstatRaw.trim().split('\n').filter(Boolean)) {
|
||||
const parts = line.split('\t');
|
||||
if (parts.length >= 3) {
|
||||
const added = parseInt(parts[0], 10) || 0;
|
||||
const removed = parseInt(parts[1], 10) || 0;
|
||||
stagedStats[parts[2]] = { added, removed };
|
||||
}
|
||||
}
|
||||
} catch {
|
||||
// Ignore
|
||||
}
|
||||
|
||||
// Parse status and build enhanced file list
|
||||
const files: EnhancedFileStatus[] = [];
|
||||
|
||||
for (const line of statusOutput.split('\n').filter(Boolean)) {
|
||||
if (line.length < 4) continue;
|
||||
|
||||
const indexStatus = line[0];
|
||||
const workTreeStatus = line[1];
|
||||
const filePath = line.substring(3).trim();
|
||||
|
||||
// Handle renamed files (format: "R old -> new")
|
||||
const actualPath = filePath.includes(' -> ')
|
||||
? filePath.split(' -> ')[1].trim()
|
||||
: filePath;
|
||||
|
||||
const isConflicted =
|
||||
indexStatus === 'U' ||
|
||||
workTreeStatus === 'U' ||
|
||||
(indexStatus === 'A' && workTreeStatus === 'A') ||
|
||||
(indexStatus === 'D' && workTreeStatus === 'D');
|
||||
|
||||
const isStaged = indexStatus !== ' ' && indexStatus !== '?';
|
||||
const isUnstaged = workTreeStatus !== ' ' && workTreeStatus !== '?';
|
||||
|
||||
// Combine diff stats from both working tree and staged
|
||||
const wtStats = workTreeStats[actualPath] || { added: 0, removed: 0 };
|
||||
const stStats = stagedStats[actualPath] || { added: 0, removed: 0 };
|
||||
|
||||
files.push({
|
||||
path: actualPath,
|
||||
indexStatus,
|
||||
workTreeStatus,
|
||||
isConflicted,
|
||||
isStaged,
|
||||
isUnstaged,
|
||||
linesAdded: wtStats.added + stStats.added,
|
||||
linesRemoved: wtStats.removed + stStats.removed,
|
||||
statusLabel: getStatusLabel(indexStatus, workTreeStatus),
|
||||
});
|
||||
}
|
||||
|
||||
res.json({
|
||||
success: true,
|
||||
branch,
|
||||
files,
|
||||
});
|
||||
} catch (innerError) {
|
||||
logError(innerError, 'Git enhanced status failed');
|
||||
res.json({ success: true, branch: '', files: [] });
|
||||
}
|
||||
} catch (error) {
|
||||
logError(error, 'Get enhanced status failed');
|
||||
res.status(500).json({ success: false, error: getErrorMessage(error) });
|
||||
}
|
||||
};
|
||||
}
|
||||
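
A brief consumer sketch for the new /enhanced-status endpoint, printing the per-file stats the handler returns; the /api/git mount path is an assumption, not confirmed by this diff.

// Sketch only: summarise branch and per-file diff stats from /enhanced-status.
interface EnhancedFileStatusSummary {
  path: string;
  statusLabel: string;
  linesAdded: number;
  linesRemoved: number;
  isConflicted: boolean;
}

async function printGitSummary(projectPath: string): Promise<void> {
  const res = await fetch('/api/git/enhanced-status', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ projectPath }),
  });
  const data = (await res.json()) as { branch: string; files: EnhancedFileStatusSummary[] };

  console.log(`On branch ${data.branch}`);
  for (const f of data.files) {
    const conflict = f.isConflicted ? ' (conflict)' : '';
    console.log(`${f.statusLabel.padEnd(18)} +${f.linesAdded} -${f.linesRemoved} ${f.path}${conflict}`);
  }
}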
@@ -9,6 +9,8 @@ import { createCheckGitHubRemoteHandler } from './routes/check-github-remote.js'
|
||||
import { createListIssuesHandler } from './routes/list-issues.js';
|
||||
import { createListPRsHandler } from './routes/list-prs.js';
|
||||
import { createListCommentsHandler } from './routes/list-comments.js';
|
||||
import { createListPRReviewCommentsHandler } from './routes/list-pr-review-comments.js';
|
||||
import { createResolvePRCommentHandler } from './routes/resolve-pr-comment.js';
|
||||
import { createValidateIssueHandler } from './routes/validate-issue.js';
|
||||
import {
|
||||
createValidationStatusHandler,
|
||||
@@ -29,6 +31,16 @@ export function createGitHubRoutes(
|
||||
router.post('/issues', validatePathParams('projectPath'), createListIssuesHandler());
|
||||
router.post('/prs', validatePathParams('projectPath'), createListPRsHandler());
|
||||
router.post('/issue-comments', validatePathParams('projectPath'), createListCommentsHandler());
|
||||
router.post(
|
||||
'/pr-review-comments',
|
||||
validatePathParams('projectPath'),
|
||||
createListPRReviewCommentsHandler()
|
||||
);
|
||||
router.post(
|
||||
'/resolve-pr-comment',
|
||||
validatePathParams('projectPath'),
|
||||
createResolvePRCommentHandler()
|
||||
);
|
||||
router.post(
|
||||
'/validate-issue',
|
||||
validatePathParams('projectPath'),
|
||||
|
||||
@@ -1,38 +1,14 @@
|
||||
/**
|
||||
* Common utilities for GitHub routes
|
||||
*
|
||||
* Re-exports shared utilities from lib/exec-utils so route consumers
|
||||
* can continue importing from this module unchanged.
|
||||
*/
|
||||
|
||||
import { exec } from 'child_process';
|
||||
import { promisify } from 'util';
|
||||
import { createLogger } from '@automaker/utils';
|
||||
|
||||
const logger = createLogger('GitHub');
|
||||
|
||||
export const execAsync = promisify(exec);
|
||||
|
||||
// Extended PATH to include common tool installation locations
|
||||
export const extendedPath = [
|
||||
process.env.PATH,
|
||||
'/opt/homebrew/bin',
|
||||
'/usr/local/bin',
|
||||
'/home/linuxbrew/.linuxbrew/bin',
|
||||
`${process.env.HOME}/.local/bin`,
|
||||
]
|
||||
.filter(Boolean)
|
||||
.join(':');
|
||||
|
||||
export const execEnv = {
|
||||
...process.env,
|
||||
PATH: extendedPath,
|
||||
};
|
||||
|
||||
export function getErrorMessage(error: unknown): string {
|
||||
if (error instanceof Error) {
|
||||
return error.message;
|
||||
}
|
||||
return String(error);
|
||||
}
|
||||
|
||||
export function logError(error: unknown, context: string): void {
|
||||
logger.error(`${context}:`, error);
|
||||
}
|
||||
// Re-export shared utilities from the canonical location
|
||||
export { extendedPath, execEnv, getErrorMessage, logError } from '../../../lib/exec-utils.js';
|
||||
|
||||
@@ -0,0 +1,72 @@
|
||||
/**
|
||||
* POST /pr-review-comments endpoint - Fetch review comments for a GitHub PR
|
||||
*
|
||||
* Fetches both regular PR comments and inline code review comments
|
||||
* for a specific pull request, providing file path and line context.
|
||||
*/
|
||||
|
||||
import type { Request, Response } from 'express';
|
||||
import { getErrorMessage, logError } from './common.js';
|
||||
import { checkGitHubRemote } from './check-github-remote.js';
|
||||
import {
|
||||
fetchPRReviewComments,
|
||||
fetchReviewThreadResolvedStatus,
|
||||
type PRReviewComment,
|
||||
type ListPRReviewCommentsResult,
|
||||
} from '../../../services/pr-review-comments.service.js';
|
||||
|
||||
// Re-export types so existing callers continue to work
|
||||
export type { PRReviewComment, ListPRReviewCommentsResult };
|
||||
// Re-export service functions so existing callers continue to work
|
||||
export { fetchPRReviewComments, fetchReviewThreadResolvedStatus };
|
||||
|
||||
interface ListPRReviewCommentsRequest {
|
||||
projectPath: string;
|
||||
prNumber: number;
|
||||
}
|
||||
|
||||
export function createListPRReviewCommentsHandler() {
|
||||
return async (req: Request, res: Response): Promise<void> => {
|
||||
try {
|
||||
const { projectPath, prNumber } = req.body as ListPRReviewCommentsRequest;
|
||||
|
||||
if (!projectPath) {
|
||||
res.status(400).json({ success: false, error: 'projectPath is required' });
|
||||
return;
|
||||
}
|
||||
|
||||
if (!prNumber || typeof prNumber !== 'number') {
|
||||
res
|
||||
.status(400)
|
||||
.json({ success: false, error: 'prNumber is required and must be a number' });
|
||||
return;
|
||||
}
|
||||
|
||||
// Check if this is a GitHub repo and get owner/repo
|
||||
const remoteStatus = await checkGitHubRemote(projectPath);
|
||||
if (!remoteStatus.hasGitHubRemote || !remoteStatus.owner || !remoteStatus.repo) {
|
||||
res.status(400).json({
|
||||
success: false,
|
||||
error: 'Project does not have a GitHub remote',
|
||||
});
|
||||
return;
|
||||
}
|
||||
|
||||
const comments = await fetchPRReviewComments(
|
||||
projectPath,
|
||||
remoteStatus.owner,
|
||||
remoteStatus.repo,
|
||||
prNumber
|
||||
);
|
||||
|
||||
res.json({
|
||||
success: true,
|
||||
comments,
|
||||
totalCount: comments.length,
|
||||
});
|
||||
} catch (error) {
|
||||
logError(error, 'Fetch PR review comments failed');
|
||||
res.status(500).json({ success: false, error: getErrorMessage(error) });
|
||||
}
|
||||
};
|
||||
}
|
||||
apps/server/src/routes/github/routes/resolve-pr-comment.ts (new file, 66 lines)
@@ -0,0 +1,66 @@
|
||||
/**
|
||||
* POST /resolve-pr-comment endpoint - Resolve or unresolve a GitHub PR review thread
|
||||
*
|
||||
* Uses the GitHub GraphQL API to resolve or unresolve a review thread
|
||||
* identified by its GraphQL node ID (threadId).
|
||||
*/
|
||||
|
||||
import type { Request, Response } from 'express';
|
||||
import { getErrorMessage, logError } from './common.js';
|
||||
import { checkGitHubRemote } from './check-github-remote.js';
|
||||
import { executeReviewThreadMutation } from '../../../services/github-pr-comment.service.js';
|
||||
|
||||
export interface ResolvePRCommentResult {
|
||||
success: boolean;
|
||||
isResolved?: boolean;
|
||||
error?: string;
|
||||
}
|
||||
|
||||
interface ResolvePRCommentRequest {
|
||||
projectPath: string;
|
||||
threadId: string;
|
||||
resolve: boolean;
|
||||
}
|
||||
|
||||
export function createResolvePRCommentHandler() {
|
||||
return async (req: Request, res: Response): Promise<void> => {
|
||||
try {
|
||||
const { projectPath, threadId, resolve } = req.body as ResolvePRCommentRequest;
|
||||
|
||||
if (!projectPath) {
|
||||
res.status(400).json({ success: false, error: 'projectPath is required' });
|
||||
return;
|
||||
}
|
||||
|
||||
if (!threadId) {
|
||||
res.status(400).json({ success: false, error: 'threadId is required' });
|
||||
return;
|
||||
}
|
||||
|
||||
if (typeof resolve !== 'boolean') {
|
||||
res.status(400).json({ success: false, error: 'resolve must be a boolean' });
|
||||
return;
|
||||
}
|
||||
|
||||
// Check if this is a GitHub repo
|
||||
const remoteStatus = await checkGitHubRemote(projectPath);
|
||||
if (!remoteStatus.hasGitHubRemote) {
|
||||
res.status(400).json({
|
||||
success: false,
|
||||
error: 'Project does not have a GitHub remote',
|
||||
});
|
||||
return;
|
||||
}
|
||||
|
||||
const result = await executeReviewThreadMutation(projectPath, threadId, resolve);
|
||||
|
||||
res.json({
|
||||
success: true,
|
||||
isResolved: result.isResolved,
|
||||
});
|
||||
} catch (error) {
|
||||
logError(error, 'Resolve PR comment failed');
|
||||
res.status(500).json({ success: false, error: getErrorMessage(error) });
|
||||
}
|
||||
};
|
||||
}
|
||||
@@ -2,7 +2,12 @@
|
||||
* Common utilities for worktree routes
|
||||
*/
|
||||
|
||||
import { createLogger, isValidBranchName, MAX_BRANCH_NAME_LENGTH } from '@automaker/utils';
|
||||
import {
|
||||
createLogger,
|
||||
isValidBranchName,
|
||||
isValidRemoteName,
|
||||
MAX_BRANCH_NAME_LENGTH,
|
||||
} from '@automaker/utils';
|
||||
import { exec } from 'child_process';
|
||||
import { promisify } from 'util';
|
||||
import { getErrorMessage as getErrorMessageShared, createLogError } from '../common.js';
|
||||
@@ -16,7 +21,7 @@ export const execAsync = promisify(exec);
|
||||
|
||||
// Re-export git validation utilities from the canonical shared module so
|
||||
// existing consumers that import from this file continue to work.
|
||||
export { isValidBranchName, MAX_BRANCH_NAME_LENGTH };
|
||||
export { isValidBranchName, isValidRemoteName, MAX_BRANCH_NAME_LENGTH };
|
||||
|
||||
// ============================================================================
|
||||
// Extended PATH configuration for Electron apps
|
||||
@@ -60,25 +65,6 @@ export const execEnv = {
|
||||
PATH: extendedPath,
|
||||
};
|
||||
|
||||
/**
|
||||
* Validate git remote name to prevent command injection.
|
||||
* Matches the strict validation used in add-remote.ts:
|
||||
* - Rejects empty strings and names that are too long
|
||||
* - Disallows names that start with '-' or '.'
|
||||
* - Forbids the substring '..'
|
||||
* - Rejects '/' characters
|
||||
* - Rejects NUL bytes
|
||||
* - Must consist only of alphanumerics, hyphens, underscores, and dots
|
||||
*/
|
||||
export function isValidRemoteName(name: string): boolean {
|
||||
if (!name || name.length === 0 || name.length >= MAX_BRANCH_NAME_LENGTH) return false;
|
||||
if (name.startsWith('-') || name.startsWith('.')) return false;
|
||||
if (name.includes('..')) return false;
|
||||
if (name.includes('/')) return false;
|
||||
if (name.includes('\0')) return false;
|
||||
return /^[a-zA-Z0-9._-]+$/.test(name);
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if gh CLI is available on the system
|
||||
*/
|
||||
|
||||
@@ -67,6 +67,9 @@ import { createAbortOperationHandler } from './routes/abort-operation.js';
|
||||
import { createContinueOperationHandler } from './routes/continue-operation.js';
|
||||
import { createStageFilesHandler } from './routes/stage-files.js';
|
||||
import { createCheckChangesHandler } from './routes/check-changes.js';
|
||||
import { createSetTrackingHandler } from './routes/set-tracking.js';
|
||||
import { createSyncHandler } from './routes/sync.js';
|
||||
import { createUpdatePRNumberHandler } from './routes/update-pr-number.js';
|
||||
import type { SettingsService } from '../../services/settings-service.js';
|
||||
|
||||
export function createWorktreeRoutes(
|
||||
@@ -94,6 +97,12 @@ export function createWorktreeRoutes(
|
||||
router.post('/delete', validatePathParams('projectPath', 'worktreePath'), createDeleteHandler());
|
||||
router.post('/create-pr', createCreatePRHandler());
|
||||
router.post('/pr-info', createPRInfoHandler());
|
||||
router.post(
|
||||
'/update-pr-number',
|
||||
validatePathParams('worktreePath', 'projectPath?'),
|
||||
requireValidWorktree,
|
||||
createUpdatePRNumberHandler()
|
||||
);
|
||||
router.post(
|
||||
'/commit',
|
||||
validatePathParams('worktreePath'),
|
||||
@@ -118,6 +127,18 @@ export function createWorktreeRoutes(
|
||||
requireValidWorktree,
|
||||
createPullHandler()
|
||||
);
|
||||
router.post(
|
||||
'/sync',
|
||||
validatePathParams('worktreePath'),
|
||||
requireValidWorktree,
|
||||
createSyncHandler()
|
||||
);
|
||||
router.post(
|
||||
'/set-tracking',
|
||||
validatePathParams('worktreePath'),
|
||||
requireValidWorktree,
|
||||
createSetTrackingHandler()
|
||||
);
|
||||
router.post(
|
||||
'/checkout-branch',
|
||||
validatePathParams('worktreePath'),
|
||||
|
||||
@@ -22,6 +22,36 @@ import { getErrorMessage, logError, isValidBranchName } from '../common.js';
import { execGitCommand } from '../../../lib/git.js';
import type { EventEmitter } from '../../../lib/events.js';
import { performCheckoutBranch } from '../../../services/checkout-branch-service.js';
import { createLogger } from '@automaker/utils';

const logger = createLogger('CheckoutBranchRoute');

/** Timeout for git fetch operations (30 seconds) */
const FETCH_TIMEOUT_MS = 30_000;

/**
* Fetch latest from all remotes (silently, with timeout).
* Non-fatal: fetch errors are logged and swallowed so the workflow continues.
*/
async function fetchRemotes(cwd: string): Promise<void> {
const controller = new AbortController();
const timerId = setTimeout(() => controller.abort(), FETCH_TIMEOUT_MS);

try {
await execGitCommand(['fetch', '--all', '--quiet'], cwd, undefined, controller);
} catch (error) {
if (error instanceof Error && error.message === 'Process aborted') {
logger.warn(
`fetchRemotes timed out after ${FETCH_TIMEOUT_MS}ms - continuing without latest remote refs`
);
} else {
logger.warn(`fetchRemotes failed: ${getErrorMessage(error)} - continuing with local refs`);
}
// Non-fatal: continue with locally available refs
} finally {
clearTimeout(timerId);
}
}

export function createCheckoutBranchHandler(events?: EventEmitter) {
return async (req: Request, res: Response): Promise<void> => {
@@ -127,6 +157,10 @@ export function createCheckoutBranchHandler(events?: EventEmitter) {
}

// Original simple flow (no stash handling)
// Fetch latest remote refs before creating the branch so that
// base branch validation works for remote references like "origin/main"
await fetchRemotes(resolvedPath);

const currentBranchOutput = await execGitCommand(
['rev-parse', '--abbrev-ref', 'HEAD'],
resolvedPath

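The fetch-with-timeout pattern above generalizes to any git invocation. This is a minimal sketch, assuming execGitCommand accepts (args, cwd, env, abortController) and resolves to the command's stdout, as it appears to in these hunks.

// Sketch of a reusable timeout wrapper; assumes the same execGitCommand import as above.
async function execGitWithTimeout(
  args: string[],
  cwd: string,
  timeoutMs = 30_000
): Promise<string> {
  const controller = new AbortController();
  const timer = setTimeout(() => controller.abort(), timeoutMs);
  try {
    // undefined env means "use the default environment", matching the fetchRemotes call.
    return await execGitCommand(args, cwd, undefined, controller);
  } finally {
    clearTimeout(timer);
  }
}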
@@ -17,6 +17,7 @@ import { spawnProcess } from '@automaker/platform';
|
||||
import { updateWorktreePRInfo } from '../../../lib/worktree-metadata.js';
|
||||
import { createLogger } from '@automaker/utils';
|
||||
import { validatePRState } from '@automaker/types';
|
||||
import { resolvePrTarget } from '../../../services/pr-service.js';
|
||||
|
||||
const logger = createLogger('CreatePR');
|
||||
|
||||
@@ -32,6 +33,7 @@ export function createCreatePRHandler() {
|
||||
baseBranch,
|
||||
draft,
|
||||
remote,
|
||||
targetRemote,
|
||||
} = req.body as {
|
||||
worktreePath: string;
|
||||
projectPath?: string;
|
||||
@@ -41,6 +43,8 @@ export function createCreatePRHandler() {
|
||||
baseBranch?: string;
|
||||
draft?: boolean;
|
||||
remote?: string;
|
||||
/** Remote to create the PR against (e.g. upstream). If not specified, inferred from repo setup. */
|
||||
targetRemote?: string;
|
||||
};
|
||||
|
||||
if (!worktreePath) {
|
||||
@@ -71,6 +75,52 @@ export function createCreatePRHandler() {
|
||||
return;
|
||||
}
|
||||
|
||||
// --- Input validation: run all validation before any git write operations ---
|
||||
|
||||
// Validate remote names before use to prevent command injection
|
||||
if (remote !== undefined && !isValidRemoteName(remote)) {
|
||||
res.status(400).json({
|
||||
success: false,
|
||||
error: 'Invalid remote name: contains unsafe characters',
|
||||
});
|
||||
return;
|
||||
}
|
||||
if (targetRemote !== undefined && !isValidRemoteName(targetRemote)) {
|
||||
res.status(400).json({
|
||||
success: false,
|
||||
error: 'Invalid target remote name: contains unsafe characters',
|
||||
});
|
||||
return;
|
||||
}
|
||||
|
||||
const pushRemote = remote || 'origin';
|
||||
|
||||
// Resolve repository URL, fork workflow, and target remote information.
|
||||
// This is needed for both the existing PR check and PR creation.
|
||||
// Resolve early so validation errors are caught before any writes.
|
||||
let repoUrl: string | null = null;
|
||||
let upstreamRepo: string | null = null;
|
||||
let originOwner: string | null = null;
|
||||
try {
|
||||
const prTarget = await resolvePrTarget({
|
||||
worktreePath,
|
||||
pushRemote,
|
||||
targetRemote,
|
||||
});
|
||||
repoUrl = prTarget.repoUrl;
|
||||
upstreamRepo = prTarget.upstreamRepo;
|
||||
originOwner = prTarget.originOwner;
|
||||
} catch (resolveErr) {
|
||||
// resolvePrTarget throws for validation errors (unknown targetRemote, missing pushRemote)
|
||||
res.status(400).json({
|
||||
success: false,
|
||||
error: getErrorMessage(resolveErr),
|
||||
});
|
||||
return;
|
||||
}
|
||||
|
||||
// --- Validation complete — proceed with git operations ---
|
||||
|
||||
// Check for uncommitted changes
|
||||
logger.debug(`Checking for uncommitted changes in: ${worktreePath}`);
|
||||
const { stdout: status } = await execAsync('git status --porcelain', {
|
||||
@@ -119,30 +169,19 @@ export function createCreatePRHandler() {
|
||||
}
|
||||
}
|
||||
|
||||
// Validate remote name before use to prevent command injection
|
||||
if (remote !== undefined && !isValidRemoteName(remote)) {
|
||||
res.status(400).json({
|
||||
success: false,
|
||||
error: 'Invalid remote name contains unsafe characters',
|
||||
});
|
||||
return;
|
||||
}
|
||||
|
||||
// Push the branch to remote (use selected remote or default to 'origin')
|
||||
const pushRemote = remote || 'origin';
|
||||
// Uses array-based execGitCommand to avoid shell injection from pushRemote/branchName.
|
||||
let pushError: string | null = null;
|
||||
try {
|
||||
await execAsync(`git push ${pushRemote} ${branchName}`, {
|
||||
cwd: worktreePath,
|
||||
env: execEnv,
|
||||
});
|
||||
await execGitCommand(['push', pushRemote, branchName], worktreePath, execEnv);
|
||||
} catch {
|
||||
// If push fails, try with --set-upstream
|
||||
try {
|
||||
await execAsync(`git push --set-upstream ${pushRemote} ${branchName}`, {
|
||||
cwd: worktreePath,
|
||||
env: execEnv,
|
||||
});
|
||||
await execGitCommand(
|
||||
['push', '--set-upstream', pushRemote, branchName],
|
||||
worktreePath,
|
||||
execEnv
|
||||
);
|
||||
} catch (error2: unknown) {
|
||||
// Capture push error for reporting
|
||||
const err = error2 as { stderr?: string; message?: string };
|
||||
@@ -164,82 +203,11 @@ export function createCreatePRHandler() {
|
||||
const base = baseBranch || 'main';
|
||||
const title = prTitle || branchName;
|
||||
const body = prBody || `Changes from branch ${branchName}`;
|
||||
const draftFlag = draft ? '--draft' : '';
|
||||
|
||||
let prUrl: string | null = null;
|
||||
let prError: string | null = null;
|
||||
let browserUrl: string | null = null;
|
||||
let ghCliAvailable = false;
|
||||
|
||||
// Get repository URL and detect fork workflow FIRST
|
||||
// This is needed for both the existing PR check and PR creation
|
||||
let repoUrl: string | null = null;
|
||||
let upstreamRepo: string | null = null;
|
||||
let originOwner: string | null = null;
|
||||
try {
|
||||
const { stdout: remotes } = await execAsync('git remote -v', {
|
||||
cwd: worktreePath,
|
||||
env: execEnv,
|
||||
});
|
||||
|
||||
// Parse remotes to detect fork workflow and get repo URL
|
||||
const lines = remotes.split(/\r?\n/); // Handle both Unix and Windows line endings
|
||||
for (const line of lines) {
|
||||
// Try multiple patterns to match different remote URL formats
|
||||
// Pattern 1: git@github.com:owner/repo.git (fetch)
|
||||
// Pattern 2: https://github.com/owner/repo.git (fetch)
|
||||
// Pattern 3: https://github.com/owner/repo (fetch)
|
||||
let match = line.match(/^(\w+)\s+.*[:/]([^/]+)\/([^/\s]+?)(?:\.git)?\s+\(fetch\)/);
|
||||
if (!match) {
|
||||
// Try SSH format: git@github.com:owner/repo.git
|
||||
match = line.match(/^(\w+)\s+git@[^:]+:([^/]+)\/([^\s]+?)(?:\.git)?\s+\(fetch\)/);
|
||||
}
|
||||
if (!match) {
|
||||
// Try HTTPS format: https://github.com/owner/repo.git
|
||||
match = line.match(
|
||||
/^(\w+)\s+https?:\/\/[^/]+\/([^/]+)\/([^\s]+?)(?:\.git)?\s+\(fetch\)/
|
||||
);
|
||||
}
|
||||
|
||||
if (match) {
|
||||
const [, remoteName, owner, repo] = match;
|
||||
if (remoteName === 'upstream') {
|
||||
upstreamRepo = `${owner}/${repo}`;
|
||||
repoUrl = `https://github.com/${owner}/${repo}`;
|
||||
} else if (remoteName === 'origin') {
|
||||
originOwner = owner;
|
||||
if (!repoUrl) {
|
||||
repoUrl = `https://github.com/${owner}/${repo}`;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
} catch {
|
||||
// Couldn't parse remotes - will try fallback
|
||||
}
|
||||
|
||||
// Fallback: Try to get repo URL from git config if remote parsing failed
|
||||
if (!repoUrl) {
|
||||
try {
|
||||
const { stdout: originUrl } = await execAsync('git config --get remote.origin.url', {
|
||||
cwd: worktreePath,
|
||||
env: execEnv,
|
||||
});
|
||||
const url = originUrl.trim();
|
||||
|
||||
// Parse URL to extract owner/repo
|
||||
// Handle both SSH (git@github.com:owner/repo.git) and HTTPS (https://github.com/owner/repo.git)
|
||||
let match = url.match(/[:/]([^/]+)\/([^/\s]+?)(?:\.git)?$/);
|
||||
if (match) {
|
||||
const [, owner, repo] = match;
|
||||
originOwner = owner;
|
||||
repoUrl = `https://github.com/${owner}/${repo}`;
|
||||
}
|
||||
} catch {
|
||||
// Failed to get repo URL from config
|
||||
}
|
||||
}
|
||||
|
||||
// Check if gh CLI is available (cross-platform)
|
||||
ghCliAvailable = await isGhCliAvailable();
|
||||
|
||||
@@ -247,13 +215,16 @@ export function createCreatePRHandler() {
|
||||
if (repoUrl) {
|
||||
const encodedTitle = encodeURIComponent(title);
|
||||
const encodedBody = encodeURIComponent(body);
|
||||
// Encode base branch and head branch to handle special chars like # or %
|
||||
const encodedBase = encodeURIComponent(base);
|
||||
const encodedBranch = encodeURIComponent(branchName);
|
||||
|
||||
if (upstreamRepo && originOwner) {
|
||||
// Fork workflow: PR to upstream from origin
|
||||
browserUrl = `https://github.com/${upstreamRepo}/compare/${base}...${originOwner}:${branchName}?expand=1&title=${encodedTitle}&body=${encodedBody}`;
|
||||
// Fork workflow (or cross-remote PR): PR to target from push remote
|
||||
browserUrl = `https://github.com/${upstreamRepo}/compare/${encodedBase}...${originOwner}:${encodedBranch}?expand=1&title=${encodedTitle}&body=${encodedBody}`;
|
||||
} else {
|
||||
// Regular repo
|
||||
browserUrl = `${repoUrl}/compare/${base}...${branchName}?expand=1&title=${encodedTitle}&body=${encodedBody}`;
|
||||
browserUrl = `${repoUrl}/compare/${encodedBase}...${encodedBranch}?expand=1&title=${encodedTitle}&body=${encodedBody}`;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -263,18 +234,40 @@ export function createCreatePRHandler() {
|
||||
if (ghCliAvailable) {
|
||||
// First, check if a PR already exists for this branch using gh pr list
|
||||
// This is more reliable than gh pr view as it explicitly searches by branch name
|
||||
// For forks, we need to use owner:branch format for the head parameter
|
||||
// For forks/cross-remote, we need to use owner:branch format for the head parameter
|
||||
const headRef = upstreamRepo && originOwner ? `${originOwner}:${branchName}` : branchName;
|
||||
const repoArg = upstreamRepo ? ` --repo "${upstreamRepo}"` : '';
|
||||
|
||||
logger.debug(`Checking for existing PR for branch: ${branchName} (headRef: ${headRef})`);
|
||||
try {
|
||||
const listCmd = `gh pr list${repoArg} --head "${headRef}" --json number,title,url,state --limit 1`;
|
||||
logger.debug(`Running: ${listCmd}`);
|
||||
const { stdout: existingPrOutput } = await execAsync(listCmd, {
|
||||
const listArgs = ['pr', 'list'];
|
||||
if (upstreamRepo) {
|
||||
listArgs.push('--repo', upstreamRepo);
|
||||
}
|
||||
listArgs.push(
|
||||
'--head',
|
||||
headRef,
|
||||
'--json',
|
||||
'number,title,url,state,createdAt',
|
||||
'--limit',
|
||||
'1'
|
||||
);
|
||||
logger.debug(`Running: gh ${listArgs.join(' ')}`);
|
||||
const listResult = await spawnProcess({
|
||||
command: 'gh',
|
||||
args: listArgs,
|
||||
cwd: worktreePath,
|
||||
env: execEnv,
|
||||
});
|
||||
if (listResult.exitCode !== 0) {
|
||||
logger.error(
|
||||
`gh pr list failed with exit code ${listResult.exitCode}: ` +
|
||||
`stderr=${listResult.stderr}, stdout=${listResult.stdout}`
|
||||
);
|
||||
throw new Error(
|
||||
`gh pr list failed (exit code ${listResult.exitCode}): ${listResult.stderr || listResult.stdout}`
|
||||
);
|
||||
}
|
||||
const existingPrOutput = listResult.stdout;
|
||||
logger.debug(`gh pr list output: ${existingPrOutput}`);
|
||||
|
||||
const existingPrs = JSON.parse(existingPrOutput);
|
||||
@@ -294,7 +287,7 @@ export function createCreatePRHandler() {
|
||||
url: existingPr.url,
|
||||
title: existingPr.title || title,
|
||||
state: validatePRState(existingPr.state),
|
||||
createdAt: new Date().toISOString(),
|
||||
createdAt: existingPr.createdAt || new Date().toISOString(),
|
||||
});
|
||||
logger.debug(
|
||||
`Stored existing PR info for branch ${branchName}: PR #${existingPr.number}`
|
||||
@@ -372,11 +365,26 @@ export function createCreatePRHandler() {
|
||||
if (errorMessage.toLowerCase().includes('already exists')) {
|
||||
logger.debug(`PR already exists error - trying to fetch existing PR`);
|
||||
try {
|
||||
const { stdout: viewOutput } = await execAsync(
|
||||
`gh pr view --json number,title,url,state`,
|
||||
{ cwd: worktreePath, env: execEnv }
|
||||
);
|
||||
const existingPr = JSON.parse(viewOutput);
|
||||
// Build args as an array to avoid shell injection.
|
||||
// When upstreamRepo is set (fork/cross-remote workflow) we must
|
||||
// query the upstream repository so we find the correct PR.
|
||||
const viewArgs = ['pr', 'view', '--json', 'number,title,url,state,createdAt'];
|
||||
if (upstreamRepo) {
|
||||
viewArgs.push('--repo', upstreamRepo);
|
||||
}
|
||||
logger.debug(`Running: gh ${viewArgs.join(' ')}`);
|
||||
const viewResult = await spawnProcess({
|
||||
command: 'gh',
|
||||
args: viewArgs,
|
||||
cwd: worktreePath,
|
||||
env: execEnv,
|
||||
});
|
||||
if (viewResult.exitCode !== 0) {
|
||||
throw new Error(
|
||||
`gh pr view failed (exit code ${viewResult.exitCode}): ${viewResult.stderr || viewResult.stdout}`
|
||||
);
|
||||
}
|
||||
const existingPr = JSON.parse(viewResult.stdout);
|
||||
if (existingPr.url) {
|
||||
prUrl = existingPr.url;
|
||||
prNumber = existingPr.number;
|
||||
@@ -388,7 +396,7 @@ export function createCreatePRHandler() {
|
||||
url: existingPr.url,
|
||||
title: existingPr.title || title,
|
||||
state: validatePRState(existingPr.state),
|
||||
createdAt: new Date().toISOString(),
|
||||
createdAt: existingPr.createdAt || new Date().toISOString(),
|
||||
});
|
||||
logger.debug(`Fetched and stored existing PR: #${existingPr.number}`);
|
||||
}
|
||||
|
||||
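The compare-URL logic shown in this file can be read as one small pure function. The helper below is an illustrative sketch; buildCompareUrl is a hypothetical name, not part of the change.

// Hypothetical helper summarizing the URL construction used above.
function buildCompareUrl(opts: {
  repoUrl: string; // e.g. https://github.com/owner/repo
  upstreamRepo?: string; // "owner/repo" of the target repo when pushing from a fork
  originOwner?: string; // owner of the remote the branch was pushed to
  base: string;
  branch: string;
  title: string;
  body: string;
}): string {
  const q = `expand=1&title=${encodeURIComponent(opts.title)}&body=${encodeURIComponent(opts.body)}`;
  const base = encodeURIComponent(opts.base);
  const head = encodeURIComponent(opts.branch);
  if (opts.upstreamRepo && opts.originOwner) {
    // Fork / cross-remote workflow: compare upstream base against owner:branch
    return `https://github.com/${opts.upstreamRepo}/compare/${base}...${opts.originOwner}:${head}?${q}`;
  }
  // Same-repo workflow
  return `${opts.repoUrl}/compare/${base}...${head}?${q}`;
}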
@@ -4,7 +4,8 @@
|
||||
* This endpoint handles worktree creation with proper checks:
|
||||
* 1. First checks if git already has a worktree for the branch (anywhere)
|
||||
* 2. If found, returns the existing worktree (no error)
|
||||
* 3. Only creates a new worktree if none exists for the branch
|
||||
* 3. Syncs the base branch from its remote tracking branch (fast-forward only)
|
||||
* 4. Only creates a new worktree if none exists for the branch
|
||||
*/
|
||||
|
||||
import type { Request, Response } from 'express';
|
||||
@@ -27,9 +28,16 @@ import { execGitCommand } from '../../../lib/git.js';
|
||||
import { trackBranch } from './branch-tracking.js';
|
||||
import { createLogger } from '@automaker/utils';
|
||||
import { runInitScript } from '../../../services/init-script-service.js';
|
||||
import {
|
||||
syncBaseBranch,
|
||||
type BaseBranchSyncResult,
|
||||
} from '../../../services/branch-sync-service.js';
|
||||
|
||||
const logger = createLogger('Worktree');
|
||||
|
||||
/** Timeout for git fetch operations (30 seconds) */
|
||||
const FETCH_TIMEOUT_MS = 30_000;
|
||||
|
||||
const execAsync = promisify(exec);
|
||||
|
||||
/**
|
||||
@@ -91,7 +99,7 @@ export function createCreateHandler(events: EventEmitter, settingsService?: Sett
|
||||
const { projectPath, branchName, baseBranch } = req.body as {
|
||||
projectPath: string;
|
||||
branchName: string;
|
||||
baseBranch?: string; // Optional base branch to create from (defaults to current HEAD)
|
||||
baseBranch?: string; // Optional base branch to create from (defaults to current HEAD). Can be a remote branch like "origin/main".
|
||||
};
|
||||
|
||||
if (!projectPath || !branchName) {
|
||||
@@ -171,6 +179,71 @@ export function createCreateHandler(events: EventEmitter, settingsService?: Sett
|
||||
// Create worktrees directory if it doesn't exist
|
||||
await secureFs.mkdir(worktreesDir, { recursive: true });
|
||||
|
||||
// Fetch latest from all remotes before creating the worktree.
|
||||
// This ensures remote refs are up-to-date for:
|
||||
// - Remote base branches (e.g. "origin/main")
|
||||
// - Existing remote branches being checked out as worktrees
|
||||
// - Branch existence checks against fresh remote state
|
||||
logger.info('Fetching from all remotes before creating worktree');
|
||||
try {
|
||||
const controller = new AbortController();
|
||||
const timerId = setTimeout(() => controller.abort(), FETCH_TIMEOUT_MS);
|
||||
try {
|
||||
await execGitCommand(['fetch', '--all', '--quiet'], projectPath, undefined, controller);
|
||||
} finally {
|
||||
clearTimeout(timerId);
|
||||
}
|
||||
} catch (fetchErr) {
|
||||
// Non-fatal: log but continue — refs might already be cached locally
|
||||
logger.warn(`Failed to fetch from remotes: ${getErrorMessage(fetchErr)}`);
|
||||
}
|
||||
|
||||
// Sync the base branch with its remote tracking branch (fast-forward only).
|
||||
// This ensures the new worktree starts from an up-to-date state rather than
|
||||
// a potentially stale local copy. If the sync fails or the branch has diverged,
|
||||
// we proceed with the local copy and inform the user.
|
||||
const effectiveBase = baseBranch || 'HEAD';
|
||||
let syncResult: BaseBranchSyncResult = { attempted: false, synced: false };
|
||||
|
||||
// Only sync if the base is a real branch (not 'HEAD')
|
||||
// Pass skipFetch=true because we already fetched all remotes above.
|
||||
if (effectiveBase !== 'HEAD') {
|
||||
logger.info(`Syncing base branch '${effectiveBase}' before creating worktree`);
|
||||
syncResult = await syncBaseBranch(projectPath, effectiveBase, true);
|
||||
if (syncResult.attempted) {
|
||||
if (syncResult.synced) {
|
||||
logger.info(`Base branch sync result: ${syncResult.message}`);
|
||||
} else {
|
||||
logger.warn(`Base branch sync result: ${syncResult.message}`);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// When using HEAD, try to sync the currently checked-out branch
|
||||
// Pass skipFetch=true because we already fetched all remotes above.
|
||||
try {
|
||||
const currentBranch = await execGitCommand(
|
||||
['rev-parse', '--abbrev-ref', 'HEAD'],
|
||||
projectPath
|
||||
);
|
||||
const trimmedBranch = currentBranch.trim();
|
||||
if (trimmedBranch && trimmedBranch !== 'HEAD') {
|
||||
logger.info(
|
||||
`Syncing current branch '${trimmedBranch}' (HEAD) before creating worktree`
|
||||
);
|
||||
syncResult = await syncBaseBranch(projectPath, trimmedBranch, true);
|
||||
if (syncResult.attempted) {
|
||||
if (syncResult.synced) {
|
||||
logger.info(`HEAD branch sync result: ${syncResult.message}`);
|
||||
} else {
|
||||
logger.warn(`HEAD branch sync result: ${syncResult.message}`);
|
||||
}
|
||||
}
|
||||
}
|
||||
} catch {
|
||||
// Could not determine HEAD branch — skip sync
|
||||
}
|
||||
}
|
||||
|
||||
// Check if branch exists (using array arguments to prevent injection)
|
||||
let branchExists = false;
|
||||
try {
|
||||
@@ -204,6 +277,19 @@ export function createCreateHandler(events: EventEmitter, settingsService?: Sett
|
||||
// normalizePath converts to forward slashes for API consistency
|
||||
const absoluteWorktreePath = path.resolve(worktreePath);
|
||||
|
||||
// Get the commit hash the new worktree is based on for logging
|
||||
let baseCommitHash: string | undefined;
|
||||
try {
|
||||
const hash = await execGitCommand(['rev-parse', '--short', 'HEAD'], absoluteWorktreePath);
|
||||
baseCommitHash = hash.trim();
|
||||
} catch {
|
||||
// Non-critical — just for logging
|
||||
}
|
||||
|
||||
if (baseCommitHash) {
|
||||
logger.info(`New worktree for '${branchName}' based on commit ${baseCommitHash}`);
|
||||
}
|
||||
|
||||
// Copy configured files into the new worktree before responding
|
||||
// This runs synchronously to ensure files are in place before any init script
|
||||
try {
|
||||
@@ -225,6 +311,17 @@ export function createCreateHandler(events: EventEmitter, settingsService?: Sett
|
||||
path: normalizePath(absoluteWorktreePath),
|
||||
branch: branchName,
|
||||
isNew: !branchExists,
|
||||
baseCommitHash,
|
||||
...(syncResult.attempted
|
||||
? {
|
||||
syncResult: {
|
||||
synced: syncResult.synced,
|
||||
remote: syncResult.remote,
|
||||
message: syncResult.message,
|
||||
diverged: syncResult.diverged,
|
||||
},
|
||||
}
|
||||
: {}),
|
||||
},
|
||||
});
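Callers of the create endpoint can turn the optional syncResult into a user-facing message. A minimal sketch, assuming only the response fields emitted above:

// Illustrative client-side handling of the optional syncResult field.
interface CreateWorktreeResult {
  path: string;
  branch: string;
  isNew: boolean;
  baseCommitHash?: string;
  syncResult?: { synced: boolean; remote?: string; message?: string; diverged?: boolean };
}

function describeSync(result: CreateWorktreeResult): string {
  if (!result.syncResult) return 'Base branch sync was not attempted.';
  const { synced, diverged, message } = result.syncResult;
  if (synced) return message ?? 'Base branch fast-forwarded before creating the worktree.';
  if (diverged) return 'Base branch diverged from its remote; worktree was created from the local copy.';
  return message ?? 'Base branch could not be synced; using the local copy.';
}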
|
||||
|
||||
|
||||
@@ -5,6 +5,7 @@
|
||||
import type { Request, Response } from 'express';
|
||||
import { exec } from 'child_process';
|
||||
import { promisify } from 'util';
|
||||
import fs from 'fs/promises';
|
||||
import { isGitRepo } from '@automaker/git-utils';
|
||||
import { getErrorMessage, logError, isValidBranchName } from '../common.js';
|
||||
import { execGitCommand } from '../../../lib/git.js';
|
||||
@@ -46,20 +47,79 @@ export function createDeleteHandler() {
|
||||
});
|
||||
branchName = stdout.trim();
|
||||
} catch {
|
||||
// Could not get branch name
|
||||
// Could not get branch name - worktree directory may already be gone
|
||||
logger.debug('Could not determine branch for worktree, directory may be missing');
|
||||
}
|
||||
|
||||
// Remove the worktree (using array arguments to prevent injection)
|
||||
let removeSucceeded = false;
|
||||
try {
|
||||
await execGitCommand(['worktree', 'remove', worktreePath, '--force'], projectPath);
|
||||
} catch {
|
||||
// Try with prune if remove fails
|
||||
await execGitCommand(['worktree', 'prune'], projectPath);
|
||||
removeSucceeded = true;
|
||||
} catch (removeError) {
|
||||
// `git worktree remove` can fail if the directory is already missing
|
||||
// or in a bad state. Try pruning stale worktree entries as a fallback.
|
||||
logger.debug('git worktree remove failed, trying prune', {
|
||||
error: getErrorMessage(removeError),
|
||||
});
|
||||
try {
|
||||
await execGitCommand(['worktree', 'prune'], projectPath);
|
||||
|
||||
// Verify the specific worktree is no longer registered after prune.
|
||||
// `git worktree prune` exits 0 even if worktreePath was never registered,
|
||||
// so we must explicitly check the worktree list to avoid false positives.
|
||||
const { stdout: listOut } = await execAsync('git worktree list --porcelain', {
|
||||
cwd: projectPath,
|
||||
});
|
||||
// Parse porcelain output and check for an exact path match.
|
||||
// Using substring .includes() can produce false positives when one
|
||||
// worktree path is a prefix of another (e.g. /foo vs /foobar).
|
||||
const stillRegistered = listOut
|
||||
.split('\n')
|
||||
.filter((line) => line.startsWith('worktree '))
|
||||
.map((line) => line.slice('worktree '.length).trim())
|
||||
.some((registeredPath) => registeredPath === worktreePath);
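// Illustrative note: `git worktree list --porcelain` prints one block per worktree, e.g.
//   worktree /repo
//   HEAD 1a2b3c4...
//   branch refs/heads/main
//   (blank line)
//   worktree /repo/.automaker/worktrees/feature-x
// which is why only lines starting with "worktree " are inspected here, and why an
// exact-equality comparison is used instead of includes() (prefix paths would collide).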
|
||||
if (stillRegistered) {
|
||||
// Prune didn't clean up our entry - treat as failure
|
||||
throw removeError;
|
||||
}
|
||||
removeSucceeded = true;
|
||||
} catch (pruneError) {
|
||||
// If pruneError is the original removeError re-thrown, propagate it
|
||||
if (pruneError === removeError) {
|
||||
throw removeError;
|
||||
}
|
||||
logger.warn('git worktree prune also failed', {
|
||||
error: getErrorMessage(pruneError),
|
||||
});
|
||||
// If both remove and prune fail, still try to return success
|
||||
// if the worktree directory no longer exists (it may have been
|
||||
// manually deleted already).
|
||||
let dirExists = false;
|
||||
try {
|
||||
await fs.access(worktreePath);
|
||||
dirExists = true;
|
||||
} catch {
|
||||
// Directory doesn't exist
|
||||
}
|
||||
if (dirExists) {
|
||||
// Directory still exists - this is a real failure
|
||||
throw removeError;
|
||||
}
|
||||
// Directory is gone, treat as success
|
||||
removeSucceeded = true;
|
||||
}
|
||||
}
|
||||
|
||||
// Optionally delete the branch
|
||||
// Optionally delete the branch (only if worktree was successfully removed)
|
||||
let branchDeleted = false;
|
||||
if (deleteBranch && branchName && branchName !== 'main' && branchName !== 'master') {
|
||||
if (
|
||||
removeSucceeded &&
|
||||
deleteBranch &&
|
||||
branchName &&
|
||||
branchName !== 'main' &&
|
||||
branchName !== 'master'
|
||||
) {
|
||||
// Validate branch name to prevent command injection
|
||||
if (!isValidBranchName(branchName)) {
|
||||
logger.warn(`Invalid branch name detected, skipping deletion: ${branchName}`);
|
||||
|
||||
@@ -34,6 +34,7 @@ export function createDiffsHandler() {
diff: result.diff,
files: result.files,
hasChanges: result.hasChanges,
...(result.mergeState ? { mergeState: result.mergeState } : {}),
});
return;
}
@@ -55,6 +56,7 @@ export function createDiffsHandler() {
diff: result.diff,
files: result.files,
hasChanges: result.hasChanges,
...(result.mergeState ? { mergeState: result.mergeState } : {}),
});
} catch (innerError) {
// Worktree doesn't exist - fallback to main project path
@@ -71,6 +73,7 @@ export function createDiffsHandler() {
diff: result.diff,
files: result.files,
hasChanges: result.hasChanges,
...(result.mergeState ? { mergeState: result.mergeState } : {}),
});
} catch (fallbackError) {
logError(fallbackError, 'Fallback to main project also failed');

@@ -5,12 +5,12 @@
|
||||
* 1. Discard ALL changes (when no files array is provided)
|
||||
* - Resets staged changes (git reset HEAD)
|
||||
* - Discards modified tracked files (git checkout .)
|
||||
* - Removes untracked files and directories (git clean -fd)
|
||||
* - Removes untracked files and directories (git clean -ffd)
|
||||
*
|
||||
* 2. Discard SELECTED files (when files array is provided)
|
||||
* - Unstages selected staged files (git reset HEAD -- <files>)
|
||||
* - Reverts selected tracked file changes (git checkout -- <files>)
|
||||
* - Removes selected untracked files (git clean -fd -- <files>)
|
||||
* - Removes selected untracked files (git clean -ffd -- <files>)
|
||||
*
|
||||
* Note: Git repository validation (isGitRepo) is handled by
|
||||
* the requireGitRepoOnly middleware in index.ts
|
||||
@@ -52,6 +52,22 @@ function validateFilePath(filePath: string, worktreePath: string): boolean {
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Parse a file path from git status --porcelain output, handling renames.
|
||||
* For renamed files (R status), git reports "old_path -> new_path" and
|
||||
* we need the new path to match what parseGitStatus() returns in git-utils.
|
||||
*/
|
||||
function parseFilePath(rawPath: string, indexStatus: string, workTreeStatus: string): string {
|
||||
const trimmedPath = rawPath.trim();
|
||||
if (indexStatus === 'R' || workTreeStatus === 'R') {
|
||||
const arrowIndex = trimmedPath.indexOf(' -> ');
|
||||
if (arrowIndex !== -1) {
|
||||
return trimmedPath.slice(arrowIndex + 4);
|
||||
}
|
||||
}
|
||||
return trimmedPath;
|
||||
}
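// Worked example (illustrative): for a porcelain line "R  old/name.ts -> new/name.ts",
// the two-character status is "R " and the raw path is "old/name.ts -> new/name.ts";
// parseFilePath returns "new/name.ts", matching what parseGitStatus() in git-utils reports,
// so renamed files can be matched against the file list sent by the UI.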
|
||||
|
||||
export function createDiscardChangesHandler() {
|
||||
return async (req: Request, res: Response): Promise<void> => {
|
||||
try {
|
||||
@@ -91,11 +107,16 @@ export function createDiscardChangesHandler() {
|
||||
|
||||
// Parse the status output to categorize files
|
||||
// Git --porcelain format: XY PATH where X=index status, Y=worktree status
|
||||
// Preserve the exact two-character XY status (no trim) to keep index vs worktree info
|
||||
// For renamed files: XY OLD_PATH -> NEW_PATH
|
||||
const statusLines = status.trim().split('\n').filter(Boolean);
|
||||
const allFiles = statusLines.map((line) => {
|
||||
const fileStatus = line.substring(0, 2);
|
||||
const filePath = line.slice(3).trim();
|
||||
const rawPath = line.slice(3);
|
||||
const indexStatus = fileStatus.charAt(0);
|
||||
const workTreeStatus = fileStatus.charAt(1);
|
||||
// Parse path consistently with parseGitStatus() in git-utils,
|
||||
// which extracts the new path for renames
|
||||
const filePath = parseFilePath(rawPath, indexStatus, workTreeStatus);
|
||||
return { status: fileStatus, path: filePath };
|
||||
});
|
||||
|
||||
@@ -122,8 +143,12 @@ export function createDiscardChangesHandler() {
|
||||
const untrackedFiles: string[] = []; // Untracked files (?)
|
||||
const warnings: string[] = [];
|
||||
|
||||
// Track which requested files were matched so we can handle unmatched ones
|
||||
const matchedFiles = new Set<string>();
|
||||
|
||||
for (const file of allFiles) {
|
||||
if (!filesToDiscard.has(file.path)) continue;
|
||||
matchedFiles.add(file.path);
|
||||
|
||||
// file.status is the raw two-character XY git porcelain status (no trim)
|
||||
// X = index/staging status, Y = worktree status
|
||||
@@ -151,6 +176,16 @@ export function createDiscardChangesHandler() {
|
||||
}
|
||||
}
|
||||
|
||||
// Handle files from the UI that didn't match any entry in allFiles.
|
||||
// This can happen due to timing differences between the UI loading diffs
|
||||
// and the discard request, or path format differences.
|
||||
// Attempt to clean unmatched files directly as untracked files.
|
||||
for (const requestedFile of files) {
|
||||
if (!matchedFiles.has(requestedFile)) {
|
||||
untrackedFiles.push(requestedFile);
|
||||
}
|
||||
}
|
||||
|
||||
// 1. Unstage selected staged files (using execFile to bypass shell)
|
||||
if (stagedFiles.length > 0) {
|
||||
try {
|
||||
@@ -174,9 +209,10 @@ export function createDiscardChangesHandler() {
|
||||
}
|
||||
|
||||
// 3. Remove selected untracked files
|
||||
// Use -ffd (double force) to also handle nested git repositories
|
||||
if (untrackedFiles.length > 0) {
|
||||
try {
|
||||
await execGitCommand(['clean', '-fd', '--', ...untrackedFiles], worktreePath);
|
||||
await execGitCommand(['clean', '-ffd', '--', ...untrackedFiles], worktreePath);
|
||||
} catch (error) {
|
||||
const msg = getErrorMessage(error);
|
||||
logError(error, `Failed to clean untracked files: ${msg}`);
|
||||
@@ -234,11 +270,12 @@ export function createDiscardChangesHandler() {
|
||||
}
|
||||
|
||||
// 3. Remove untracked files and directories
|
||||
// Use -ffd (double force) to also handle nested git repositories
|
||||
try {
|
||||
await execGitCommand(['clean', '-fd'], worktreePath);
|
||||
await execGitCommand(['clean', '-ffd', '--'], worktreePath);
|
||||
} catch (error) {
|
||||
const msg = getErrorMessage(error);
|
||||
logError(error, `git clean -fd failed: ${msg}`);
|
||||
logError(error, `git clean -ffd failed: ${msg}`);
|
||||
warnings.push(`Failed to remove untracked files: ${msg}`);
|
||||
}
|
||||
|
||||
|
||||
@@ -6,7 +6,7 @@
|
||||
*/
|
||||
|
||||
import type { Request, Response } from 'express';
|
||||
import { exec } from 'child_process';
|
||||
import { execFile } from 'child_process';
|
||||
import { promisify } from 'util';
|
||||
import { existsSync } from 'fs';
|
||||
import { join } from 'path';
|
||||
@@ -20,7 +20,7 @@ import { getErrorMessage, logError } from '../common.js';
|
||||
import { getPhaseModelWithOverrides } from '../../../lib/settings-helpers.js';
|
||||
|
||||
const logger = createLogger('GenerateCommitMessage');
|
||||
const execAsync = promisify(exec);
|
||||
const execFileAsync = promisify(execFile);
|
||||
|
||||
/** Timeout for AI provider calls in milliseconds (30 seconds) */
|
||||
const AI_TIMEOUT_MS = 30_000;
|
||||
@@ -33,20 +33,39 @@ async function* withTimeout<T>(
|
||||
generator: AsyncIterable<T>,
|
||||
timeoutMs: number
|
||||
): AsyncGenerator<T, void, unknown> {
|
||||
let timerId: ReturnType<typeof setTimeout> | undefined;
|
||||
|
||||
const timeoutPromise = new Promise<never>((_, reject) => {
|
||||
setTimeout(() => reject(new Error(`AI provider timed out after ${timeoutMs}ms`)), timeoutMs);
|
||||
timerId = setTimeout(
|
||||
() => reject(new Error(`AI provider timed out after ${timeoutMs}ms`)),
|
||||
timeoutMs
|
||||
);
|
||||
});
|
||||
|
||||
const iterator = generator[Symbol.asyncIterator]();
|
||||
let done = false;
|
||||
|
||||
while (!done) {
|
||||
const result = await Promise.race([iterator.next(), timeoutPromise]);
|
||||
if (result.done) {
|
||||
done = true;
|
||||
} else {
|
||||
yield result.value;
|
||||
try {
|
||||
while (!done) {
|
||||
const result = await Promise.race([iterator.next(), timeoutPromise]).catch(async (err) => {
|
||||
// Capture the original error, then attempt to close the iterator.
|
||||
// If iterator.return() throws, log it but rethrow the original error
|
||||
// so the timeout error (not the teardown error) is preserved.
|
||||
try {
|
||||
await iterator.return?.();
|
||||
} catch (teardownErr) {
|
||||
logger.warn('Error during iterator cleanup after timeout:', teardownErr);
|
||||
}
|
||||
throw err;
|
||||
});
|
||||
if (result.done) {
|
||||
done = true;
|
||||
} else {
|
||||
yield result.value;
|
||||
}
|
||||
}
|
||||
} finally {
|
||||
clearTimeout(timerId);
|
||||
}
|
||||
}
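For context, the wrapper above is consumed like any other async iterable. A minimal usage sketch in which every name except withTimeout and AI_TIMEOUT_MS is illustrative:

// Hypothetical consumer of withTimeout.
async function collectResponse(stream: AsyncIterable<{ type: string; text?: string }>) {
  let text = '';
  for await (const msg of withTimeout(stream, AI_TIMEOUT_MS)) {
    if (msg.type === 'text' && msg.text) text += msg.text;
  }
  // Throws if the whole stream has not finished within AI_TIMEOUT_MS,
  // since the timer is started once for the entire iteration.
  return text;
}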
|
||||
|
||||
@@ -117,14 +136,14 @@ export function createGenerateCommitMessageHandler(
|
||||
let diff = '';
|
||||
try {
|
||||
// First try to get staged changes
|
||||
const { stdout: stagedDiff } = await execAsync('git diff --cached', {
|
||||
const { stdout: stagedDiff } = await execFileAsync('git', ['diff', '--cached'], {
|
||||
cwd: worktreePath,
|
||||
maxBuffer: 1024 * 1024 * 5, // 5MB buffer
|
||||
});
|
||||
|
||||
// If no staged changes, get unstaged changes
|
||||
if (!stagedDiff.trim()) {
|
||||
const { stdout: unstagedDiff } = await execAsync('git diff', {
|
||||
const { stdout: unstagedDiff } = await execFileAsync('git', ['diff'], {
|
||||
cwd: worktreePath,
|
||||
maxBuffer: 1024 * 1024 * 5, // 5MB buffer
|
||||
});
|
||||
@@ -213,14 +232,16 @@ export function createGenerateCommitMessageHandler(
|
||||
}
|
||||
}
|
||||
} else if (msg.type === 'result' && msg.subtype === 'success' && msg.result) {
|
||||
// Use result if available (some providers return final text here)
|
||||
responseText = msg.result;
|
||||
// Use result text if longer than accumulated text (consistent with simpleQuery pattern)
|
||||
if (msg.result.length > responseText.length) {
|
||||
responseText = msg.result;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const message = responseText.trim();
|
||||
|
||||
if (!message || message.trim().length === 0) {
|
||||
if (!message) {
|
||||
logger.warn('Received empty response from model');
|
||||
const response: GenerateCommitMessageErrorResponse = {
|
||||
success: false,
|
||||
|
||||
@@ -30,6 +30,8 @@ const MAX_DIFF_SIZE = 15_000;
|
||||
|
||||
const PR_DESCRIPTION_SYSTEM_PROMPT = `You are a pull request description generator. Your task is to create a clear, well-structured PR title and description based on the git diff and branch information provided.
|
||||
|
||||
IMPORTANT: Do NOT include any conversational text, explanations, or preamble. Do NOT say things like "I'll analyze..." or "Here is...". Output ONLY the structured format below and nothing else.
|
||||
|
||||
Output your response in EXACTLY this format (including the markers):
|
||||
---TITLE---
|
||||
<a concise PR title, 50-72 chars, imperative mood>
|
||||
@@ -41,6 +43,7 @@ Output your response in EXACTLY this format (including the markers):
|
||||
<Detailed list of what was changed and why>
|
||||
|
||||
Rules:
|
||||
- Your ENTIRE response must start with ---TITLE--- and contain nothing before it
|
||||
- The title should be concise and descriptive (50-72 characters)
|
||||
- Use imperative mood for the title (e.g., "Add dark mode toggle" not "Added dark mode toggle")
|
||||
- The description should explain WHAT changed and WHY
|
||||
@@ -50,7 +53,9 @@ Rules:
|
||||
- Focus on the user-facing impact when possible
|
||||
- If there are breaking changes, mention them prominently
|
||||
- The diff may include both committed changes and uncommitted working directory changes. Treat all changes as part of the PR since uncommitted changes will be committed when the PR is created
|
||||
- Do NOT distinguish between committed and uncommitted changes in the output - describe all changes as a unified set of PR changes`;
|
||||
- Do NOT distinguish between committed and uncommitted changes in the output - describe all changes as a unified set of PR changes
|
||||
- EXCLUDE any files that are gitignored (e.g., node_modules, dist, build, .env files, lock files, generated files, binary artifacts, coverage reports, cache directories). These should not be mentioned in the description even if they appear in the diff
|
||||
- Focus only on meaningful source code changes that are tracked by git and relevant to reviewers`;
|
||||
|
||||
/**
|
||||
* Wraps an async generator with a timeout.
|
||||
@@ -165,127 +170,125 @@ export function createGeneratePRDescriptionHandler(
|
||||
// Determine the base branch for comparison
|
||||
const base = baseBranch || 'main';
|
||||
|
||||
// Get the diff between current branch and base branch (committed changes)
|
||||
// Track whether the diff method used only includes committed changes.
|
||||
// `git diff base...HEAD` and `git diff origin/base...HEAD` only show committed changes,
|
||||
// while the fallback methods (`git diff HEAD`, `git diff --cached + git diff`) already
|
||||
// include uncommitted working directory changes.
|
||||
let diff = '';
|
||||
let diffIncludesUncommitted = false;
|
||||
// Collect diffs in three layers and combine them:
|
||||
// 1. Committed changes on the branch: `git diff base...HEAD`
|
||||
// 2. Staged (cached) changes not yet committed: `git diff --cached`
|
||||
// 3. Unstaged changes to tracked files: `git diff` (no --cached flag)
|
||||
//
|
||||
// Untracked files are intentionally excluded — they are typically build artifacts,
|
||||
// planning files, hidden dotfiles, or other files unrelated to the PR.
|
||||
// `git diff` and `git diff --cached` only show changes to files already tracked by git,
|
||||
// which is exactly the correct scope.
|
||||
//
|
||||
// We combine all three sources and deduplicate by file path so that a file modified
|
||||
// in commits AND with additional uncommitted changes is not double-counted.
|
||||
|
||||
/** Parse a unified diff into per-file hunks keyed by file path */
|
||||
function parseDiffIntoFileHunks(diffText: string): Map<string, string> {
|
||||
const fileHunks = new Map<string, string>();
|
||||
if (!diffText.trim()) return fileHunks;
|
||||
|
||||
// Split on "diff --git" boundaries (keep the delimiter)
|
||||
const sections = diffText.split(/(?=^diff --git )/m);
|
||||
for (const section of sections) {
|
||||
if (!section.trim()) continue;
|
||||
// Use a back-reference pattern so the "b/" side must match the "a/" capture,
|
||||
// correctly handling paths that contain " b/" in their name.
|
||||
// Falls back to a two-capture pattern to handle renames (a/ and b/ differ).
|
||||
const backrefMatch = section.match(/^diff --git a\/(.+) b\/\1$/m);
|
||||
const renameMatch = !backrefMatch ? section.match(/^diff --git a\/(.+) b\/(.+)$/m) : null;
|
||||
const match = backrefMatch || renameMatch;
|
||||
if (match) {
|
||||
// Prefer the backref capture (identical paths); for renames use the destination (match[2])
|
||||
const filePath = backrefMatch ? match[1] : match[2];
|
||||
// Merge hunks if the same file appears in multiple diff sources
|
||||
const existing = fileHunks.get(filePath) ?? '';
|
||||
fileHunks.set(filePath, existing + section);
|
||||
}
|
||||
}
|
||||
return fileHunks;
|
||||
}
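// Worked example (illustrative): if src/app.ts appears in both the committed diff and the
// unstaged diff, parseDiffIntoFileHunks keys both sections under "src/app.ts" and the merge
// loop below concatenates them, so the file is described once rather than twice.
// The back-reference pattern `a\/(.+) b\/\1` treats "diff --git a/a b/c.ts b/a b/c.ts" as the
// single path "a b/c.ts", while the two-capture fallback still picks up genuine renames.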
|
||||
|
||||
// --- Step 1: committed changes (branch vs base) ---
|
||||
let committedDiff = '';
|
||||
try {
|
||||
// First, try to get diff against the base branch
|
||||
const { stdout: branchDiff } = await execFileAsync('git', ['diff', `${base}...HEAD`], {
|
||||
const { stdout } = await execFileAsync('git', ['diff', `${base}...HEAD`], {
|
||||
cwd: worktreePath,
|
||||
maxBuffer: 1024 * 1024 * 5, // 5MB buffer
|
||||
maxBuffer: 1024 * 1024 * 5,
|
||||
});
|
||||
diff = branchDiff;
|
||||
// git diff base...HEAD only shows committed changes
|
||||
diffIncludesUncommitted = false;
|
||||
committedDiff = stdout;
|
||||
} catch {
|
||||
// If branch comparison fails (e.g., base branch doesn't exist locally),
|
||||
// try fetching and comparing against remote base
|
||||
// Base branch may not exist locally; try the remote tracking branch
|
||||
try {
|
||||
const { stdout: remoteDiff } = await execFileAsync(
|
||||
'git',
|
||||
['diff', `origin/${base}...HEAD`],
|
||||
{
|
||||
cwd: worktreePath,
|
||||
maxBuffer: 1024 * 1024 * 5,
|
||||
}
|
||||
);
|
||||
diff = remoteDiff;
|
||||
// git diff origin/base...HEAD only shows committed changes
|
||||
diffIncludesUncommitted = false;
|
||||
const { stdout } = await execFileAsync('git', ['diff', `origin/${base}...HEAD`], {
|
||||
cwd: worktreePath,
|
||||
maxBuffer: 1024 * 1024 * 5,
|
||||
});
|
||||
committedDiff = stdout;
|
||||
} catch {
|
||||
// Fall back to getting all uncommitted + committed changes
|
||||
try {
|
||||
const { stdout: allDiff } = await execFileAsync('git', ['diff', 'HEAD'], {
|
||||
cwd: worktreePath,
|
||||
maxBuffer: 1024 * 1024 * 5,
|
||||
});
|
||||
diff = allDiff;
|
||||
// git diff HEAD includes uncommitted changes
|
||||
diffIncludesUncommitted = true;
|
||||
} catch {
|
||||
// Last resort: get staged + unstaged changes
|
||||
const { stdout: stagedDiff } = await execFileAsync('git', ['diff', '--cached'], {
|
||||
cwd: worktreePath,
|
||||
maxBuffer: 1024 * 1024 * 5,
|
||||
});
|
||||
const { stdout: unstagedDiff } = await execFileAsync('git', ['diff'], {
|
||||
cwd: worktreePath,
|
||||
maxBuffer: 1024 * 1024 * 5,
|
||||
});
|
||||
diff = stagedDiff + unstagedDiff;
|
||||
// These already include uncommitted changes
|
||||
diffIncludesUncommitted = true;
|
||||
}
|
||||
// Cannot compare against base — leave committedDiff empty; the uncommitted
|
||||
// changes gathered below will still be included.
|
||||
logger.warn(`Could not get committed diff against ${base} or origin/${base}`);
|
||||
}
|
||||
}
|
||||
|
||||
// Check for uncommitted changes (staged + unstaged) to include in the description.
|
||||
// When creating a PR, uncommitted changes will be auto-committed, so they should be
|
||||
// reflected in the generated description. We only need to fetch uncommitted diffs
|
||||
// when the primary diff method (base...HEAD) was used, since it only shows committed changes.
|
||||
let hasUncommittedChanges = false;
|
||||
// --- Step 2: staged changes (tracked files only) ---
|
||||
let stagedDiff = '';
|
||||
try {
|
||||
const { stdout: statusOutput } = await execFileAsync('git', ['status', '--porcelain'], {
|
||||
const { stdout } = await execFileAsync('git', ['diff', '--cached'], {
|
||||
cwd: worktreePath,
|
||||
maxBuffer: 1024 * 1024 * 5,
|
||||
});
|
||||
hasUncommittedChanges = statusOutput.trim().length > 0;
|
||||
|
||||
if (hasUncommittedChanges && !diffIncludesUncommitted) {
|
||||
logger.info('Uncommitted changes detected, including in PR description context');
|
||||
|
||||
let uncommittedDiff = '';
|
||||
|
||||
// Get staged changes
|
||||
try {
|
||||
const { stdout: stagedDiff } = await execFileAsync('git', ['diff', '--cached'], {
|
||||
cwd: worktreePath,
|
||||
maxBuffer: 1024 * 1024 * 5,
|
||||
});
|
||||
if (stagedDiff.trim()) {
|
||||
uncommittedDiff += stagedDiff;
|
||||
}
|
||||
} catch {
|
||||
// Ignore staged diff errors
|
||||
}
|
||||
|
||||
// Get unstaged changes (tracked files only)
|
||||
try {
|
||||
const { stdout: unstagedDiff } = await execFileAsync('git', ['diff'], {
|
||||
cwd: worktreePath,
|
||||
maxBuffer: 1024 * 1024 * 5,
|
||||
});
|
||||
if (unstagedDiff.trim()) {
|
||||
uncommittedDiff += unstagedDiff;
|
||||
}
|
||||
} catch {
|
||||
// Ignore unstaged diff errors
|
||||
}
|
||||
|
||||
// Get list of untracked files for context
|
||||
const untrackedFiles = statusOutput
|
||||
.split('\n')
|
||||
.filter((line) => line.startsWith('??'))
|
||||
.map((line) => line.substring(3).trim());
|
||||
|
||||
if (untrackedFiles.length > 0) {
|
||||
// Add a summary of untracked (new) files as context
|
||||
uncommittedDiff += `\n# New untracked files:\n${untrackedFiles.map((f) => `# + ${f}`).join('\n')}\n`;
|
||||
}
|
||||
|
||||
// Append uncommitted changes to the committed diff
|
||||
if (uncommittedDiff.trim()) {
|
||||
diff = diff + uncommittedDiff;
|
||||
}
|
||||
}
|
||||
} catch {
|
||||
// Ignore errors checking for uncommitted changes
|
||||
stagedDiff = stdout;
|
||||
} catch (err) {
|
||||
// Non-fatal — staged diff is a best-effort supplement
|
||||
logger.debug('Failed to get staged diff', err);
|
||||
}
|
||||
|
||||
// Also get the commit log for context
|
||||
// --- Step 3: unstaged changes (tracked files only) ---
|
||||
let unstagedDiff = '';
|
||||
try {
|
||||
const { stdout } = await execFileAsync('git', ['diff'], {
|
||||
cwd: worktreePath,
|
||||
maxBuffer: 1024 * 1024 * 5,
|
||||
});
|
||||
unstagedDiff = stdout;
|
||||
} catch (err) {
|
||||
// Non-fatal — unstaged diff is a best-effort supplement
|
||||
logger.debug('Failed to get unstaged diff', err);
|
||||
}
|
||||
|
||||
// --- Combine and deduplicate ---
|
||||
// Build a map of filePath → diff content by concatenating hunks from all sources
|
||||
// in chronological order (committed → staged → unstaged) so that no changes
|
||||
// are lost when a file appears in multiple diff sources.
|
||||
const combinedFileHunks = new Map<string, string>();
|
||||
|
||||
for (const source of [committedDiff, stagedDiff, unstagedDiff]) {
|
||||
const hunks = parseDiffIntoFileHunks(source);
|
||||
for (const [filePath, hunk] of hunks) {
|
||||
if (combinedFileHunks.has(filePath)) {
|
||||
combinedFileHunks.set(filePath, combinedFileHunks.get(filePath)! + hunk);
|
||||
} else {
|
||||
combinedFileHunks.set(filePath, hunk);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const diff = Array.from(combinedFileHunks.values()).join('');
|
||||
|
||||
// Log what files were included for observability
|
||||
if (combinedFileHunks.size > 0) {
|
||||
logger.info(`PR description scope: ${combinedFileHunks.size} file(s)`);
|
||||
logger.debug(
|
||||
`PR description scope files: ${Array.from(combinedFileHunks.keys()).join(', ')}`
|
||||
);
|
||||
}
|
||||
|
||||
// Also get the commit log for context — always scoped to the selected base branch
|
||||
// so the log only contains commits that are part of this PR.
|
||||
// We do NOT fall back to an unscoped `git log` because that would include commits
|
||||
// from the base branch itself and produce misleading AI context.
|
||||
let commitLog = '';
|
||||
try {
|
||||
const { stdout: logOutput } = await execFileAsync(
|
||||
@@ -298,11 +301,11 @@ export function createGeneratePRDescriptionHandler(
|
||||
);
|
||||
commitLog = logOutput.trim();
|
||||
} catch {
|
||||
// If comparing against base fails, fall back to recent commits
|
||||
// Base branch not available locally — try the remote tracking branch
|
||||
try {
|
||||
const { stdout: logOutput } = await execFileAsync(
|
||||
'git',
|
||||
['log', '--oneline', '-10', '--no-decorate'],
|
||||
['log', `origin/${base}..HEAD`, '--oneline', '--no-decorate'],
|
||||
{
|
||||
cwd: worktreePath,
|
||||
maxBuffer: 1024 * 1024,
|
||||
@@ -310,7 +313,9 @@ export function createGeneratePRDescriptionHandler(
|
||||
);
|
||||
commitLog = logOutput.trim();
|
||||
} catch {
|
||||
// Ignore commit log errors
|
||||
// Cannot scope commit log to base branch — leave empty rather than
|
||||
// including unscoped commits that would pollute the AI context.
|
||||
logger.warn(`Could not get commit log against ${base} or origin/${base}`);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -336,10 +341,6 @@ export function createGeneratePRDescriptionHandler(
|
||||
userPrompt += `\nCommit History:\n${commitLog}\n`;
|
||||
}
|
||||
|
||||
if (hasUncommittedChanges) {
|
||||
userPrompt += `\nNote: This branch has uncommitted changes that will be included in the PR.\n`;
|
||||
}
|
||||
|
||||
if (truncatedDiff) {
|
||||
userPrompt += `\n\`\`\`diff\n${truncatedDiff}\n\`\`\``;
|
||||
}
|
||||
@@ -397,7 +398,10 @@ export function createGeneratePRDescriptionHandler(
|
||||
}
|
||||
}
|
||||
} else if (msg.type === 'result' && msg.subtype === 'success' && msg.result) {
|
||||
responseText = msg.result;
|
||||
// Use result text if longer than accumulated text (consistent with simpleQuery pattern)
|
||||
if (msg.result.length > responseText.length) {
|
||||
responseText = msg.result;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -413,7 +417,9 @@ export function createGeneratePRDescriptionHandler(
|
||||
return;
|
||||
}
|
||||
|
||||
// Parse the response to extract title and body
|
||||
// Parse the response to extract title and body.
|
||||
// The model may include conversational preamble before the structured markers,
|
||||
// so we search for the markers anywhere in the response, not just at the start.
|
||||
let title = '';
|
||||
let body = '';
|
||||
|
||||
@@ -424,14 +430,46 @@ export function createGeneratePRDescriptionHandler(
|
||||
title = titleMatch[1].trim();
|
||||
body = bodyMatch[1].trim();
|
||||
} else {
|
||||
// Fallback: treat first line as title, rest as body
|
||||
const lines = fullResponse.split('\n');
|
||||
title = lines[0].trim();
|
||||
body = lines.slice(1).join('\n').trim();
|
||||
// Fallback: try to extract meaningful content, skipping any conversational preamble.
|
||||
// Common preamble patterns start with "I'll", "I will", "Here", "Let me", "Based on", etc.
|
||||
const lines = fullResponse.split('\n').filter((line) => line.trim().length > 0);
|
||||
|
||||
// Skip lines that look like conversational preamble
|
||||
let startIndex = 0;
|
||||
for (let i = 0; i < lines.length; i++) {
|
||||
const line = lines[i].trim();
|
||||
// Check if this line looks like conversational AI preamble
|
||||
if (
|
||||
/^(I'll|I will|Here('s| is| are)|Let me|Based on|Looking at|Analyzing|Sure|OK|Okay|Of course)/i.test(
|
||||
line
|
||||
) ||
|
||||
/^(The following|Below is|This (is|will)|After (analyzing|reviewing|looking))/i.test(
|
||||
line
|
||||
)
|
||||
) {
|
||||
startIndex = i + 1;
|
||||
continue;
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
// Use remaining lines after skipping preamble
|
||||
const contentLines = lines.slice(startIndex);
|
||||
if (contentLines.length > 0) {
|
||||
title = contentLines[0].trim();
|
||||
body = contentLines.slice(1).join('\n').trim();
|
||||
} else {
|
||||
// If all lines were filtered as preamble, use the original first non-empty line
|
||||
title = lines[0]?.trim() || '';
|
||||
body = lines.slice(1).join('\n').trim();
|
||||
}
|
||||
}
|
||||
|
||||
// Clean up title - remove any markdown or quotes
|
||||
title = title.replace(/^#+\s*/, '').replace(/^["']|["']$/g, '');
|
||||
// Clean up title - remove any markdown headings, quotes, or marker artifacts
|
||||
title = title
|
||||
.replace(/^#+\s*/, '')
|
||||
.replace(/^["']|["']$/g, '')
|
||||
.replace(/^---\w+---\s*/, '');
|
||||
|
||||
logger.info(`Generated PR title: ${title.substring(0, 100)}...`);
|
||||
|
||||
|
||||
@@ -9,6 +9,7 @@ import type { Request, Response } from 'express';
import { exec, execFile } from 'child_process';
import { promisify } from 'util';
import { getErrorMessage, logWorktreeError } from '../common.js';
import { getRemotesWithBranch } from '../../../services/worktree-service.js';

const execAsync = promisify(exec);
const execFileAsync = promisify(execFile);
@@ -130,6 +131,9 @@ export function createListBranchesHandler() {
let aheadCount = 0;
let behindCount = 0;
let hasRemoteBranch = false;
let trackingRemote: string | undefined;
// List of remote names that have a branch matching the current branch name
let remotesWithBranch: string[] = [];
try {
// First check if there's a remote tracking branch
const { stdout: upstreamOutput } = await execFileAsync(
@@ -138,8 +142,14 @@ export function createListBranchesHandler() {
{ cwd: worktreePath }
);

if (upstreamOutput.trim()) {
const upstreamRef = upstreamOutput.trim();
if (upstreamRef) {
hasRemoteBranch = true;
// Extract the remote name from the upstream ref (e.g. "origin/main" -> "origin")
const slashIndex = upstreamRef.indexOf('/');
if (slashIndex !== -1) {
trackingRemote = upstreamRef.slice(0, slashIndex);
}
const { stdout: aheadBehindOutput } = await execFileAsync(
'git',
['rev-list', '--left-right', '--count', `${currentBranch}@{upstream}...HEAD`],
@@ -165,6 +175,12 @@ export function createListBranchesHandler() {
}
}

// Check which remotes have a branch matching the current branch name.
// This helps the UI distinguish between "branch exists on tracking remote" vs
// "branch was pushed to a different remote" (e.g., pushed to 'upstream' but tracking 'origin').
// Use for-each-ref to check cached remote refs (already fetched above if includeRemote was true)
remotesWithBranch = await getRemotesWithBranch(worktreePath, currentBranch, hasAnyRemotes);

res.json({
success: true,
result: {
@@ -174,6 +190,8 @@ export function createListBranchesHandler() {
behindCount,
hasRemoteBranch,
hasAnyRemotes,
trackingRemote,
remotesWithBranch,
},
});
} catch (error) {

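A UI consuming this result can combine trackingRemote with remotesWithBranch to pick a push hint. A minimal sketch, assuming only the fields listed in the JSON result above:

// Illustrative helper; field names match the result object returned above.
function describeBranchRemotes(r: {
  hasRemoteBranch: boolean;
  trackingRemote?: string;
  remotesWithBranch: string[];
}): string {
  if (r.hasRemoteBranch && r.trackingRemote) {
    return `Tracking ${r.trackingRemote}`;
  }
  if (r.remotesWithBranch.length > 0) {
    // Branch was pushed somewhere, but no upstream is configured for it.
    return `Pushed to ${r.remotesWithBranch.join(', ')} (no tracking branch set)`;
  }
  return 'Not pushed to any remote';
}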
@@ -20,7 +20,12 @@ export function createMergeHandler(events: EventEmitter) {
branchName: string;
worktreePath: string;
targetBranch?: string; // Branch to merge into (defaults to 'main')
options?: { squash?: boolean; message?: string; deleteWorktreeAndBranch?: boolean };
options?: {
squash?: boolean;
message?: string;
deleteWorktreeAndBranch?: boolean;
remote?: string;
};
};

if (!projectPath || !branchName || !worktreePath) {

@@ -83,6 +83,9 @@ function mapResultToResponse(res: Response, result: PullResult): void {
stashed: result.stashed,
stashRestored: result.stashRestored,
message: result.message,
isMerge: result.isMerge,
isFastForward: result.isFastForward,
mergeAffectedFiles: result.mergeAffectedFiles,
},
});
}

@@ -1,24 +1,24 @@
|
||||
/**
|
||||
* POST /push endpoint - Push a worktree branch to remote
|
||||
*
|
||||
* Git business logic is delegated to push-service.ts.
|
||||
*
|
||||
* Note: Git repository validation (isGitRepo, hasCommits) is handled by
|
||||
* the requireValidWorktree middleware in index.ts
|
||||
*/
|
||||
|
||||
import type { Request, Response } from 'express';
|
||||
import { exec } from 'child_process';
|
||||
import { promisify } from 'util';
|
||||
import { getErrorMessage, logError } from '../common.js';
|
||||
|
||||
const execAsync = promisify(exec);
|
||||
import { performPush } from '../../../services/push-service.js';
|
||||
|
||||
export function createPushHandler() {
|
||||
return async (req: Request, res: Response): Promise<void> => {
|
||||
try {
|
||||
const { worktreePath, force, remote } = req.body as {
|
||||
const { worktreePath, force, remote, autoResolve } = req.body as {
|
||||
worktreePath: string;
|
||||
force?: boolean;
|
||||
remote?: string;
|
||||
autoResolve?: boolean;
|
||||
};
|
||||
|
||||
if (!worktreePath) {
|
||||
@@ -29,34 +29,28 @@ export function createPushHandler() {
|
||||
return;
|
||||
}
|
||||
|
||||
// Get branch name
|
||||
const { stdout: branchOutput } = await execAsync('git rev-parse --abbrev-ref HEAD', {
|
||||
cwd: worktreePath,
|
||||
});
|
||||
const branchName = branchOutput.trim();
|
||||
const result = await performPush(worktreePath, { remote, force, autoResolve });
|
||||
|
||||
// Use specified remote or default to 'origin'
|
||||
const targetRemote = remote || 'origin';
|
||||
|
||||
// Push the branch
|
||||
const forceFlag = force ? '--force' : '';
|
||||
try {
|
||||
await execAsync(`git push -u ${targetRemote} ${branchName} ${forceFlag}`, {
|
||||
cwd: worktreePath,
|
||||
});
|
||||
} catch {
|
||||
// Try setting upstream
|
||||
await execAsync(`git push --set-upstream ${targetRemote} ${branchName} ${forceFlag}`, {
|
||||
cwd: worktreePath,
|
||||
if (!result.success) {
|
||||
const statusCode = isClientError(result.error ?? '') ? 400 : 500;
|
||||
res.status(statusCode).json({
|
||||
success: false,
|
||||
error: result.error,
|
||||
diverged: result.diverged,
|
||||
hasConflicts: result.hasConflicts,
|
||||
conflictFiles: result.conflictFiles,
|
||||
});
|
||||
return;
|
||||
}
|
||||
|
||||
res.json({
|
||||
success: true,
|
||||
result: {
|
||||
branch: branchName,
|
||||
pushed: true,
|
||||
message: `Successfully pushed ${branchName} to ${targetRemote}`,
|
||||
branch: result.branch,
|
||||
pushed: result.pushed,
|
||||
diverged: result.diverged,
|
||||
autoResolved: result.autoResolved,
|
||||
message: result.message,
|
||||
},
|
||||
});
|
||||
} catch (error) {
|
||||
@@ -65,3 +59,15 @@ export function createPushHandler() {
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Determine whether an error message represents a client error (400)
|
||||
* vs a server error (500).
|
||||
*/
|
||||
function isClientError(errorMessage: string): boolean {
|
||||
return (
|
||||
errorMessage.includes('detached HEAD') ||
|
||||
errorMessage.includes('rejected') ||
|
||||
errorMessage.includes('diverged')
|
||||
);
|
||||
}
|
||||
|
||||
@@ -14,17 +14,19 @@

import type { Request, Response } from 'express';
import path from 'path';
import { getErrorMessage, logError, isValidBranchName } from '../common.js';
import { getErrorMessage, logError, isValidBranchName, isValidRemoteName } from '../common.js';
import type { EventEmitter } from '../../../lib/events.js';
import { runRebase } from '../../../services/rebase-service.js';

export function createRebaseHandler(events: EventEmitter) {
return async (req: Request, res: Response): Promise<void> => {
try {
const { worktreePath, ontoBranch } = req.body as {
const { worktreePath, ontoBranch, remote } = req.body as {
worktreePath: string;
/** The branch/ref to rebase onto (e.g., 'origin/main', 'main') */
ontoBranch: string;
/** Remote name to fetch from before rebasing (defaults to 'origin') */
remote?: string;
};

if (!worktreePath) {
@@ -55,6 +57,15 @@ export function createRebaseHandler(events: EventEmitter) {
return;
}

// Validate optional remote name to reject unsafe characters at the route layer
if (remote !== undefined && !isValidRemoteName(remote)) {
res.status(400).json({
success: false,
error: `Invalid remote name: "${remote}"`,
});
return;
}

// Emit started event
events.emit('rebase:started', {
worktreePath: resolvedWorktreePath,
@@ -62,7 +73,7 @@ export function createRebaseHandler(events: EventEmitter) {
});

// Execute the rebase via the service
const result = await runRebase(resolvedWorktreePath, ontoBranch);
const result = await runRebase(resolvedWorktreePath, ontoBranch, { remote });

if (result.success) {
// Emit success event

76
apps/server/src/routes/worktree/routes/set-tracking.ts
Normal file
@@ -0,0 +1,76 @@
/**
 * POST /set-tracking endpoint - Set the upstream tracking branch for a worktree
 *
 * Sets `git branch --set-upstream-to=<remote>/<branch>` for the current branch.
 *
 * Note: Git repository validation (isGitRepo, hasCommits) is handled by
 * the requireValidWorktree middleware in index.ts
 */

import type { Request, Response } from 'express';
import { execGitCommand } from '@automaker/git-utils';
import { getErrorMessage, logError } from '../common.js';
import { getCurrentBranch } from '../../../lib/git.js';

export function createSetTrackingHandler() {
  return async (req: Request, res: Response): Promise<void> => {
    try {
      const { worktreePath, remote, branch } = req.body as {
        worktreePath: string;
        remote: string;
        branch?: string;
      };

      if (!worktreePath) {
        res.status(400).json({ success: false, error: 'worktreePath required' });
        return;
      }

      if (!remote) {
        res.status(400).json({ success: false, error: 'remote required' });
        return;
      }

      // Get current branch if not provided
      let targetBranch = branch;
      if (!targetBranch) {
        try {
          targetBranch = await getCurrentBranch(worktreePath);
        } catch (err) {
          res.status(400).json({
            success: false,
            error: `Failed to get current branch: ${getErrorMessage(err)}`,
          });
          return;
        }

        if (targetBranch === 'HEAD') {
          res.status(400).json({
            success: false,
            error: 'Cannot set tracking in detached HEAD state.',
          });
          return;
        }
      }

      // Set upstream tracking (pass local branch name as final arg to be explicit)
      await execGitCommand(
        ['branch', '--set-upstream-to', `${remote}/${targetBranch}`, targetBranch],
        worktreePath
      );

      res.json({
        success: true,
        result: {
          branch: targetBranch,
          remote,
          upstream: `${remote}/${targetBranch}`,
          message: `Set tracking branch to ${remote}/${targetBranch}`,
        },
      });
    } catch (error) {
      logError(error, 'Set tracking branch failed');
      res.status(500).json({ success: false, error: getErrorMessage(error) });
    }
  };
}
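// Example (sketch): for remote "origin" and current branch "feature/login" (placeholder
// names), the handler above runs the equivalent of
//
//   git branch --set-upstream-to origin/feature/login feature/login
//
// and responds with { success: true, result: { upstream: 'origin/feature/login', ... } }.
// A minimal request body (branch omitted, so the current branch is used):
const exampleSetTrackingBody = {
  worktreePath: '/path/to/worktree', // required
  remote: 'origin', // required
  // branch: 'feature/login',       // optional override
};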
|
||||
@@ -9,7 +9,7 @@
|
||||
* For remote branches (e.g., "origin/feature"), automatically creates a
|
||||
* local tracking branch and checks it out.
|
||||
*
|
||||
* Also fetches the latest remote refs after switching.
|
||||
* Also fetches the latest remote refs before switching to ensure accurate branch detection.
|
||||
*
|
||||
* Git business logic is delegated to worktree-branch-service.ts.
|
||||
* Events are emitted at key lifecycle points for WebSocket subscribers.
|
||||
|
||||
66
apps/server/src/routes/worktree/routes/sync.ts
Normal file
@@ -0,0 +1,66 @@
/**
 * POST /sync endpoint - Pull then push a worktree branch
 *
 * Performs a full sync operation: pull latest from remote, then push
 * local commits. Handles divergence automatically.
 *
 * Git business logic is delegated to sync-service.ts.
 *
 * Note: Git repository validation (isGitRepo, hasCommits) is handled by
 * the requireValidWorktree middleware in index.ts
 */

import type { Request, Response } from 'express';
import { getErrorMessage, logError } from '../common.js';
import { performSync } from '../../../services/sync-service.js';

export function createSyncHandler() {
  return async (req: Request, res: Response): Promise<void> => {
    try {
      const { worktreePath, remote } = req.body as {
        worktreePath: string;
        remote?: string;
      };

      if (!worktreePath) {
        res.status(400).json({
          success: false,
          error: 'worktreePath required',
        });
        return;
      }

      const result = await performSync(worktreePath, { remote });

      if (!result.success) {
        const statusCode = result.hasConflicts ? 409 : 500;
        res.status(statusCode).json({
          success: false,
          error: result.error,
          hasConflicts: result.hasConflicts,
          conflictFiles: result.conflictFiles,
          conflictSource: result.conflictSource,
          pulled: result.pulled,
          pushed: result.pushed,
        });
        return;
      }

      res.json({
        success: true,
        result: {
          branch: result.branch,
          pulled: result.pulled,
          pushed: result.pushed,
          isFastForward: result.isFastForward,
          isMerge: result.isMerge,
          autoResolved: result.autoResolved,
          message: result.message,
        },
      });
    } catch (error) {
      logError(error, 'Sync worktree failed');
      res.status(500).json({ success: false, error: getErrorMessage(error) });
    }
  };
}
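// Example (sketch): a successful /sync response from the handler above; values are
// illustrative and come from performSync(). A conflict during the pull phase instead
// yields HTTP 409 with { success: false, hasConflicts: true, conflictFiles, conflictSource }.
const exampleSyncResponse = {
  success: true,
  result: {
    branch: 'feature/login',
    pulled: true,
    pushed: true,
    isFastForward: true,
    isMerge: false,
    autoResolved: false,
    message: 'Synced feature/login with origin',
  },
};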
|
||||
163
apps/server/src/routes/worktree/routes/update-pr-number.ts
Normal file
@@ -0,0 +1,163 @@
|
||||
/**
|
||||
* POST /update-pr-number endpoint - Update the tracked PR number for a worktree
|
||||
*
|
||||
* Allows users to manually change which PR number is tracked for a worktree branch.
|
||||
* Fetches updated PR info from GitHub when available, or updates metadata with the
|
||||
* provided number only if GitHub CLI is unavailable.
|
||||
*/
|
||||
|
||||
import type { Request, Response } from 'express';
|
||||
import { getErrorMessage, logError, execAsync, execEnv, isGhCliAvailable } from '../common.js';
|
||||
import { updateWorktreePRInfo } from '../../../lib/worktree-metadata.js';
|
||||
import { createLogger } from '@automaker/utils';
|
||||
import { validatePRState } from '@automaker/types';
|
||||
|
||||
const logger = createLogger('UpdatePRNumber');
|
||||
|
||||
export function createUpdatePRNumberHandler() {
|
||||
return async (req: Request, res: Response): Promise<void> => {
|
||||
try {
|
||||
const { worktreePath, projectPath, prNumber } = req.body as {
|
||||
worktreePath: string;
|
||||
projectPath?: string;
|
||||
prNumber: number;
|
||||
};
|
||||
|
||||
if (!worktreePath) {
|
||||
res.status(400).json({ success: false, error: 'worktreePath required' });
|
||||
return;
|
||||
}
|
||||
|
||||
if (
|
||||
!prNumber ||
|
||||
typeof prNumber !== 'number' ||
|
||||
prNumber <= 0 ||
|
||||
!Number.isInteger(prNumber)
|
||||
) {
|
||||
res.status(400).json({ success: false, error: 'prNumber must be a positive integer' });
|
||||
return;
|
||||
}
|
||||
|
||||
const effectiveProjectPath = projectPath || worktreePath;
|
||||
|
||||
// Get current branch name
|
||||
const { stdout: branchOutput } = await execAsync('git rev-parse --abbrev-ref HEAD', {
|
||||
cwd: worktreePath,
|
||||
env: execEnv,
|
||||
});
|
||||
const branchName = branchOutput.trim();
|
||||
|
||||
if (!branchName || branchName === 'HEAD') {
|
||||
res.status(400).json({
|
||||
success: false,
|
||||
error: 'Cannot update PR number in detached HEAD state',
|
||||
});
|
||||
return;
|
||||
}
|
||||
|
||||
// Try to fetch PR info from GitHub for the given PR number
|
||||
const ghCliAvailable = await isGhCliAvailable();
|
||||
|
||||
if (ghCliAvailable) {
|
||||
try {
|
||||
// Detect repository for gh CLI
|
||||
let repoFlag = '';
|
||||
try {
|
||||
const { stdout: remotes } = await execAsync('git remote -v', {
|
||||
cwd: worktreePath,
|
||||
env: execEnv,
|
||||
});
|
||||
const lines = remotes.split(/\r?\n/);
|
||||
let upstreamRepo: string | null = null;
|
||||
let originOwner: string | null = null;
|
||||
let originRepo: string | null = null;
|
||||
|
||||
for (const line of lines) {
|
||||
const match =
|
||||
line.match(/^(\w+)\s+.*[:/]([^/]+)\/([^/\s]+?)(?:\.git)?\s+\(fetch\)/) ||
|
||||
line.match(/^(\w+)\s+git@[^:]+:([^/]+)\/([^\s]+?)(?:\.git)?\s+\(fetch\)/) ||
|
||||
line.match(/^(\w+)\s+https?:\/\/[^/]+\/([^/]+)\/([^\s]+?)(?:\.git)?\s+\(fetch\)/);
|
||||
|
||||
if (match) {
|
||||
const [, remoteName, owner, repo] = match;
|
||||
if (remoteName === 'upstream') {
|
||||
upstreamRepo = `${owner}/${repo}`;
|
||||
} else if (remoteName === 'origin') {
|
||||
originOwner = owner;
|
||||
originRepo = repo;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const targetRepo =
|
||||
upstreamRepo || (originOwner && originRepo ? `${originOwner}/${originRepo}` : null);
|
||||
if (targetRepo) {
|
||||
repoFlag = ` --repo "${targetRepo}"`;
|
||||
}
|
||||
} catch {
|
||||
// Ignore remote parsing errors
|
||||
}
|
||||
|
||||
// Fetch PR info from GitHub using the PR number
|
||||
const viewCmd = `gh pr view ${prNumber}${repoFlag} --json number,title,url,state,createdAt`;
|
||||
const { stdout: prOutput } = await execAsync(viewCmd, {
|
||||
cwd: worktreePath,
|
||||
env: execEnv,
|
||||
});
|
||||
|
||||
const prData = JSON.parse(prOutput);
|
||||
|
||||
const prInfo = {
|
||||
number: prData.number,
|
||||
url: prData.url,
|
||||
title: prData.title,
|
||||
state: validatePRState(prData.state),
|
||||
createdAt: prData.createdAt || new Date().toISOString(),
|
||||
};
|
||||
|
||||
await updateWorktreePRInfo(effectiveProjectPath, branchName, prInfo);
|
||||
|
||||
logger.info(`Updated PR tracking to #${prNumber} for branch ${branchName}`);
|
||||
|
||||
res.json({
|
||||
success: true,
|
||||
result: {
|
||||
branch: branchName,
|
||||
prInfo,
|
||||
},
|
||||
});
|
||||
return;
|
||||
} catch (error) {
|
||||
logger.warn(`Failed to fetch PR #${prNumber} from GitHub:`, error);
|
||||
// Fall through to simple update below
|
||||
}
|
||||
}
|
||||
|
||||
// Fallback: update with just the number, preserving existing PR info structure
|
||||
// or creating minimal info if no GitHub data available
|
||||
const prInfo = {
|
||||
number: prNumber,
|
||||
url: `https://github.com/pulls/${prNumber}`,
|
||||
title: `PR #${prNumber}`,
|
||||
state: validatePRState('OPEN'),
|
||||
createdAt: new Date().toISOString(),
|
||||
};
|
||||
|
||||
await updateWorktreePRInfo(effectiveProjectPath, branchName, prInfo);
|
||||
|
||||
logger.info(`Updated PR tracking to #${prNumber} for branch ${branchName} (no GitHub data)`);
|
||||
|
||||
res.json({
|
||||
success: true,
|
||||
result: {
|
||||
branch: branchName,
|
||||
prInfo,
|
||||
ghCliUnavailable: !ghCliAvailable,
|
||||
},
|
||||
});
|
||||
} catch (error) {
|
||||
logError(error, 'Update PR number failed');
|
||||
res.status(500).json({ success: false, error: getErrorMessage(error) });
|
||||
}
|
||||
};
|
||||
}
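// Example (sketch): how the remote detection above resolves the --repo flag.
// Given `git remote -v` output such as (placeholder owners/repos):
//
//   origin    git@github.com:acme/widgets.git (fetch)
//   origin    git@github.com:acme/widgets.git (push)
//   upstream  https://github.com/acme-org/widgets.git (fetch)
//
// both fetch lines match the regexes, upstream takes precedence over origin, and the
// handler ends up running roughly:
//
//   gh pr view 123 --repo "acme-org/widgets" --json number,title,url,state,createdAt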
|
||||
@@ -5,6 +5,7 @@
|
||||
import type {
|
||||
PlanningMode,
|
||||
ThinkingLevel,
|
||||
ReasoningEffort,
|
||||
ParsedTask,
|
||||
ClaudeCompatibleProvider,
|
||||
Credentials,
|
||||
@@ -24,11 +25,14 @@ export interface AgentExecutionOptions {
|
||||
previousContent?: string;
|
||||
systemPrompt?: string;
|
||||
autoLoadClaudeMd?: boolean;
|
||||
useClaudeCodeSystemPrompt?: boolean;
|
||||
thinkingLevel?: ThinkingLevel;
|
||||
reasoningEffort?: ReasoningEffort;
|
||||
branchName?: string | null;
|
||||
credentials?: Credentials;
|
||||
claudeCompatibleProvider?: ClaudeCompatibleProvider;
|
||||
mcpServers?: Record<string, unknown>;
|
||||
sdkSessionId?: string;
|
||||
sdkOptions?: {
|
||||
maxTurns?: number;
|
||||
allowedTools?: string[];
|
||||
|
||||
@@ -38,6 +38,8 @@ export type {
|
||||
|
||||
const logger = createLogger('AgentExecutor');
|
||||
|
||||
const DEFAULT_MAX_TURNS = 10000;
|
||||
|
||||
export class AgentExecutor {
|
||||
private static readonly WRITE_DEBOUNCE_MS = 500;
|
||||
private static readonly STREAM_HEARTBEAT_MS = 15_000;
|
||||
@@ -91,6 +93,7 @@ export class AgentExecutor {
|
||||
credentials,
|
||||
claudeCompatibleProvider,
|
||||
mcpServers,
|
||||
sdkSessionId,
|
||||
sdkOptions,
|
||||
} = options;
|
||||
const { content: promptContent } = await buildPromptWithImages(
|
||||
@@ -99,10 +102,22 @@ export class AgentExecutor {
|
||||
workDir,
|
||||
false
|
||||
);
|
||||
const resolvedMaxTurns = sdkOptions?.maxTurns ?? DEFAULT_MAX_TURNS;
|
||||
if (sdkOptions?.maxTurns == null) {
|
||||
logger.info(
|
||||
`[execute] Feature ${featureId}: sdkOptions.maxTurns is not set, defaulting to ${resolvedMaxTurns}. ` +
|
||||
`Model: ${effectiveBareModel}`
|
||||
);
|
||||
} else {
|
||||
logger.info(
|
||||
`[execute] Feature ${featureId}: maxTurns=${resolvedMaxTurns}, model=${effectiveBareModel}`
|
||||
);
|
||||
}
|
||||
|
||||
const executeOptions: ExecuteOptions = {
|
||||
prompt: promptContent,
|
||||
model: effectiveBareModel,
|
||||
maxTurns: sdkOptions?.maxTurns,
|
||||
maxTurns: resolvedMaxTurns,
|
||||
cwd: workDir,
|
||||
allowedTools: sdkOptions?.allowedTools as string[] | undefined,
|
||||
abortController,
|
||||
@@ -113,8 +128,10 @@ export class AgentExecutor {
|
||||
? (mcpServers as Record<string, { command: string }>)
|
||||
: undefined,
|
||||
thinkingLevel: options.thinkingLevel,
|
||||
reasoningEffort: options.reasoningEffort,
|
||||
credentials,
|
||||
claudeCompatibleProvider,
|
||||
sdkSessionId,
|
||||
};
|
||||
const featureDirForOutput = getFeatureDir(projectPath, featureId);
|
||||
const outputPath = path.join(featureDirForOutput, 'agent-output.md');
|
||||
@@ -203,6 +220,9 @@ export class AgentExecutor {
|
||||
try {
|
||||
const stream = provider.executeQuery(executeOptions);
|
||||
streamLoop: for await (const msg of stream) {
|
||||
if (msg.session_id && msg.session_id !== options.sdkSessionId) {
|
||||
options.sdkSessionId = msg.session_id;
|
||||
}
|
||||
receivedAnyStreamMessage = true;
|
||||
appendRawEvent(msg);
|
||||
if (abortController.signal.aborted) {
|
||||
@@ -276,9 +296,40 @@ export class AgentExecutor {
|
||||
}
|
||||
}
|
||||
} else if (msg.type === 'error') {
|
||||
throw new Error(AgentExecutor.sanitizeProviderError(msg.error));
|
||||
} else if (msg.type === 'result' && msg.subtype === 'success') scheduleWrite();
|
||||
const sanitized = AgentExecutor.sanitizeProviderError(msg.error);
|
||||
logger.error(
|
||||
`[execute] Feature ${featureId} received error from provider. ` +
|
||||
`raw="${msg.error}", sanitized="${sanitized}", session_id=${msg.session_id ?? 'none'}`
|
||||
);
|
||||
throw new Error(sanitized);
|
||||
} else if (msg.type === 'result') {
|
||||
if (msg.subtype === 'success') {
|
||||
scheduleWrite();
|
||||
} else if (msg.subtype?.startsWith('error')) {
|
||||
// Non-success result subtypes from the SDK (error_max_turns, error_during_execution, etc.)
|
||||
logger.error(
|
||||
`[execute] Feature ${featureId} ended with error subtype: ${msg.subtype}. ` +
|
||||
`session_id=${msg.session_id ?? 'none'}`
|
||||
);
|
||||
throw new Error(`Agent execution ended with: ${msg.subtype}`);
|
||||
} else {
|
||||
logger.warn(
|
||||
`[execute] Feature ${featureId} received unhandled result subtype: ${msg.subtype}`
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
} finally {
|
||||
clearInterval(streamHeartbeat);
|
||||
if (writeTimeout) clearTimeout(writeTimeout);
|
||||
if (rawWriteTimeout) clearTimeout(rawWriteTimeout);
|
||||
|
||||
const streamElapsedMs = Date.now() - streamStartTime;
|
||||
logger.info(
|
||||
`[execute] Stream ended for feature ${featureId} after ${Math.round(streamElapsedMs / 1000)}s. ` +
|
||||
`aborted=${aborted}, specDetected=${specDetected}, responseLength=${responseText.length}`
|
||||
);
|
||||
|
||||
await writeToFile();
|
||||
if (enableRawOutput && rawOutputLines.length > 0) {
|
||||
try {
|
||||
@@ -288,10 +339,6 @@ export class AgentExecutor {
|
||||
/* ignore */
|
||||
}
|
||||
}
|
||||
} finally {
|
||||
clearInterval(streamHeartbeat);
|
||||
if (writeTimeout) clearTimeout(writeTimeout);
|
||||
if (rawWriteTimeout) clearTimeout(rawWriteTimeout);
|
||||
}
|
||||
return { responseText, specDetected, tasksCompleted, aborted };
|
||||
}
|
||||
@@ -351,14 +398,22 @@ export class AgentExecutor {
|
||||
taskPrompts.taskExecution.taskPromptTemplate,
|
||||
userFeedback
|
||||
);
|
||||
const taskMaxTurns = sdkOptions?.maxTurns ?? DEFAULT_MAX_TURNS;
|
||||
logger.info(
|
||||
`[executeTasksLoop] Feature ${featureId}, task ${task.id} (${taskIndex + 1}/${tasks.length}): ` +
|
||||
`maxTurns=${taskMaxTurns} (sdkOptions.maxTurns=${sdkOptions?.maxTurns ?? 'undefined'})`
|
||||
);
|
||||
const taskStream = provider.executeQuery(
|
||||
this.buildExecOpts(options, taskPrompt, Math.min(sdkOptions?.maxTurns ?? 100, 100))
|
||||
this.buildExecOpts(options, taskPrompt, taskMaxTurns)
|
||||
);
|
||||
let taskOutput = '',
|
||||
taskStartDetected = false,
|
||||
taskCompleteDetected = false;
|
||||
|
||||
for await (const msg of taskStream) {
|
||||
if (msg.session_id && msg.session_id !== options.sdkSessionId) {
|
||||
options.sdkSessionId = msg.session_id;
|
||||
}
|
||||
if (msg.type === 'assistant' && msg.message?.content) {
|
||||
for (const b of msg.message.content) {
|
||||
if (b.type === 'text') {
|
||||
@@ -412,16 +467,28 @@ export class AgentExecutor {
|
||||
});
|
||||
}
|
||||
} else if (msg.type === 'error') {
|
||||
// Clean the error: strip ANSI codes and redundant "Error: " prefix
|
||||
const cleanedError =
|
||||
(msg.error || `Error during task ${task.id}`)
|
||||
.replace(/\x1b\[[0-9;]*m/g, '')
|
||||
.replace(/^Error:\s*/i, '')
|
||||
.trim() || `Error during task ${task.id}`;
|
||||
throw new Error(cleanedError);
|
||||
} else if (msg.type === 'result' && msg.subtype === 'success') {
|
||||
taskOutput += msg.result || '';
|
||||
responseText += msg.result || '';
|
||||
const fallback = `Error during task ${task.id}`;
|
||||
const sanitized = AgentExecutor.sanitizeProviderError(msg.error || fallback);
|
||||
logger.error(
|
||||
`[executeTasksLoop] Feature ${featureId} task ${task.id} received error from provider. ` +
|
||||
`raw="${msg.error}", sanitized="${sanitized}", session_id=${msg.session_id ?? 'none'}`
|
||||
);
|
||||
throw new Error(sanitized);
|
||||
} else if (msg.type === 'result') {
|
||||
if (msg.subtype === 'success') {
|
||||
taskOutput += msg.result || '';
|
||||
responseText += msg.result || '';
|
||||
} else if (msg.subtype?.startsWith('error')) {
|
||||
logger.error(
|
||||
`[executeTasksLoop] Feature ${featureId} task ${task.id} ended with error subtype: ${msg.subtype}. ` +
|
||||
`session_id=${msg.session_id ?? 'none'}`
|
||||
);
|
||||
throw new Error(`Agent execution ended with: ${msg.subtype}`);
|
||||
} else {
|
||||
logger.warn(
|
||||
`[executeTasksLoop] Feature ${featureId} task ${task.id} received unhandled result subtype: ${msg.subtype}`
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
if (!taskCompleteDetected)
|
||||
@@ -571,8 +638,11 @@ export class AgentExecutor {
|
||||
});
|
||||
let revText = '';
|
||||
for await (const msg of provider.executeQuery(
|
||||
this.buildExecOpts(options, revPrompt, sdkOptions?.maxTurns ?? 100)
|
||||
this.buildExecOpts(options, revPrompt, sdkOptions?.maxTurns ?? DEFAULT_MAX_TURNS)
|
||||
)) {
|
||||
if (msg.session_id && msg.session_id !== options.sdkSessionId) {
|
||||
options.sdkSessionId = msg.session_id;
|
||||
}
|
||||
if (msg.type === 'assistant' && msg.message?.content)
|
||||
for (const b of msg.message.content)
|
||||
if (b.type === 'text') {
|
||||
@@ -657,7 +727,7 @@ export class AgentExecutor {
|
||||
return { responseText, tasksCompleted };
|
||||
}
|
||||
|
||||
private buildExecOpts(o: AgentExecutionOptions, prompt: string, maxTurns?: number) {
|
||||
private buildExecOpts(o: AgentExecutionOptions, prompt: string, maxTurns: number) {
|
||||
return {
|
||||
prompt,
|
||||
model: o.effectiveBareModel,
|
||||
@@ -666,12 +736,14 @@ export class AgentExecutor {
|
||||
allowedTools: o.sdkOptions?.allowedTools as string[] | undefined,
|
||||
abortController: o.abortController,
|
||||
thinkingLevel: o.thinkingLevel,
|
||||
reasoningEffort: o.reasoningEffort,
|
||||
mcpServers:
|
||||
o.mcpServers && Object.keys(o.mcpServers).length > 0
|
||||
? (o.mcpServers as Record<string, { command: string }>)
|
||||
: undefined,
|
||||
credentials: o.credentials,
|
||||
claudeCompatibleProvider: o.claudeCompatibleProvider,
|
||||
sdkSessionId: o.sdkSessionId,
|
||||
};
|
||||
}
|
||||
|
||||
@@ -689,8 +761,11 @@ export class AgentExecutor {
|
||||
.replace(/\{\{approvedPlan\}\}/g, planContent);
|
||||
let responseText = initialResponseText;
|
||||
for await (const msg of provider.executeQuery(
|
||||
this.buildExecOpts(options, contPrompt, options.sdkOptions?.maxTurns)
|
||||
this.buildExecOpts(options, contPrompt, options.sdkOptions?.maxTurns ?? DEFAULT_MAX_TURNS)
|
||||
)) {
|
||||
if (msg.session_id && msg.session_id !== options.sdkSessionId) {
|
||||
options.sdkSessionId = msg.session_id;
|
||||
}
|
||||
if (msg.type === 'assistant' && msg.message?.content)
|
||||
for (const b of msg.message.content) {
|
||||
if (b.type === 'text') {
|
||||
|
||||
@@ -21,6 +21,7 @@ import { createChatOptions, validateWorkingDirectory } from '../lib/sdk-options.
|
||||
import type { SettingsService } from './settings-service.js';
|
||||
import {
|
||||
getAutoLoadClaudeMdSetting,
|
||||
getUseClaudeCodeSystemPromptSetting,
|
||||
filterClaudeMdFromContext,
|
||||
getMCPServersFromSettings,
|
||||
getPromptCustomization,
|
||||
@@ -28,6 +29,7 @@ import {
|
||||
getSubagentsConfiguration,
|
||||
getCustomSubagents,
|
||||
getProviderByModelId,
|
||||
getDefaultMaxTurnsSetting,
|
||||
} from '../lib/settings-helpers.js';
|
||||
|
||||
interface Message {
|
||||
@@ -328,12 +330,6 @@ export class AgentService {
|
||||
timestamp: new Date().toISOString(),
|
||||
};
|
||||
|
||||
// Build conversation history from existing messages BEFORE adding current message
|
||||
const conversationHistory = session.messages.map((msg) => ({
|
||||
role: msg.role,
|
||||
content: msg.content,
|
||||
}));
|
||||
|
||||
session.messages.push(userMessage);
|
||||
session.isRunning = true;
|
||||
session.abortController = new AbortController();
|
||||
@@ -362,6 +358,22 @@ export class AgentService {
|
||||
'[AgentService]'
|
||||
);
|
||||
|
||||
// Load useClaudeCodeSystemPrompt setting (project setting takes precedence over global)
|
||||
// Wrap in try/catch so transient settingsService errors don't abort message processing
|
||||
let useClaudeCodeSystemPrompt = true;
|
||||
try {
|
||||
useClaudeCodeSystemPrompt = await getUseClaudeCodeSystemPromptSetting(
|
||||
effectiveWorkDir,
|
||||
this.settingsService,
|
||||
'[AgentService]'
|
||||
);
|
||||
} catch (err) {
|
||||
this.logger.error(
|
||||
'[AgentService] getUseClaudeCodeSystemPromptSetting failed, defaulting to true',
|
||||
err
|
||||
);
|
||||
}
|
||||
|
||||
// Load MCP servers from settings (global setting only)
|
||||
const mcpServers = await getMCPServersFromSettings(this.settingsService, '[AgentService]');
|
||||
|
||||
@@ -405,6 +417,7 @@ export class AgentService {
|
||||
}
|
||||
}
|
||||
|
||||
let combinedSystemPrompt: string | undefined;
|
||||
// Load project context files (CLAUDE.md, CODE_QUALITY.md, etc.) and memory files
|
||||
// Use the user's message as task context for smart memory selection
|
||||
const contextResult = await loadContextFiles({
|
||||
@@ -422,7 +435,7 @@ export class AgentService {
|
||||
|
||||
// Build combined system prompt with base prompt and context files
|
||||
const baseSystemPrompt = await this.getSystemPrompt();
|
||||
const combinedSystemPrompt = contextFilesPrompt
|
||||
combinedSystemPrompt = contextFilesPrompt
|
||||
? `${contextFilesPrompt}\n\n${baseSystemPrompt}`
|
||||
: baseSystemPrompt;
|
||||
|
||||
@@ -437,6 +450,9 @@ export class AgentService {
|
||||
const modelForSdk = providerResolvedModel || model;
|
||||
const sessionModelForSdk = providerResolvedModel ? undefined : session.model;
|
||||
|
||||
// Read user-configured max turns from settings
|
||||
const userMaxTurns = await getDefaultMaxTurnsSetting(this.settingsService, '[AgentService]');
|
||||
|
||||
const sdkOptions = createChatOptions({
|
||||
cwd: effectiveWorkDir,
|
||||
model: modelForSdk,
|
||||
@@ -444,7 +460,9 @@ export class AgentService {
|
||||
systemPrompt: combinedSystemPrompt,
|
||||
abortController: session.abortController!,
|
||||
autoLoadClaudeMd,
|
||||
useClaudeCodeSystemPrompt,
|
||||
thinkingLevel: effectiveThinkingLevel, // Pass thinking level for Claude models
|
||||
maxTurns: userMaxTurns, // User-configured max turns from settings
|
||||
mcpServers: Object.keys(mcpServers).length > 0 ? mcpServers : undefined,
|
||||
});
|
||||
|
||||
@@ -469,7 +487,19 @@ export class AgentService {
|
||||
Object.keys(customSubagents).length > 0;
|
||||
|
||||
// Base tools that match the provider's default set
|
||||
const baseTools = ['Read', 'Write', 'Edit', 'Glob', 'Grep', 'Bash', 'WebSearch', 'WebFetch'];
|
||||
const baseTools = [
|
||||
'Read',
|
||||
'Write',
|
||||
'Edit',
|
||||
'MultiEdit',
|
||||
'Glob',
|
||||
'Grep',
|
||||
'LS',
|
||||
'Bash',
|
||||
'WebSearch',
|
||||
'WebFetch',
|
||||
'TodoWrite',
|
||||
];
|
||||
|
||||
if (allowedTools) {
|
||||
allowedTools = [...allowedTools]; // Create a copy to avoid mutating SDK options
|
||||
@@ -508,6 +538,14 @@ export class AgentService {
|
||||
: stripProviderPrefix(effectiveModel);
|
||||
|
||||
// Build options for provider
|
||||
const conversationHistory = session.messages
|
||||
.slice(0, -1)
|
||||
.map((msg) => ({
|
||||
role: msg.role,
|
||||
content: msg.content,
|
||||
}))
|
||||
.filter((msg) => msg.content.trim().length > 0);
|
||||
|
||||
const options: ExecuteOptions = {
|
||||
prompt: '', // Will be set below based on images
|
||||
model: bareModel, // Bare model ID (e.g., "gpt-5.1-codex-max", "composer-1")
|
||||
@@ -517,7 +555,8 @@ export class AgentService {
|
||||
maxTurns: maxTurns,
|
||||
allowedTools: allowedTools,
|
||||
abortController: session.abortController!,
|
||||
conversationHistory: conversationHistory.length > 0 ? conversationHistory : undefined,
|
||||
conversationHistory:
|
||||
conversationHistory && conversationHistory.length > 0 ? conversationHistory : undefined,
|
||||
settingSources: settingSources.length > 0 ? settingSources : undefined,
|
||||
sdkSessionId: session.sdkSessionId, // Pass SDK session ID for resuming
|
||||
mcpServers: Object.keys(mcpServers).length > 0 ? mcpServers : undefined, // Pass MCP servers configuration
|
||||
@@ -545,6 +584,7 @@ export class AgentService {
|
||||
let currentAssistantMessage: Message | null = null;
|
||||
let responseText = '';
|
||||
const toolUses: Array<{ name: string; input: unknown }> = [];
|
||||
const toolNamesById = new Map<string, string>();
|
||||
|
||||
for await (const msg of stream) {
|
||||
// Capture SDK session ID from any message and persist it.
|
||||
@@ -589,11 +629,50 @@ export class AgentService {
|
||||
input: block.input,
|
||||
};
|
||||
toolUses.push(toolUse);
|
||||
if (block.tool_use_id) {
|
||||
toolNamesById.set(block.tool_use_id, toolUse.name);
|
||||
}
|
||||
|
||||
this.emitAgentEvent(sessionId, {
|
||||
type: 'tool_use',
|
||||
tool: toolUse,
|
||||
});
|
||||
} else if (block.type === 'tool_result') {
|
||||
const toolUseId = block.tool_use_id;
|
||||
const toolName = toolUseId ? toolNamesById.get(toolUseId) : undefined;
|
||||
|
||||
// Normalize block.content to a string for the emitted event
|
||||
const rawContent: unknown = block.content;
|
||||
let contentString: string;
|
||||
if (typeof rawContent === 'string') {
|
||||
contentString = rawContent;
|
||||
} else if (Array.isArray(rawContent)) {
|
||||
// Extract text from content blocks (TextBlock, ImageBlock, etc.)
|
||||
contentString = rawContent
|
||||
.map((part: { text?: string; type?: string }) => {
|
||||
if (typeof part === 'string') return part;
|
||||
if (part.text) return part.text;
|
||||
// For non-text blocks (e.g., images), represent as type indicator
|
||||
if (part.type) return `[${part.type}]`;
|
||||
return JSON.stringify(part);
|
||||
})
|
||||
.join('\n');
|
||||
} else if (rawContent !== undefined && rawContent !== null) {
|
||||
contentString = JSON.stringify(rawContent);
|
||||
} else {
|
||||
contentString = '';
|
||||
}
|
||||
|
||||
this.emitAgentEvent(sessionId, {
|
||||
type: 'tool_result',
|
||||
tool: {
|
||||
name: toolName || 'unknown',
|
||||
input: {
|
||||
toolUseId,
|
||||
content: contentString,
|
||||
},
|
||||
},
|
||||
});
|
||||
}
|
||||
}
|
||||
}
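// Example (sketch): the normalization above turns a block-array tool_result such as
//   [{ type: 'text', text: 'file written' }, { type: 'image' }]
// into the emitted string "file written\n[image]"; plain string content passes through
// unchanged, and any other value is JSON.stringified.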
|
||||
|
||||
@@ -163,6 +163,10 @@ export class AutoLoopCoordinator {
|
||||
const { projectPath, branchName } = projectState.config;
|
||||
while (projectState.isRunning && !projectState.abortController.signal.aborted) {
|
||||
try {
|
||||
// Count ALL running features (both auto and manual) against the concurrency limit.
|
||||
// This ensures auto mode is aware of the total system load and does not over-subscribe
|
||||
// resources. Manual tasks always bypass the limit and run immediately, but their
|
||||
// presence is accounted for when deciding whether to dispatch new auto-mode tasks.
|
||||
const runningCount = await this.getRunningCountForWorktree(projectPath, branchName);
|
||||
if (runningCount >= projectState.config.maxConcurrency) {
|
||||
await this.sleep(5000, projectState.abortController.signal);
|
||||
@@ -298,11 +302,17 @@ export class AutoLoopCoordinator {
|
||||
return Array.from(activeProjects);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the number of running features for a worktree.
|
||||
* By default counts ALL running features (both auto-mode and manual).
|
||||
* Pass `autoModeOnly: true` to count only auto-mode features.
|
||||
*/
|
||||
async getRunningCountForWorktree(
|
||||
projectPath: string,
|
||||
branchName: string | null
|
||||
branchName: string | null,
|
||||
options?: { autoModeOnly?: boolean }
|
||||
): Promise<number> {
|
||||
return this.concurrencyManager.getRunningCountForWorktree(projectPath, branchName);
|
||||
return this.concurrencyManager.getRunningCountForWorktree(projectPath, branchName, options);
|
||||
}
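// Example (sketch): counting only auto-mode features versus the default of counting
// every running feature (auto + manual) for a worktree:
//   const total = await coordinator.getRunningCountForWorktree(projectPath, branchName);
//   const autoOnly = await coordinator.getRunningCountForWorktree(projectPath, branchName, {
//     autoModeOnly: true,
//   });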
|
||||
|
||||
trackFailureAndCheckPauseForProject(
|
||||
|
||||
@@ -14,14 +14,19 @@
|
||||
import path from 'path';
|
||||
import { exec } from 'child_process';
|
||||
import { promisify } from 'util';
|
||||
import type { Feature, PlanningMode, ThinkingLevel } from '@automaker/types';
|
||||
import type { Feature, PlanningMode, ThinkingLevel, ReasoningEffort } from '@automaker/types';
|
||||
import { DEFAULT_MAX_CONCURRENCY, DEFAULT_MODELS, stripProviderPrefix } from '@automaker/types';
|
||||
import { resolveModelString } from '@automaker/model-resolver';
|
||||
import { createLogger, loadContextFiles, classifyError } from '@automaker/utils';
|
||||
import { getFeatureDir } from '@automaker/platform';
|
||||
import * as secureFs from '../../lib/secure-fs.js';
|
||||
import { validateWorkingDirectory } from '../../lib/sdk-options.js';
|
||||
import { getPromptCustomization, getProviderByModelId } from '../../lib/settings-helpers.js';
|
||||
import { validateWorkingDirectory, createAutoModeOptions } from '../../lib/sdk-options.js';
|
||||
import {
|
||||
getPromptCustomization,
|
||||
getProviderByModelId,
|
||||
getMCPServersFromSettings,
|
||||
getDefaultMaxTurnsSetting,
|
||||
} from '../../lib/settings-helpers.js';
|
||||
import { execGitCommand } from '@automaker/git-utils';
|
||||
import { TypedEventBus } from '../typed-event-bus.js';
|
||||
import { ConcurrencyManager } from '../concurrency-manager.js';
|
||||
@@ -208,7 +213,9 @@ export class AutoModeServiceFacade {
|
||||
previousContent?: string;
|
||||
systemPrompt?: string;
|
||||
autoLoadClaudeMd?: boolean;
|
||||
useClaudeCodeSystemPrompt?: boolean;
|
||||
thinkingLevel?: ThinkingLevel;
|
||||
reasoningEffort?: ReasoningEffort;
|
||||
branchName?: string | null;
|
||||
[key: string]: unknown;
|
||||
}
|
||||
@@ -234,6 +241,47 @@ export class AutoModeServiceFacade {
|
||||
}
|
||||
}
|
||||
|
||||
// Build sdkOptions with proper maxTurns and allowedTools for auto-mode.
|
||||
// Without this, maxTurns would be undefined, causing providers to use their
|
||||
// internal defaults which may be much lower than intended (e.g., Codex CLI's
|
||||
// default turn limit can cause feature runs to stop prematurely).
|
||||
const autoLoadClaudeMd = opts?.autoLoadClaudeMd ?? false;
|
||||
const useClaudeCodeSystemPrompt = opts?.useClaudeCodeSystemPrompt ?? true;
|
||||
let mcpServers: Record<string, unknown> | undefined;
|
||||
try {
|
||||
if (settingsService) {
|
||||
const servers = await getMCPServersFromSettings(settingsService, '[AutoModeFacade]');
|
||||
if (Object.keys(servers).length > 0) {
|
||||
mcpServers = servers;
|
||||
}
|
||||
}
|
||||
} catch {
|
||||
// MCP servers are optional - continue without them
|
||||
}
|
||||
|
||||
// Read user-configured max turns from settings
|
||||
const userMaxTurns = await getDefaultMaxTurnsSetting(settingsService, '[AutoModeFacade]');
|
||||
|
||||
const sdkOpts = createAutoModeOptions({
|
||||
cwd: workDir,
|
||||
model: resolvedModel,
|
||||
systemPrompt: opts?.systemPrompt,
|
||||
abortController,
|
||||
autoLoadClaudeMd,
|
||||
useClaudeCodeSystemPrompt,
|
||||
thinkingLevel: opts?.thinkingLevel,
|
||||
maxTurns: userMaxTurns,
|
||||
mcpServers: mcpServers as
|
||||
| Record<string, import('@automaker/types').McpServerConfig>
|
||||
| undefined,
|
||||
});
|
||||
|
||||
logger.info(
|
||||
`[createRunAgentFn] Feature ${featureId}: model=${resolvedModel}, ` +
|
||||
`maxTurns=${sdkOpts.maxTurns}, allowedTools=${(sdkOpts.allowedTools as string[])?.length ?? 'default'}, ` +
|
||||
`provider=${provider.getName()}`
|
||||
);
|
||||
|
||||
await agentExecutor.execute(
|
||||
{
|
||||
workDir,
|
||||
@@ -248,12 +296,23 @@ export class AutoModeServiceFacade {
|
||||
previousContent: opts?.previousContent as string | undefined,
|
||||
systemPrompt: opts?.systemPrompt as string | undefined,
|
||||
autoLoadClaudeMd: opts?.autoLoadClaudeMd as boolean | undefined,
|
||||
useClaudeCodeSystemPrompt,
|
||||
thinkingLevel: opts?.thinkingLevel as ThinkingLevel | undefined,
|
||||
reasoningEffort: opts?.reasoningEffort as ReasoningEffort | undefined,
|
||||
branchName: opts?.branchName as string | null | undefined,
|
||||
provider,
|
||||
effectiveBareModel,
|
||||
credentials,
|
||||
claudeCompatibleProvider,
|
||||
mcpServers,
|
||||
sdkOptions: {
|
||||
maxTurns: sdkOpts.maxTurns,
|
||||
allowedTools: sdkOpts.allowedTools as string[] | undefined,
|
||||
systemPrompt: sdkOpts.systemPrompt,
|
||||
settingSources: sdkOpts.settingSources as
|
||||
| Array<'user' | 'project' | 'local'>
|
||||
| undefined,
|
||||
},
|
||||
},
|
||||
{
|
||||
waitForApproval: (fId, projPath) => planApprovalService.waitForApproval(fId, projPath),
|
||||
@@ -334,6 +393,23 @@ export class AutoModeServiceFacade {
|
||||
async (pPath) => featureLoader.getAll(pPath)
|
||||
);
|
||||
|
||||
/**
|
||||
* Iterate all active worktrees for this project, falling back to the
|
||||
* main worktree (null) when none are active.
|
||||
*/
|
||||
const forEachProjectWorktree = (fn: (branchName: string | null) => void): void => {
|
||||
const projectWorktrees = autoLoopCoordinator
|
||||
.getActiveWorktrees()
|
||||
.filter((w) => w.projectPath === projectPath);
|
||||
if (projectWorktrees.length === 0) {
|
||||
fn(null);
|
||||
} else {
|
||||
for (const w of projectWorktrees) {
|
||||
fn(w.branchName);
|
||||
}
|
||||
}
|
||||
};
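// Example (sketch): the failure/success callbacks below use this helper so every active
// worktree of the project is updated; with no active auto loops it runs once with
// branchName === null (the main worktree), e.g.:
//   forEachProjectWorktree((branchName) =>
//     autoLoopCoordinator.recordSuccessForProject(projectPath, branchName)
//   );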
|
||||
|
||||
// ExecutionService - runAgentFn delegates to AgentExecutor via shared helper
|
||||
const executionService = new ExecutionService(
|
||||
eventBus,
|
||||
@@ -357,11 +433,36 @@ export class AutoModeServiceFacade {
|
||||
(pPath, featureId) => getFacade().contextExists(featureId),
|
||||
(pPath, featureId, useWorktrees, _calledInternally) =>
|
||||
getFacade().resumeFeature(featureId, useWorktrees, _calledInternally),
|
||||
(errorInfo) =>
|
||||
autoLoopCoordinator.trackFailureAndCheckPauseForProject(projectPath, null, errorInfo),
|
||||
(errorInfo) => autoLoopCoordinator.signalShouldPauseForProject(projectPath, null, errorInfo),
|
||||
(errorInfo) => {
|
||||
// Track failure against ALL active worktrees for this project.
|
||||
// The ExecutionService callbacks don't receive branchName, so we
|
||||
// iterate all active worktrees. Uses a for-of loop (not .some()) to
|
||||
// ensure every worktree's failure counter is incremented.
|
||||
let shouldPause = false;
|
||||
forEachProjectWorktree((branchName) => {
|
||||
if (
|
||||
autoLoopCoordinator.trackFailureAndCheckPauseForProject(
|
||||
projectPath,
|
||||
branchName,
|
||||
errorInfo
|
||||
)
|
||||
) {
|
||||
shouldPause = true;
|
||||
}
|
||||
});
|
||||
return shouldPause;
|
||||
},
|
||||
(errorInfo) => {
|
||||
forEachProjectWorktree((branchName) =>
|
||||
autoLoopCoordinator.signalShouldPauseForProject(projectPath, branchName, errorInfo)
|
||||
);
|
||||
},
|
||||
() => {
|
||||
/* recordSuccess - no-op */
|
||||
// Record success to clear failure tracking. This prevents failures
|
||||
// from accumulating over time and incorrectly pausing auto mode.
|
||||
forEachProjectWorktree((branchName) =>
|
||||
autoLoopCoordinator.recordSuccessForProject(projectPath, branchName)
|
||||
);
|
||||
},
|
||||
(_pPath) => getFacade().saveExecutionState(),
|
||||
loadContextFiles
|
||||
@@ -660,16 +761,20 @@ export class AutoModeServiceFacade {
|
||||
}
|
||||
}
|
||||
|
||||
this.eventBus.emitAutoModeEvent('auto_mode_feature_complete', {
|
||||
featureId,
|
||||
featureName: feature?.title,
|
||||
branchName: feature?.branchName ?? null,
|
||||
passes: allPassed,
|
||||
message: allPassed
|
||||
? 'All verification checks passed'
|
||||
: `Verification failed: ${results.find((r) => !r.passed)?.check || 'Unknown'}`,
|
||||
projectPath: this.projectPath,
|
||||
});
|
||||
const runningEntryForVerify = this.concurrencyManager.getRunningFeature(featureId);
|
||||
if (runningEntryForVerify?.isAutoMode) {
|
||||
this.eventBus.emitAutoModeEvent('auto_mode_feature_complete', {
|
||||
featureId,
|
||||
featureName: feature?.title,
|
||||
branchName: feature?.branchName ?? null,
|
||||
executionMode: 'auto',
|
||||
passes: allPassed,
|
||||
message: allPassed
|
||||
? 'All verification checks passed'
|
||||
: `Verification failed: ${results.find((r) => !r.passed)?.check || 'Unknown'}`,
|
||||
projectPath: this.projectPath,
|
||||
});
|
||||
}
|
||||
|
||||
return allPassed;
|
||||
}
|
||||
@@ -719,14 +824,18 @@ export class AutoModeServiceFacade {
|
||||
await execGitCommand(['commit', '-m', commitMessage], workDir);
|
||||
const hash = await execGitCommand(['rev-parse', 'HEAD'], workDir);
|
||||
|
||||
this.eventBus.emitAutoModeEvent('auto_mode_feature_complete', {
|
||||
featureId,
|
||||
featureName: feature?.title,
|
||||
branchName: feature?.branchName ?? null,
|
||||
passes: true,
|
||||
message: `Changes committed: ${hash.trim().substring(0, 8)}`,
|
||||
projectPath: this.projectPath,
|
||||
});
|
||||
const runningEntryForCommit = this.concurrencyManager.getRunningFeature(featureId);
|
||||
if (runningEntryForCommit?.isAutoMode) {
|
||||
this.eventBus.emitAutoModeEvent('auto_mode_feature_complete', {
|
||||
featureId,
|
||||
featureName: feature?.title,
|
||||
branchName: feature?.branchName ?? null,
|
||||
executionMode: 'auto',
|
||||
passes: true,
|
||||
message: `Changes committed: ${hash.trim().substring(0, 8)}`,
|
||||
projectPath: this.projectPath,
|
||||
});
|
||||
}
|
||||
|
||||
return hash.trim();
|
||||
} catch (error) {
|
||||
@@ -809,7 +918,7 @@ export class AutoModeServiceFacade {
|
||||
if (feature) {
|
||||
title = feature.title;
|
||||
description = feature.description;
|
||||
branchName = feature.branchName;
|
||||
branchName = feature.branchName ?? undefined;
|
||||
}
|
||||
} catch {
|
||||
// Silently ignore
|
||||
@@ -1039,10 +1148,31 @@ export class AutoModeServiceFacade {
|
||||
// ===========================================================================
|
||||
|
||||
/**
|
||||
* Save execution state for recovery
|
||||
* Save execution state for recovery.
|
||||
*
|
||||
* Uses the active auto-loop config for each worktree so that the persisted
|
||||
* state reflects the real branch and maxConcurrency values rather than the
|
||||
* hard-coded fallbacks (null / DEFAULT_MAX_CONCURRENCY).
|
||||
*/
|
||||
private async saveExecutionState(): Promise<void> {
|
||||
return this.saveExecutionStateForProject(null, DEFAULT_MAX_CONCURRENCY);
|
||||
const projectWorktrees = this.autoLoopCoordinator
|
||||
.getActiveWorktrees()
|
||||
.filter((w) => w.projectPath === this.projectPath);
|
||||
|
||||
if (projectWorktrees.length === 0) {
|
||||
// No active auto loops — save with defaults as a best-effort fallback.
|
||||
return this.saveExecutionStateForProject(null, DEFAULT_MAX_CONCURRENCY);
|
||||
}
|
||||
|
||||
// Save state for every active worktree using its real config values.
|
||||
for (const { branchName } of projectWorktrees) {
|
||||
const config = this.autoLoopCoordinator.getAutoLoopConfigForProject(
|
||||
this.projectPath,
|
||||
branchName
|
||||
);
|
||||
const maxConcurrency = config?.maxConcurrency ?? DEFAULT_MAX_CONCURRENCY;
|
||||
await this.saveExecutionStateForProject(branchName, maxConcurrency);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
|
||||
@@ -159,7 +159,7 @@ export class GlobalAutoModeService {
|
||||
if (feature) {
|
||||
title = feature.title;
|
||||
description = feature.description;
|
||||
branchName = feature.branchName;
|
||||
branchName = feature.branchName ?? undefined;
|
||||
}
|
||||
} catch {
|
||||
// Silently ignore
|
||||
|
||||
426
apps/server/src/services/branch-sync-service.ts
Normal file
@@ -0,0 +1,426 @@
|
||||
/**
|
||||
* branch-sync-service - Sync a local base branch with its remote tracking branch
|
||||
*
|
||||
* Provides logic to detect remote tracking branches, check whether a branch
|
||||
* is checked out in any worktree, and fast-forward a local branch to match
|
||||
* its remote counterpart. Extracted from the worktree create route so
|
||||
* the git logic is decoupled from HTTP request/response handling.
|
||||
*/
|
||||
|
||||
import { createLogger, getErrorMessage } from '@automaker/utils';
|
||||
import { execGitCommand } from '../lib/git.js';
|
||||
|
||||
const logger = createLogger('BranchSyncService');
|
||||
|
||||
/** Timeout for git fetch operations (30 seconds) */
|
||||
const FETCH_TIMEOUT_MS = 30_000;
|
||||
|
||||
// ============================================================================
|
||||
// Types
|
||||
// ============================================================================
|
||||
|
||||
/**
|
||||
* Result of attempting to sync a base branch with its remote.
|
||||
*/
|
||||
export interface BaseBranchSyncResult {
|
||||
/** Whether the sync was attempted */
|
||||
attempted: boolean;
|
||||
/** Whether the sync succeeded */
|
||||
synced: boolean;
|
||||
/** Whether the ref was resolved (but not synced, e.g. remote ref, tag, or commit hash) */
|
||||
resolved?: boolean;
|
||||
/** The remote that was synced from (e.g. 'origin') */
|
||||
remote?: string;
|
||||
/** The commit hash the base branch points to after sync */
|
||||
commitHash?: string;
|
||||
/** Human-readable message about the sync result */
|
||||
message?: string;
|
||||
/** Whether the branch had diverged (local commits ahead of remote) */
|
||||
diverged?: boolean;
|
||||
/** Whether the user can proceed with a stale local copy */
|
||||
canProceedWithStale?: boolean;
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// Helpers
|
||||
// ============================================================================
|
||||
|
||||
/**
|
||||
* Detect the remote tracking branch for a given local branch.
|
||||
*
|
||||
* @param projectPath - Path to the git repository
|
||||
* @param branchName - Local branch name to check (e.g. 'main')
|
||||
* @returns Object with remote name and remote branch, or null if no tracking branch
|
||||
*/
|
||||
export async function getTrackingBranch(
|
||||
projectPath: string,
|
||||
branchName: string
|
||||
): Promise<{ remote: string; remoteBranch: string } | null> {
|
||||
try {
|
||||
// git rev-parse --abbrev-ref <branch>@{upstream} returns e.g. "origin/main"
|
||||
const upstream = await execGitCommand(
|
||||
['rev-parse', '--abbrev-ref', `${branchName}@{upstream}`],
|
||||
projectPath
|
||||
);
|
||||
const trimmed = upstream.trim();
|
||||
if (!trimmed) return null;
|
||||
|
||||
// First, attempt to determine the remote name explicitly via git config
|
||||
// so that remotes whose names contain slashes are handled correctly.
|
||||
let remote: string | null = null;
|
||||
try {
|
||||
const configRemote = await execGitCommand(
|
||||
['config', '--get', `branch.${branchName}.remote`],
|
||||
projectPath
|
||||
);
|
||||
const configRemoteTrimmed = configRemote.trim();
|
||||
if (configRemoteTrimmed) {
|
||||
remote = configRemoteTrimmed;
|
||||
}
|
||||
} catch {
|
||||
// git config lookup failed — will fall back to string splitting below
|
||||
}
|
||||
|
||||
if (remote) {
|
||||
// Strip the known remote prefix (plus the separating '/') to get the remote branch.
|
||||
// The upstream string is expected to be "<remote>/<remoteBranch>".
|
||||
const prefix = `${remote}/`;
|
||||
if (trimmed.startsWith(prefix)) {
|
||||
return {
|
||||
remote,
|
||||
remoteBranch: trimmed.substring(prefix.length),
|
||||
};
|
||||
}
|
||||
// Upstream doesn't start with the expected prefix — fall through to split
|
||||
}
|
||||
|
||||
// Fall back: split on the FIRST slash, which favors the common case of
|
||||
// single-name remotes with slash-containing branch names (e.g.
|
||||
// "origin/feature/foo" → remote="origin", remoteBranch="feature/foo").
|
||||
// Remotes with slashes in their names are uncommon and are already handled
|
||||
// by the git-config lookup above; this fallback only runs when that lookup
|
||||
// fails, so optimizing for single-name remotes is the safer default.
|
||||
const slashIndex = trimmed.indexOf('/');
|
||||
if (slashIndex > 0) {
|
||||
return {
|
||||
remote: trimmed.substring(0, slashIndex),
|
||||
remoteBranch: trimmed.substring(slashIndex + 1),
|
||||
};
|
||||
}
|
||||
return null;
|
||||
} catch {
|
||||
// No upstream tracking branch configured
|
||||
return null;
|
||||
}
|
||||
}
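// Example (sketch): for a local branch "feature/foo" tracking origin,
// `git rev-parse --abbrev-ref feature/foo@{upstream}` prints "origin/feature/foo" and
// `git config --get branch.feature/foo.remote` prints "origin", so this resolves to
// { remote: 'origin', remoteBranch: 'feature/foo' }. If the config lookup fails, the
// first-slash fallback gives the same answer for single-name remotes; a branch with no
// upstream returns null.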
|
||||
|
||||
/**
|
||||
* Check whether a branch is checked out in ANY worktree (main or linked).
|
||||
* Uses `git worktree list --porcelain` to enumerate all worktrees and
|
||||
* checks if any of them has the given branch as their HEAD.
|
||||
*
|
||||
* Returns the absolute path of the worktree where the branch is checked out,
|
||||
* or null if the branch is not checked out anywhere. Callers can use the
|
||||
* returned path to run commands (e.g. `git merge`) inside the correct worktree.
|
||||
*
|
||||
* This prevents using `git update-ref` on a branch that is checked out in
|
||||
* a linked worktree, which would desync that worktree's HEAD.
|
||||
*/
|
||||
export async function isBranchCheckedOut(
|
||||
projectPath: string,
|
||||
branchName: string
|
||||
): Promise<string | null> {
|
||||
try {
|
||||
const stdout = await execGitCommand(['worktree', 'list', '--porcelain'], projectPath);
|
||||
const lines = stdout.split('\n');
|
||||
let currentWorktreePath: string | null = null;
|
||||
let currentBranch: string | null = null;
|
||||
|
||||
for (const line of lines) {
|
||||
if (line.startsWith('worktree ')) {
|
||||
currentWorktreePath = line.slice(9);
|
||||
} else if (line.startsWith('branch ')) {
|
||||
currentBranch = line.slice(7).replace('refs/heads/', '');
|
||||
} else if (line === '') {
|
||||
// End of a worktree entry — check for match, then reset for the next
|
||||
if (currentBranch === branchName && currentWorktreePath) {
|
||||
return currentWorktreePath;
|
||||
}
|
||||
currentWorktreePath = null;
|
||||
currentBranch = null;
|
||||
}
|
||||
}
|
||||
|
||||
// Check the last entry (if output doesn't end with a blank line)
|
||||
if (currentBranch === branchName && currentWorktreePath) {
|
||||
return currentWorktreePath;
|
||||
}
|
||||
|
||||
return null;
|
||||
} catch {
|
||||
return null;
|
||||
}
|
||||
}
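// Example (sketch): `git worktree list --porcelain` output that this parser walks
// (paths and hashes are placeholders):
//
//   worktree /repos/app
//   HEAD 3f9c2d1e...
//   branch refs/heads/main
//
//   worktree /repos/app-login
//   HEAD 7a10b4c9...
//   branch refs/heads/feature/login
//
// With branchName === 'feature/login' this returns '/repos/app-login'; for a branch
// not checked out anywhere it returns null.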
|
||||
|
||||
/**
|
||||
* Build a BaseBranchSyncResult for cases where we proceed with a stale local copy.
|
||||
* Extracts the repeated pattern of getting the short commit hash with a fallback.
|
||||
*/
|
||||
export async function buildStaleResult(
|
||||
projectPath: string,
|
||||
branchName: string,
|
||||
remote: string | undefined,
|
||||
message: string,
|
||||
extra?: Partial<BaseBranchSyncResult>
|
||||
): Promise<BaseBranchSyncResult> {
|
||||
let commitHash: string | undefined;
|
||||
try {
|
||||
const hash = await execGitCommand(['rev-parse', '--short', branchName], projectPath);
|
||||
commitHash = hash.trim();
|
||||
} catch {
|
||||
/* ignore — commit hash is non-critical */
|
||||
}
|
||||
return {
|
||||
attempted: true,
|
||||
synced: false,
|
||||
remote,
|
||||
commitHash,
|
||||
message,
|
||||
canProceedWithStale: true,
|
||||
...extra,
|
||||
};
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// Main Sync Function
|
||||
// ============================================================================
|
||||
|
||||
/**
|
||||
* Sync a local base branch with its remote tracking branch using fast-forward only.
|
||||
*
|
||||
* This function:
|
||||
* 1. Detects the remote tracking branch for the given local branch
|
||||
* 2. Fetches latest from that remote (unless skipFetch is true)
|
||||
* 3. Attempts a fast-forward-only update of the local branch
|
||||
* 4. If the branch has diverged, reports the divergence and allows proceeding with stale copy
|
||||
* 5. If no remote tracking branch exists, skips silently
|
||||
*
|
||||
* @param projectPath - Path to the git repository
|
||||
* @param branchName - The local branch name to sync (e.g. 'main')
|
||||
* @param skipFetch - When true, skip the internal git fetch (caller has already fetched)
|
||||
* @returns Sync result with status information
|
||||
*/
|
||||
export async function syncBaseBranch(
|
||||
projectPath: string,
|
||||
branchName: string,
|
||||
skipFetch = false
|
||||
): Promise<BaseBranchSyncResult> {
|
||||
// Check if the branch exists as a local branch (under refs/heads/).
|
||||
// This correctly handles branch names containing slashes (e.g. "feature/abc",
|
||||
// "fix/issue-123") which are valid local branch names, not remote refs.
|
||||
let existsLocally = false;
|
||||
try {
|
||||
await execGitCommand(['rev-parse', '--verify', `refs/heads/${branchName}`], projectPath);
|
||||
existsLocally = true;
|
||||
} catch {
|
||||
existsLocally = false;
|
||||
}
|
||||
|
||||
if (!existsLocally) {
|
||||
// Not a local branch — check if it's a valid ref (remote ref, tag, or commit hash).
|
||||
// No synchronization is performed here; we only resolve the ref to a commit hash.
|
||||
try {
|
||||
const commitHash = await execGitCommand(['rev-parse', '--short', branchName], projectPath);
|
||||
return {
|
||||
attempted: false,
|
||||
synced: false,
|
||||
resolved: true,
|
||||
commitHash: commitHash.trim(),
|
||||
message: `Ref '${branchName}' resolved (not a local branch; no sync performed)`,
|
||||
};
|
||||
} catch {
|
||||
return {
|
||||
attempted: false,
|
||||
synced: false,
|
||||
message: `Ref '${branchName}' not found`,
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
// Detect remote tracking branch
|
||||
const tracking = await getTrackingBranch(projectPath, branchName);
|
||||
if (!tracking) {
|
||||
// No remote tracking branch — skip silently
|
||||
logger.info(`Branch '${branchName}' has no remote tracking branch, skipping sync`);
|
||||
try {
|
||||
const commitHash = await execGitCommand(['rev-parse', '--short', branchName], projectPath);
|
||||
return {
|
||||
attempted: false,
|
||||
synced: false,
|
||||
commitHash: commitHash.trim(),
|
||||
message: `Branch '${branchName}' has no remote tracking branch`,
|
||||
};
|
||||
} catch {
|
||||
return {
|
||||
attempted: false,
|
||||
synced: false,
|
||||
message: `Branch '${branchName}' has no remote tracking branch`,
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
logger.info(
|
||||
`Syncing base branch '${branchName}' from ${tracking.remote}/${tracking.remoteBranch}`
|
||||
);
|
||||
|
||||
// Fetch the specific remote unless the caller has already performed a fetch
|
||||
// (e.g. via `git fetch --all`) and passed skipFetch=true to avoid redundant work.
|
||||
if (!skipFetch) {
|
||||
try {
|
||||
const fetchController = new AbortController();
|
||||
const fetchTimer = setTimeout(() => fetchController.abort(), FETCH_TIMEOUT_MS);
|
||||
try {
|
||||
await execGitCommand(
|
||||
['fetch', tracking.remote, tracking.remoteBranch, '--quiet'],
|
||||
projectPath,
|
||||
undefined,
|
||||
fetchController
|
||||
);
|
||||
} finally {
|
||||
clearTimeout(fetchTimer);
|
||||
}
|
||||
} catch (fetchErr) {
|
||||
// Fetch failed — network error, auth error, etc.
|
||||
// Allow proceeding with stale local copy
|
||||
const errMsg = getErrorMessage(fetchErr);
|
||||
logger.warn(`Failed to fetch ${tracking.remote}/${tracking.remoteBranch}: ${errMsg}`);
|
||||
return buildStaleResult(
|
||||
projectPath,
|
||||
branchName,
|
||||
tracking.remote,
|
||||
`Failed to fetch from remote: ${errMsg}. Proceeding with local copy.`
|
||||
);
|
||||
}
|
||||
} else {
|
||||
logger.info(`Skipping fetch for '${branchName}' (caller already fetched from remotes)`);
|
||||
}
|
||||
|
||||
// Check if the local branch is behind, ahead, or diverged from the remote
|
||||
const remoteRef = `${tracking.remote}/${tracking.remoteBranch}`;
|
||||
try {
|
||||
// Count commits ahead and behind
|
||||
const revListOutput = await execGitCommand(
|
||||
['rev-list', '--left-right', '--count', `${branchName}...${remoteRef}`],
|
||||
projectPath
|
||||
);
|
||||
const parts = revListOutput.trim().split(/\s+/);
|
||||
const ahead = parseInt(parts[0], 10) || 0;
|
||||
const behind = parseInt(parts[1], 10) || 0;
|
||||
|
||||
if (ahead === 0 && behind === 0) {
|
||||
// Already up to date
|
||||
const commitHash = await execGitCommand(['rev-parse', '--short', branchName], projectPath);
|
||||
logger.info(`Branch '${branchName}' is already up to date with ${remoteRef}`);
|
||||
return {
|
||||
attempted: true,
|
||||
synced: true,
|
||||
remote: tracking.remote,
|
||||
commitHash: commitHash.trim(),
|
||||
message: `Branch '${branchName}' is already up to date`,
|
||||
};
|
||||
}
|
||||
|
||||
if (ahead > 0 && behind > 0) {
|
||||
// Branch has diverged — cannot fast-forward
|
||||
logger.warn(
|
||||
`Branch '${branchName}' has diverged from ${remoteRef} (${ahead} ahead, ${behind} behind)`
|
||||
);
|
||||
return buildStaleResult(
|
||||
projectPath,
|
||||
branchName,
|
||||
tracking.remote,
|
||||
`Branch '${branchName}' has diverged from ${remoteRef} (${ahead} commit(s) ahead, ${behind} behind). Using local copy to avoid overwriting local commits.`,
|
||||
{ diverged: true }
|
||||
);
|
||||
}
|
||||
|
||||
if (ahead > 0 && behind === 0) {
|
||||
// Local is ahead — nothing to pull, already has everything from remote plus more
|
||||
const commitHash = await execGitCommand(['rev-parse', '--short', branchName], projectPath);
|
||||
logger.info(`Branch '${branchName}' is ${ahead} commit(s) ahead of ${remoteRef}`);
|
||||
return {
|
||||
attempted: true,
|
||||
synced: true,
|
||||
remote: tracking.remote,
|
||||
commitHash: commitHash.trim(),
|
||||
message: `Branch '${branchName}' is ${ahead} commit(s) ahead of remote`,
|
||||
};
|
||||
}
|
||||
|
||||
// behind > 0 && ahead === 0 — can fast-forward
|
||||
logger.info(
|
||||
`Branch '${branchName}' is ${behind} commit(s) behind ${remoteRef}, fast-forwarding`
|
||||
);
|
||||
|
||||
// Determine whether the branch is currently checked out (returns the
|
||||
// worktree path where it is checked out, or null if not checked out)
|
||||
const worktreePath = await isBranchCheckedOut(projectPath, branchName);
|
||||
|
||||
if (worktreePath) {
|
||||
// Branch is checked out in a worktree — use git merge --ff-only
|
||||
// Run the merge inside the worktree that has the branch checked out
|
||||
try {
|
||||
await execGitCommand(['merge', '--ff-only', remoteRef], worktreePath);
|
||||
} catch (mergeErr) {
|
||||
const errMsg = getErrorMessage(mergeErr);
|
||||
logger.warn(`Fast-forward merge failed for '${branchName}': ${errMsg}`);
|
||||
return buildStaleResult(
|
||||
projectPath,
|
||||
branchName,
|
||||
tracking.remote,
|
||||
`Fast-forward merge failed: ${errMsg}. Proceeding with local copy.`
|
||||
);
|
||||
}
|
||||
} else {
|
||||
// Branch is NOT checked out — use git update-ref to fast-forward without checkout
|
||||
// This is safe because we already verified the branch is strictly behind (ahead === 0)
|
||||
try {
|
||||
const remoteCommit = await execGitCommand(['rev-parse', remoteRef], projectPath);
|
||||
await execGitCommand(
|
||||
['update-ref', `refs/heads/${branchName}`, remoteCommit.trim()],
|
||||
projectPath
|
||||
);
|
||||
} catch (updateErr) {
|
||||
const errMsg = getErrorMessage(updateErr);
|
||||
logger.warn(`update-ref failed for '${branchName}': ${errMsg}`);
|
||||
return buildStaleResult(
|
||||
projectPath,
|
||||
branchName,
|
||||
tracking.remote,
|
||||
`Failed to fast-forward branch: ${errMsg}. Proceeding with local copy.`
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
// Successfully fast-forwarded
|
||||
const commitHash = await execGitCommand(['rev-parse', '--short', branchName], projectPath);
|
||||
logger.info(`Successfully synced '${branchName}' to ${commitHash.trim()} from ${remoteRef}`);
|
||||
return {
|
||||
attempted: true,
|
||||
synced: true,
|
||||
remote: tracking.remote,
|
||||
commitHash: commitHash.trim(),
|
||||
message: `Fast-forwarded '${branchName}' by ${behind} commit(s) from ${remoteRef}`,
|
||||
};
|
||||
} catch (err) {
|
||||
// Unexpected error during rev-list or merge — proceed with stale
|
||||
const errMsg = getErrorMessage(err);
|
||||
logger.warn(`Unexpected error syncing '${branchName}': ${errMsg}`);
|
||||
return buildStaleResult(
|
||||
projectPath,
|
||||
branchName,
|
||||
tracking.remote,
|
||||
`Sync failed: ${errMsg}. Proceeding with local copy.`
|
||||
);
|
||||
}
|
||||
}
|
||||
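For orientation, here is a minimal sketch of how a caller might consume this sync result and the skipFetch flag. The helper name `syncBaseBranch` and the `BaseBranchSyncResult` shape are assumptions inferred from the return objects in the hunk above, not names confirmed by this diff.

```ts
// Hypothetical result shape inferred from the return statements above.
interface BaseBranchSyncResult {
  attempted: boolean;
  synced: boolean;
  remote?: string;
  commitHash?: string;
  diverged?: boolean;
  message: string;
}

// Assumed caller: fetch all remotes once up front, then sync several base
// branches with skipFetch=true so each branch does not re-fetch the remote.
async function syncAllBases(
  projectPath: string,
  branches: string[],
  syncBaseBranch: (
    projectPath: string,
    branch: string,
    skipFetch: boolean
  ) => Promise<BaseBranchSyncResult>
): Promise<void> {
  for (const branch of branches) {
    const result = await syncBaseBranch(projectPath, branch, /* skipFetch */ true);
    if (!result.synced) {
      console.warn(`Proceeding with stale '${branch}': ${result.message}`);
    }
  }
}
```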
@@ -10,6 +10,7 @@
* Follows the same pattern as worktree-branch-service.ts (performSwitchBranch).
*
* The workflow:
* 0. Fetch latest from all remotes (ensures remote refs are up-to-date)
* 1. Validate inputs (branch name, base branch)
* 2. Get current branch name
* 3. Check if target branch already exists
@@ -19,11 +20,51 @@
* 7. Handle error recovery (restore stash if checkout fails)
*/

import { getErrorMessage } from '@automaker/utils';
import { createLogger, getErrorMessage } from '@automaker/utils';
import { execGitCommand } from '../lib/git.js';
import type { EventEmitter } from '../lib/events.js';
import { hasAnyChanges, stashChanges, popStash, localBranchExists } from './branch-utils.js';

const logger = createLogger('CheckoutBranchService');

// ============================================================================
// Local Helpers
// ============================================================================

/** Timeout for git fetch operations (30 seconds) */
const FETCH_TIMEOUT_MS = 30_000;

/**
* Fetch latest from all remotes (silently, with timeout).
*
* A process-level timeout is enforced via an AbortController so that a
* slow or unresponsive remote does not block the branch creation flow
* indefinitely. Timeout errors are logged and treated as non-fatal
* (the same as network-unavailable errors) so the rest of the workflow
* continues normally. This is called before creating the new branch to
* ensure remote refs are up-to-date when a remote base branch is used.
*/
async function fetchRemotes(cwd: string): Promise<void> {
const controller = new AbortController();
const timerId = setTimeout(() => controller.abort(), FETCH_TIMEOUT_MS);

try {
await execGitCommand(['fetch', '--all', '--quiet'], cwd, undefined, controller);
} catch (error) {
if (controller.signal.aborted) {
// Fetch timed out - log and continue; callers should not be blocked by a slow remote
logger.warn(
`fetchRemotes timed out after ${FETCH_TIMEOUT_MS}ms - continuing without latest remote refs`
);
} else {
logger.warn(`fetchRemotes failed: ${getErrorMessage(error)} - continuing with local refs`);
}
// Non-fatal: continue with locally available refs regardless of failure type
} finally {
clearTimeout(timerId);
}
}
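The same abort-on-timeout pattern can be shown in isolation. This is an illustration only, using Node's child_process directly rather than the project's `execGitCommand` (whose controller-accepting signature is taken from the hunk above).

```ts
import { spawn } from 'child_process';

// Generic sketch of the timeout pattern fetchRemotes uses: abort a child
// process if it has not finished within the deadline, then clear the timer.
function runWithTimeout(
  cmd: string,
  args: string[],
  cwd: string,
  timeoutMs: number
): Promise<void> {
  const controller = new AbortController();
  const timer = setTimeout(() => controller.abort(), timeoutMs);
  return new Promise<void>((resolve, reject) => {
    const child = spawn(cmd, args, { cwd, signal: controller.signal });
    child.on('error', (err) => reject(err)); // AbortError when the timer fires
    child.on('close', (code) =>
      code === 0 ? resolve() : reject(new Error(`${cmd} exited with code ${code}`))
    );
  }).finally(() => clearTimeout(timer));
}
```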

// ============================================================================
// Types
// ============================================================================
@@ -78,6 +119,11 @@ export async function performCheckoutBranch(
// Emit start event
events?.emit('switch:start', { worktreePath, branchName, operation: 'checkout' });

// 0. Fetch latest from all remotes before creating the branch
// This ensures remote refs are up-to-date so that base branch validation
// works correctly for remote branch references (e.g. "origin/main").
await fetchRemotes(worktreePath);

// 1. Get current branch
let previousBranch: string;
try {

@@ -170,17 +170,28 @@ export class ConcurrencyManager {
* @param projectPath - The project path
* @param branchName - The branch name, or null for main worktree
* (features without branchName or matching primary branch)
* @param options.autoModeOnly - If true, only count features started by auto mode.
* Note: The auto-loop coordinator now counts ALL
* running features (not just auto-mode) to ensure
* total system load is respected. This option is
* retained for other callers that may need filtered counts.
* @returns Number of running features for the worktree
*/
async getRunningCountForWorktree(
projectPath: string,
branchName: string | null
branchName: string | null,
options?: { autoModeOnly?: boolean }
): Promise<number> {
// Get the actual primary branch name for the project
const primaryBranch = await this.getCurrentBranch(projectPath);

let count = 0;
for (const [, feature] of this.runningFeatures) {
// If autoModeOnly is set, skip manually started features
if (options?.autoModeOnly && !feature.isAutoMode) {
continue;
}

// Filter by project path AND branchName to get accurate worktree-specific count
const featureBranch = feature.branchName ?? null;
if (branchName === null) {

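A hedged usage sketch of the extended signature follows; only the method name and the `autoModeOnly` option come from the hunk above, the surrounding wiring is assumed.

```ts
// Compare total load on a worktree against the auto-mode-only count.
async function logWorktreeLoad(
  manager: {
    getRunningCountForWorktree(
      projectPath: string,
      branchName: string | null,
      options?: { autoModeOnly?: boolean }
    ): Promise<number>;
  },
  projectPath: string,
  branchName: string | null
): Promise<void> {
  const total = await manager.getRunningCountForWorktree(projectPath, branchName);
  const autoOnly = await manager.getRunningCountForWorktree(projectPath, branchName, {
    autoModeOnly: true,
  });
  console.log(`worktree load: ${total} running (${autoOnly} started by auto mode)`);
}
```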
@@ -19,12 +19,81 @@ const logger = createLogger('DevServerService');
|
||||
// Maximum scrollback buffer size (characters) - matches TerminalService pattern
|
||||
const MAX_SCROLLBACK_SIZE = 50000; // ~50KB per dev server
|
||||
|
||||
// Timeout (ms) before falling back to the allocated port if URL detection hasn't succeeded.
|
||||
// This handles cases where the dev server output format is not recognized by any pattern.
|
||||
const URL_DETECTION_TIMEOUT_MS = 30_000;
|
||||
|
||||
// URL patterns for detecting full URLs from dev server output.
|
||||
// Defined once at module level to avoid reallocation on every call to detectUrlFromOutput.
|
||||
// Ordered from most specific (framework-specific) to least specific.
|
||||
const URL_PATTERNS: Array<{ pattern: RegExp; description: string }> = [
|
||||
// Vite / Nuxt / SvelteKit / Astro / Angular CLI format: "Local: http://..."
|
||||
{
|
||||
pattern: /(?:Local|Network|External):\s+(https?:\/\/[^\s]+)/i,
|
||||
description: 'Vite/Nuxt/SvelteKit/Astro/Angular format',
|
||||
},
|
||||
// Next.js format: "ready - started server on 0.0.0.0:3000, url: http://localhost:3000"
|
||||
// Next.js 14+: "▲ Next.js 14.0.0\n- Local: http://localhost:3000"
|
||||
{
|
||||
pattern: /(?:ready|started server).*?(?:url:\s*)?(https?:\/\/[^\s,]+)/i,
|
||||
description: 'Next.js format',
|
||||
},
|
||||
// Remix format: "started at http://localhost:3000"
|
||||
// Django format: "Starting development server at http://127.0.0.1:8000/"
|
||||
// Rails / Puma: "Listening on http://127.0.0.1:3000"
|
||||
// Generic: "listening at http://...", "available at http://...", "running at http://..."
|
||||
{
|
||||
pattern:
|
||||
/(?:starting|started|listening|running|available|serving|accessible)\s+(?:at|on)\s+(https?:\/\/[^\s,)]+)/i,
|
||||
description: 'Generic "starting/started/listening at" format',
|
||||
},
|
||||
// PHP built-in server: "Development Server (http://localhost:8000) started"
|
||||
{
|
||||
pattern: /(?:server|development server)\s*\(\s*(https?:\/\/[^\s)]+)\s*\)/i,
|
||||
description: 'PHP server format',
|
||||
},
|
||||
// Webpack Dev Server: "Project is running at http://localhost:8080/"
|
||||
{
|
||||
pattern: /(?:project|app|application)\s+(?:is\s+)?running\s+(?:at|on)\s+(https?:\/\/[^\s,]+)/i,
|
||||
description: 'Webpack/generic "running at" format',
|
||||
},
|
||||
// Go / Rust / generic: "Serving on http://...", "Server on http://..."
|
||||
{
|
||||
pattern: /(?:serving|server)\s+(?:on|at)\s+(https?:\/\/[^\s,]+)/i,
|
||||
description: 'Generic "serving on" format',
|
||||
},
|
||||
// Localhost URL with port (conservative - must be localhost/127.0.0.1/[::]/0.0.0.0)
|
||||
// This catches anything that looks like a dev server URL
|
||||
{
|
||||
pattern: /(https?:\/\/(?:localhost|127\.0\.0\.1|\[::\]|0\.0\.0\.0):\d+\S*)/i,
|
||||
description: 'Generic localhost URL with port',
|
||||
},
|
||||
];
|
||||
|
||||
// Port-only patterns for detecting port numbers from dev server output
|
||||
// when a full URL is not present in the output.
|
||||
// Defined once at module level to avoid reallocation on every call to detectUrlFromOutput.
|
||||
const PORT_PATTERNS: Array<{ pattern: RegExp; description: string }> = [
|
||||
// "listening on port 3000", "server on port 3000", "started on port 3000"
|
||||
{
|
||||
pattern: /(?:listening|running|started|serving|available)\s+on\s+port\s+(\d+)/i,
|
||||
description: '"listening on port" format',
|
||||
},
|
||||
// "Port: 3000", "port 3000" (at start of line or after whitespace)
|
||||
{
|
||||
pattern: /(?:^|\s)port[:\s]+(\d{4,5})(?:\s|$|[.,;])/im,
|
||||
description: '"port:" format',
|
||||
},
|
||||
];
|
||||
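To make the two-tier matching concrete, here is a small standalone check of two representative patterns copied from the tables above, run against typical dev-server output lines. It is a sketch for illustration, not part of the service.

```ts
// One full-URL pattern and one port-only pattern, copied from above.
const vitePattern = /(?:Local|Network|External):\s+(https?:\/\/[^\s]+)/i;
const portOnlyPattern = /(?:listening|running|started|serving|available)\s+on\s+port\s+(\d+)/i;

const samples = [
  '  Local:   http://localhost:5173/',
  'Server listening on port 3000',
];

for (const line of samples) {
  const urlMatch = line.match(vitePattern);
  if (urlMatch) {
    console.log(`full URL detected: ${urlMatch[1]}`); // http://localhost:5173/
    continue;
  }
  const portMatch = line.match(portOnlyPattern);
  if (portMatch) {
    console.log(`port-only detection: http://localhost:${portMatch[1]}`); // port 3000
  }
}
```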
|
||||
// Throttle output to prevent overwhelming WebSocket under heavy load
|
||||
const OUTPUT_THROTTLE_MS = 4; // ~250fps max update rate for responsive feedback
|
||||
const OUTPUT_BATCH_SIZE = 4096; // Smaller batches for lower latency
|
||||
|
||||
export interface DevServerInfo {
|
||||
worktreePath: string;
|
||||
/** The port originally reserved by findAvailablePort() – never mutated after startDevServer sets it */
|
||||
allocatedPort: number;
|
||||
port: number;
|
||||
url: string;
|
||||
process: ChildProcess | null;
|
||||
@@ -39,6 +108,8 @@ export interface DevServerInfo {
|
||||
stopping: boolean;
|
||||
// Flag to indicate if URL has been detected from output
|
||||
urlDetected: boolean;
|
||||
// Timer for URL detection timeout fallback
|
||||
urlDetectionTimeout: NodeJS.Timeout | null;
|
||||
}
|
||||
|
||||
// Port allocation starts at 3001 to avoid conflicts with common dev ports
|
||||
@@ -61,6 +132,32 @@ class DevServerService {
|
||||
this.emitter = emitter;
|
||||
}
|
||||
|
||||
/**
|
||||
* Prune a stale server entry whose process has exited without cleanup.
|
||||
* Clears any pending timers, removes the port from allocatedPorts, deletes
|
||||
* the entry from runningServers, and emits the "dev-server:stopped" event
|
||||
* so all callers consistently notify the frontend when pruning entries.
|
||||
*
|
||||
* @param worktreePath - The key used in runningServers
|
||||
* @param server - The DevServerInfo entry to prune
|
||||
*/
|
||||
private pruneStaleServer(worktreePath: string, server: DevServerInfo): void {
|
||||
if (server.flushTimeout) clearTimeout(server.flushTimeout);
|
||||
if (server.urlDetectionTimeout) clearTimeout(server.urlDetectionTimeout);
|
||||
// Use allocatedPort (immutable) to free the reserved slot; server.port may have
|
||||
// been mutated by detectUrlFromOutput to reflect the actual detected port.
|
||||
this.allocatedPorts.delete(server.allocatedPort);
|
||||
this.runningServers.delete(worktreePath);
|
||||
if (this.emitter) {
|
||||
this.emitter.emit('dev-server:stopped', {
|
||||
worktreePath,
|
||||
port: server.port, // Report the externally-visible (detected) port
|
||||
exitCode: server.process?.exitCode ?? null,
|
||||
timestamp: new Date().toISOString(),
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Append data to scrollback buffer with size limit enforcement
|
||||
* Evicts oldest data when buffer exceeds MAX_SCROLLBACK_SIZE
|
||||
@@ -105,9 +202,52 @@ class DevServerService {
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Strip ANSI escape codes from a string
|
||||
* Dev server output often contains color codes that can interfere with URL detection
|
||||
*/
|
||||
private stripAnsi(str: string): string {
|
||||
// Matches ANSI escape sequences: CSI sequences, OSC sequences, and simple escapes
|
||||
// eslint-disable-next-line no-control-regex
|
||||
return str.replace(/\x1B(?:\[[0-9;]*[a-zA-Z]|\].*?(?:\x07|\x1B\\)|\[[?]?[0-9;]*[hl])/g, '');
|
||||
}
|
||||
|
||||
/**
|
||||
* Extract port number from a URL string.
|
||||
* Returns the explicit port if present, or null if no port is specified.
|
||||
* Default protocol ports (80/443) are intentionally NOT returned to avoid
|
||||
* overwriting allocated dev server ports with protocol defaults.
|
||||
*/
|
||||
private extractPortFromUrl(url: string): number | null {
|
||||
try {
|
||||
const parsed = new URL(url);
|
||||
if (parsed.port) {
|
||||
return parseInt(parsed.port, 10);
|
||||
}
|
||||
return null;
|
||||
} catch {
|
||||
return null;
|
||||
}
|
||||
}
|
||||
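The port-extraction rule above can be demonstrated in isolation; this sketch mirrors the same behavior with the WHATWG URL parser, including the deliberate omission of default protocol ports.

```ts
// Explicit ports only; 80/443 defaults are not reported, matching the comment above.
function extractExplicitPort(url: string): number | null {
  try {
    const parsed = new URL(url);
    return parsed.port ? parseInt(parsed.port, 10) : null;
  } catch {
    return null;
  }
}

console.log(extractExplicitPort('http://localhost:5173/')); // 5173
console.log(extractExplicitPort('https://example.com/'));   // null (default 443 ignored)
console.log(extractExplicitPort('not a url'));               // null
```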
|
||||
/**
|
||||
* Detect actual server URL from output
|
||||
* Parses stdout/stderr for common URL patterns from dev servers
|
||||
* Parses stdout/stderr for common URL patterns from dev servers.
|
||||
*
|
||||
* Supports detection of URLs from:
|
||||
* - Vite: "Local: http://localhost:5173/"
|
||||
* - Next.js: "ready - started server on 0.0.0.0:3000, url: http://localhost:3000"
|
||||
* - Nuxt: "Local: http://localhost:3000/"
|
||||
* - Remix: "started at http://localhost:3000"
|
||||
* - Astro: "Local http://localhost:4321/"
|
||||
* - SvelteKit: "Local: http://localhost:5173/"
|
||||
* - CRA/Webpack: "On Your Network: http://192.168.1.1:3000"
|
||||
* - Angular: "Local: http://localhost:4200/"
|
||||
* - Express/Fastify/Koa: "Server listening on port 3000"
|
||||
* - Django: "Starting development server at http://127.0.0.1:8000/"
|
||||
* - Rails: "Listening on http://127.0.0.1:3000"
|
||||
* - PHP: "Development Server (http://localhost:8000) started"
|
||||
* - Generic: Any localhost URL with a port
|
||||
*/
|
||||
private detectUrlFromOutput(server: DevServerInfo, content: string): void {
|
||||
// Skip if URL already detected
|
||||
@@ -115,39 +255,107 @@ class DevServerService {
|
||||
return;
|
||||
}
|
||||
|
||||
// Common URL patterns from various dev servers:
|
||||
// - Vite: "Local: http://localhost:5173/"
|
||||
// - Next.js: "ready - started server on 0.0.0.0:3000, url: http://localhost:3000"
|
||||
// - CRA/Webpack: "On Your Network: http://192.168.1.1:3000"
|
||||
// - Generic: Any http:// or https:// URL
|
||||
const urlPatterns = [
|
||||
/(?:Local|Network):\s+(https?:\/\/[^\s]+)/i, // Vite format
|
||||
/(?:ready|started server).*?(?:url:\s*)?(https?:\/\/[^\s,]+)/i, // Next.js format
|
||||
/(https?:\/\/(?:localhost|127\.0\.0\.1|\[::\]):\d+)/i, // Generic localhost URL
|
||||
/(https?:\/\/[^\s<>"{}|\\^`[\]]+)/i, // Any HTTP(S) URL
|
||||
];
|
||||
// Strip ANSI escape codes to prevent color codes from breaking regex matching
|
||||
const cleanContent = this.stripAnsi(content);
|
||||
|
||||
for (const pattern of urlPatterns) {
|
||||
const match = content.match(pattern);
|
||||
// Phase 1: Try to detect a full URL from output
|
||||
// Patterns are defined at module level (URL_PATTERNS) and reused across calls
|
||||
for (const { pattern, description } of URL_PATTERNS) {
|
||||
const match = cleanContent.match(pattern);
|
||||
if (match && match[1]) {
|
||||
const detectedUrl = match[1].trim();
|
||||
// Validate it looks like a reasonable URL
|
||||
let detectedUrl = match[1].trim();
|
||||
// Remove trailing punctuation that might have been captured
|
||||
detectedUrl = detectedUrl.replace(/[.,;:!?)\]}>]+$/, '');
|
||||
|
||||
if (detectedUrl.startsWith('http://') || detectedUrl.startsWith('https://')) {
|
||||
// Normalize 0.0.0.0 to localhost for browser accessibility
|
||||
detectedUrl = detectedUrl.replace(
|
||||
/\/\/0\.0\.0\.0(:\d+)?/,
|
||||
(_, port) => `//localhost${port || ''}`
|
||||
);
|
||||
// Normalize [::] to localhost for browser accessibility
|
||||
detectedUrl = detectedUrl.replace(
|
||||
/\/\/\[::\](:\d+)?/,
|
||||
(_, port) => `//localhost${port || ''}`
|
||||
);
|
||||
// Normalize [::1] (IPv6 loopback) to localhost for browser accessibility
|
||||
detectedUrl = detectedUrl.replace(
|
||||
/\/\/\[::1\](:\d+)?/,
|
||||
(_, port) => `//localhost${port || ''}`
|
||||
);
|
||||
|
||||
server.url = detectedUrl;
|
||||
server.urlDetected = true;
|
||||
logger.info(
|
||||
`Detected actual server URL: ${detectedUrl} (allocated port was ${server.port})`
|
||||
);
|
||||
|
||||
// Clear the URL detection timeout since we found the URL
|
||||
if (server.urlDetectionTimeout) {
|
||||
clearTimeout(server.urlDetectionTimeout);
|
||||
server.urlDetectionTimeout = null;
|
||||
}
|
||||
|
||||
// Update the port to match the detected URL's actual port
|
||||
const detectedPort = this.extractPortFromUrl(detectedUrl);
|
||||
if (detectedPort && detectedPort !== server.port) {
|
||||
logger.info(
|
||||
`Port mismatch: allocated ${server.port}, detected ${detectedPort} from ${description}`
|
||||
);
|
||||
server.port = detectedPort;
|
||||
}
|
||||
|
||||
logger.info(`Detected server URL via ${description}: ${detectedUrl}`);
|
||||
|
||||
// Emit URL update event
|
||||
if (this.emitter) {
|
||||
this.emitter.emit('dev-server:url-detected', {
|
||||
worktreePath: server.worktreePath,
|
||||
url: detectedUrl,
|
||||
port: server.port,
|
||||
timestamp: new Date().toISOString(),
|
||||
});
|
||||
}
|
||||
break;
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Phase 2: Try to detect just a port number from output (no full URL)
|
||||
// Some servers only print "listening on port 3000" without a full URL
|
||||
// Patterns are defined at module level (PORT_PATTERNS) and reused across calls
|
||||
for (const { pattern, description } of PORT_PATTERNS) {
|
||||
const match = cleanContent.match(pattern);
|
||||
if (match && match[1]) {
|
||||
const detectedPort = parseInt(match[1], 10);
|
||||
// Sanity check: port should be in a reasonable range
|
||||
if (detectedPort > 0 && detectedPort <= 65535) {
|
||||
const detectedUrl = `http://localhost:${detectedPort}`;
|
||||
server.url = detectedUrl;
|
||||
server.urlDetected = true;
|
||||
|
||||
// Clear the URL detection timeout since we found the port
|
||||
if (server.urlDetectionTimeout) {
|
||||
clearTimeout(server.urlDetectionTimeout);
|
||||
server.urlDetectionTimeout = null;
|
||||
}
|
||||
|
||||
if (detectedPort !== server.port) {
|
||||
logger.info(
|
||||
`Port mismatch: allocated ${server.port}, detected ${detectedPort} from ${description}`
|
||||
);
|
||||
server.port = detectedPort;
|
||||
}
|
||||
|
||||
logger.info(`Detected server port via ${description}: ${detectedPort} → ${detectedUrl}`);
|
||||
|
||||
// Emit URL update event
|
||||
if (this.emitter) {
|
||||
this.emitter.emit('dev-server:url-detected', {
|
||||
worktreePath: server.worktreePath,
|
||||
url: detectedUrl,
|
||||
port: server.port,
|
||||
timestamp: new Date().toISOString(),
|
||||
});
|
||||
}
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
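The bind-address normalization performed inside detectUrlFromOutput can be shown as a standalone sketch; the regexes are the ones from the hunk above, composed into one helper for illustration.

```ts
// Rewrite bind addresses a browser cannot open (0.0.0.0, [::], [::1]) to
// localhost, preserving any explicit port.
function normalizeForBrowser(url: string): string {
  return url
    .replace(/\/\/0\.0\.0\.0(:\d+)?/, (_, port) => `//localhost${port || ''}`)
    .replace(/\/\/\[::\](:\d+)?/, (_, port) => `//localhost${port || ''}`)
    .replace(/\/\/\[::1\](:\d+)?/, (_, port) => `//localhost${port || ''}`);
}

console.log(normalizeForBrowser('http://0.0.0.0:3000')); // http://localhost:3000
console.log(normalizeForBrowser('http://[::]:5173/'));   // http://localhost:5173/
console.log(normalizeForBrowser('http://[::1]:8080'));   // http://localhost:8080
```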
@@ -498,6 +706,7 @@ class DevServerService {
|
||||
const hostname = process.env.HOSTNAME || 'localhost';
|
||||
const serverInfo: DevServerInfo = {
|
||||
worktreePath,
|
||||
allocatedPort: port, // Immutable: records which port we reserved; never changed after this point
|
||||
port,
|
||||
url: `http://${hostname}:${port}`, // Initial URL, may be updated by detectUrlFromOutput
|
||||
process: devProcess,
|
||||
@@ -507,6 +716,7 @@ class DevServerService {
|
||||
flushTimeout: null,
|
||||
stopping: false,
|
||||
urlDetected: false, // Will be set to true when actual URL is detected from output
|
||||
urlDetectionTimeout: null, // Will be set after server starts successfully
|
||||
};
|
||||
|
||||
// Capture stdout with buffer management and event emission
|
||||
@@ -530,18 +740,24 @@ class DevServerService {
|
||||
serverInfo.flushTimeout = null;
|
||||
}
|
||||
|
||||
// Clear URL detection timeout to prevent stale fallback emission
|
||||
if (serverInfo.urlDetectionTimeout) {
|
||||
clearTimeout(serverInfo.urlDetectionTimeout);
|
||||
serverInfo.urlDetectionTimeout = null;
|
||||
}
|
||||
|
||||
// Emit stopped event (only if not already stopping - prevents duplicate events)
|
||||
if (this.emitter && !serverInfo.stopping) {
|
||||
this.emitter.emit('dev-server:stopped', {
|
||||
worktreePath,
|
||||
port,
|
||||
port: serverInfo.port, // Use the detected port (may differ from allocated port if detectUrlFromOutput updated it)
|
||||
exitCode,
|
||||
error: errorMessage,
|
||||
timestamp: new Date().toISOString(),
|
||||
});
|
||||
}
|
||||
|
||||
this.allocatedPorts.delete(port);
|
||||
this.allocatedPorts.delete(serverInfo.allocatedPort);
|
||||
this.runningServers.delete(worktreePath);
|
||||
};
|
||||
|
||||
@@ -587,6 +803,43 @@ class DevServerService {
|
||||
});
|
||||
}
|
||||
|
||||
// Set up URL detection timeout fallback.
|
||||
// If URL detection hasn't succeeded after URL_DETECTION_TIMEOUT_MS, check if
|
||||
// the allocated port is actually in use (server probably started successfully)
|
||||
// and emit a url-detected event with the allocated port as fallback.
|
||||
// Also re-scan the scrollback buffer in case the URL was printed before
|
||||
// our patterns could match (e.g., it was split across multiple data chunks).
|
||||
serverInfo.urlDetectionTimeout = setTimeout(() => {
|
||||
serverInfo.urlDetectionTimeout = null;
|
||||
|
||||
// Only run fallback if server is still running and URL wasn't detected
|
||||
if (serverInfo.stopping || serverInfo.urlDetected || !this.runningServers.has(worktreePath)) {
|
||||
return;
|
||||
}
|
||||
|
||||
// Re-scan the entire scrollback buffer for URL patterns
|
||||
// This catches cases where the URL was split across multiple output chunks
|
||||
logger.info(`URL detection timeout for ${worktreePath}, re-scanning scrollback buffer`);
|
||||
this.detectUrlFromOutput(serverInfo, serverInfo.scrollbackBuffer);
|
||||
|
||||
// If still not detected after full rescan, use the allocated port as fallback
|
||||
if (!serverInfo.urlDetected) {
|
||||
logger.info(`URL detection fallback: using allocated port ${port} for ${worktreePath}`);
|
||||
const fallbackUrl = `http://${hostname}:${port}`;
|
||||
serverInfo.url = fallbackUrl;
|
||||
serverInfo.urlDetected = true;
|
||||
|
||||
if (this.emitter) {
|
||||
this.emitter.emit('dev-server:url-detected', {
|
||||
worktreePath,
|
||||
url: fallbackUrl,
|
||||
port,
|
||||
timestamp: new Date().toISOString(),
|
||||
});
|
||||
}
|
||||
}
|
||||
}, URL_DETECTION_TIMEOUT_MS);
|
||||
|
||||
return {
|
||||
success: true,
|
||||
result: {
|
||||
@@ -632,6 +885,12 @@ class DevServerService {
|
||||
server.flushTimeout = null;
|
||||
}
|
||||
|
||||
// Clean up URL detection timeout
|
||||
if (server.urlDetectionTimeout) {
|
||||
clearTimeout(server.urlDetectionTimeout);
|
||||
server.urlDetectionTimeout = null;
|
||||
}
|
||||
|
||||
// Clear any pending output buffer
|
||||
server.outputBuffer = '';
|
||||
|
||||
@@ -650,8 +909,10 @@ class DevServerService {
|
||||
server.process.kill('SIGTERM');
|
||||
}
|
||||
|
||||
// Free the port
|
||||
this.allocatedPorts.delete(server.port);
|
||||
// Free the originally-reserved port slot (allocatedPort is immutable and always
|
||||
// matches what was added to allocatedPorts in startDevServer; server.port may
|
||||
// have been updated by detectUrlFromOutput to the actual detected port).
|
||||
this.allocatedPorts.delete(server.allocatedPort);
|
||||
this.runningServers.delete(worktreePath);
|
||||
|
||||
return {
|
||||
@@ -665,6 +926,7 @@ class DevServerService {
|
||||
|
||||
/**
|
||||
* List all running dev servers
|
||||
* Also verifies that each server's process is still alive, removing stale entries
|
||||
*/
|
||||
listDevServers(): {
|
||||
success: boolean;
|
||||
@@ -673,13 +935,38 @@ class DevServerService {
|
||||
worktreePath: string;
|
||||
port: number;
|
||||
url: string;
|
||||
urlDetected: boolean;
|
||||
startedAt: string;
|
||||
}>;
|
||||
};
|
||||
} {
|
||||
// Prune any servers whose process has died without us being notified
|
||||
// This handles edge cases where the process exited but the 'exit' event was missed
|
||||
const stalePaths: string[] = [];
|
||||
for (const [worktreePath, server] of this.runningServers) {
|
||||
// Check if exitCode is a number (not null/undefined) - indicates process has exited
|
||||
if (server.process && typeof server.process.exitCode === 'number') {
|
||||
logger.info(
|
||||
`Pruning stale server entry for ${worktreePath} (process exited with code ${server.process.exitCode})`
|
||||
);
|
||||
stalePaths.push(worktreePath);
|
||||
}
|
||||
}
|
||||
for (const stalePath of stalePaths) {
|
||||
const server = this.runningServers.get(stalePath);
|
||||
if (server) {
|
||||
// Delegate to the shared helper so timers, ports, and the stopped event
|
||||
// are all handled consistently with isRunning and getServerInfo.
|
||||
this.pruneStaleServer(stalePath, server);
|
||||
}
|
||||
}
|
||||
|
||||
const servers = Array.from(this.runningServers.values()).map((s) => ({
|
||||
worktreePath: s.worktreePath,
|
||||
port: s.port,
|
||||
url: s.url,
|
||||
urlDetected: s.urlDetected,
|
||||
startedAt: s.startedAt.toISOString(),
|
||||
}));
|
||||
|
||||
return {
|
||||
@@ -689,17 +976,33 @@ class DevServerService {
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if a worktree has a running dev server
|
||||
* Check if a worktree has a running dev server.
|
||||
* Also prunes stale entries where the process has exited.
|
||||
*/
|
||||
isRunning(worktreePath: string): boolean {
|
||||
return this.runningServers.has(worktreePath);
|
||||
const server = this.runningServers.get(worktreePath);
|
||||
if (!server) return false;
|
||||
// Prune stale entry if the process has exited
|
||||
if (server.process && typeof server.process.exitCode === 'number') {
|
||||
this.pruneStaleServer(worktreePath, server);
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get info for a specific worktree's dev server
|
||||
* Get info for a specific worktree's dev server.
|
||||
* Also prunes stale entries where the process has exited.
|
||||
*/
|
||||
getServerInfo(worktreePath: string): DevServerInfo | undefined {
|
||||
return this.runningServers.get(worktreePath);
|
||||
const server = this.runningServers.get(worktreePath);
|
||||
if (!server) return undefined;
|
||||
// Prune stale entry if the process has exited
|
||||
if (server.process && typeof server.process.exitCode === 'number') {
|
||||
this.pruneStaleServer(worktreePath, server);
|
||||
return undefined;
|
||||
}
|
||||
return server;
|
||||
}
|
||||
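The staleness test shared by isRunning, getServerInfo, and listDevServers reduces to one predicate; this is a sketch of that check, not project code.

```ts
import type { ChildProcess } from 'child_process';

// A numeric exitCode means the process has already exited, even if the
// 'exit' handler never ran cleanup; such entries should be pruned.
function hasExited(proc: ChildProcess | null): boolean {
  return proc !== null && typeof proc.exitCode === 'number';
}
```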
|
||||
/**
|
||||
@@ -727,6 +1030,15 @@ class DevServerService {
|
||||
};
|
||||
}
|
||||
|
||||
// Prune stale entry if the process has been killed or has exited
|
||||
if (server.process && (server.process.killed || server.process.exitCode != null)) {
|
||||
this.pruneStaleServer(worktreePath, server);
|
||||
return {
|
||||
success: false,
|
||||
error: `No dev server running for worktree: ${worktreePath}`,
|
||||
};
|
||||
}
|
||||
|
||||
return {
|
||||
success: true,
|
||||
result: {
|
||||
|
||||
@@ -60,10 +60,13 @@ interface AutoModeEventPayload {
|
||||
featureId?: string;
|
||||
featureName?: string;
|
||||
passes?: boolean;
|
||||
executionMode?: 'auto' | 'manual';
|
||||
message?: string;
|
||||
error?: string;
|
||||
errorType?: string;
|
||||
projectPath?: string;
|
||||
/** Status field present when type === 'feature_status_changed' */
|
||||
status?: string;
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -75,6 +78,40 @@ interface FeatureCreatedPayload {
|
||||
projectPath: string;
|
||||
}
|
||||
|
||||
/**
|
||||
* Feature status changed event payload structure
|
||||
*/
|
||||
interface FeatureStatusChangedPayload {
|
||||
featureId: string;
|
||||
projectPath: string;
|
||||
status: string;
|
||||
}
|
||||
|
||||
/**
|
||||
* Type guard to safely narrow AutoModeEventPayload to FeatureStatusChangedPayload
|
||||
*/
|
||||
function isFeatureStatusChangedPayload(
|
||||
payload: AutoModeEventPayload
|
||||
): payload is AutoModeEventPayload & FeatureStatusChangedPayload {
|
||||
return (
|
||||
typeof payload.featureId === 'string' &&
|
||||
typeof payload.projectPath === 'string' &&
|
||||
typeof payload.status === 'string'
|
||||
);
|
||||
}
|
||||
|
||||
/**
|
||||
* Feature completed event payload structure
|
||||
*/
|
||||
interface FeatureCompletedPayload {
|
||||
featureId: string;
|
||||
featureName?: string;
|
||||
projectPath: string;
|
||||
passes?: boolean;
|
||||
message?: string;
|
||||
executionMode?: 'auto' | 'manual';
|
||||
}
|
||||
|
||||
/**
|
||||
* Event Hook Service
|
||||
*
|
||||
@@ -82,12 +119,30 @@ interface FeatureCreatedPayload {
|
||||
* Also stores events to history for debugging and replay.
|
||||
*/
|
||||
export class EventHookService {
|
||||
/** Feature status that indicates agent work is done and awaiting human review (tests skipped) */
|
||||
private static readonly STATUS_WAITING_APPROVAL = 'waiting_approval';
|
||||
/** Feature status that indicates agent work passed automated verification */
|
||||
private static readonly STATUS_VERIFIED = 'verified';
|
||||
|
||||
private emitter: EventEmitter | null = null;
|
||||
private settingsService: SettingsService | null = null;
|
||||
private eventHistoryService: EventHistoryService | null = null;
|
||||
private featureLoader: FeatureLoader | null = null;
|
||||
private unsubscribe: (() => void) | null = null;
|
||||
|
||||
/**
|
||||
* Track feature IDs that have already had hooks fired via auto_mode_feature_complete
|
||||
* to prevent double-firing when feature_status_changed also fires for the same feature.
|
||||
* Entries are automatically cleaned up after 30 seconds.
|
||||
*/
|
||||
private recentlyHandledFeatures = new Set<string>();
|
||||
|
||||
/**
|
||||
* Timer IDs for pending cleanup of recentlyHandledFeatures entries,
|
||||
* keyed by featureId. Stored so they can be cancelled in destroy().
|
||||
*/
|
||||
private recentlyHandledTimers = new Map<string, ReturnType<typeof setTimeout>>();
|
||||
|
||||
/**
|
||||
* Initialize the service with event emitter, settings service, event history service, and feature loader
|
||||
*/
|
||||
@@ -108,6 +163,8 @@ export class EventHookService {
|
||||
this.handleAutoModeEvent(payload as AutoModeEventPayload);
|
||||
} else if (type === 'feature:created') {
|
||||
this.handleFeatureCreatedEvent(payload as FeatureCreatedPayload);
|
||||
} else if (type === 'feature:completed') {
|
||||
this.handleFeatureCompletedEvent(payload as FeatureCompletedPayload);
|
||||
}
|
||||
});
|
||||
|
||||
@@ -122,6 +179,12 @@ export class EventHookService {
|
||||
this.unsubscribe();
|
||||
this.unsubscribe = null;
|
||||
}
|
||||
// Cancel all pending cleanup timers to avoid cross-session mutations
|
||||
for (const timerId of this.recentlyHandledTimers.values()) {
|
||||
clearTimeout(timerId);
|
||||
}
|
||||
this.recentlyHandledTimers.clear();
|
||||
this.recentlyHandledFeatures.clear();
|
||||
this.emitter = null;
|
||||
this.settingsService = null;
|
||||
this.eventHistoryService = null;
|
||||
@@ -139,15 +202,31 @@ export class EventHookService {
|
||||
|
||||
switch (payload.type) {
|
||||
case 'auto_mode_feature_complete':
|
||||
// Only map explicit auto-mode completion events.
|
||||
// Manual feature completions are emitted as feature:completed.
|
||||
if (payload.executionMode !== 'auto') return;
|
||||
trigger = payload.passes ? 'feature_success' : 'feature_error';
|
||||
// Track this feature so feature_status_changed doesn't double-fire hooks
|
||||
if (payload.featureId) {
|
||||
this.markFeatureHandled(payload.featureId);
|
||||
}
|
||||
break;
|
||||
case 'auto_mode_error':
|
||||
// Feature-level error (has featureId) vs auto-mode level error
|
||||
trigger = payload.featureId ? 'feature_error' : 'auto_mode_error';
|
||||
// Track this feature so feature_status_changed doesn't double-fire hooks
|
||||
if (payload.featureId) {
|
||||
this.markFeatureHandled(payload.featureId);
|
||||
}
|
||||
break;
|
||||
case 'auto_mode_idle':
|
||||
trigger = 'auto_mode_complete';
|
||||
break;
|
||||
case 'feature_status_changed':
|
||||
if (isFeatureStatusChangedPayload(payload)) {
|
||||
this.handleFeatureStatusChanged(payload);
|
||||
}
|
||||
return;
|
||||
default:
|
||||
// Other event types don't trigger hooks
|
||||
return;
|
||||
@@ -170,13 +249,15 @@ export class EventHookService {
|
||||
|
||||
// Build context for variable substitution
|
||||
// Use loaded featureName (from feature.title) or fall back to payload.featureName
|
||||
// Only populate error/errorType for error triggers - don't leak success messages into error fields
|
||||
const isErrorTrigger = trigger === 'feature_error' || trigger === 'auto_mode_error';
|
||||
const context: HookContext = {
|
||||
featureId: payload.featureId,
|
||||
featureName: featureName || payload.featureName,
|
||||
projectPath: payload.projectPath,
|
||||
projectName: payload.projectPath ? this.extractProjectName(payload.projectPath) : undefined,
|
||||
error: payload.error || payload.message,
|
||||
errorType: payload.errorType,
|
||||
error: isErrorTrigger ? payload.error || payload.message : undefined,
|
||||
errorType: isErrorTrigger ? payload.errorType : undefined,
|
||||
timestamp: new Date().toISOString(),
|
||||
eventType: trigger,
|
||||
};
|
||||
@@ -185,6 +266,46 @@ export class EventHookService {
|
||||
await this.executeHooksForTrigger(trigger, context, { passes: payload.passes });
|
||||
}
|
||||
|
||||
/**
|
||||
* Handle feature:completed events and trigger matching hooks
|
||||
*/
|
||||
private async handleFeatureCompletedEvent(payload: FeatureCompletedPayload): Promise<void> {
|
||||
if (!payload.featureId || !payload.projectPath) return;
|
||||
|
||||
// Mark as handled to prevent duplicate firing if feature_status_changed also fires
|
||||
this.markFeatureHandled(payload.featureId);
|
||||
|
||||
const passes = payload.passes ?? true;
|
||||
const trigger: EventHookTrigger = passes ? 'feature_success' : 'feature_error';
|
||||
|
||||
// Load feature name if we have featureId but no featureName
|
||||
let featureName: string | undefined = undefined;
|
||||
if (payload.projectPath && this.featureLoader) {
|
||||
try {
|
||||
const feature = await this.featureLoader.get(payload.projectPath, payload.featureId);
|
||||
if (feature?.title) {
|
||||
featureName = feature.title;
|
||||
}
|
||||
} catch (error) {
|
||||
logger.warn(`Failed to load feature ${payload.featureId} for event hook:`, error);
|
||||
}
|
||||
}
|
||||
|
||||
const isErrorTrigger = trigger === 'feature_error';
|
||||
const context: HookContext = {
|
||||
featureId: payload.featureId,
|
||||
featureName: featureName || payload.featureName,
|
||||
projectPath: payload.projectPath,
|
||||
projectName: this.extractProjectName(payload.projectPath),
|
||||
error: isErrorTrigger ? payload.message : undefined,
|
||||
errorType: undefined,
|
||||
timestamp: new Date().toISOString(),
|
||||
eventType: trigger,
|
||||
};
|
||||
|
||||
await this.executeHooksForTrigger(trigger, context, { passes });
|
||||
}
|
||||
|
||||
/**
|
||||
* Handle feature:created events and trigger matching hooks
|
||||
*/
|
||||
@@ -201,6 +322,74 @@ export class EventHookService {
|
||||
await this.executeHooksForTrigger('feature_created', context);
|
||||
}
|
||||
|
||||
/**
|
||||
* Handle feature_status_changed events for non-auto-mode feature completion.
|
||||
*
|
||||
* Auto-mode features already emit auto_mode_feature_complete which triggers hooks.
|
||||
* This handler catches manual (non-auto-mode) feature completions by detecting
|
||||
* status transitions to completion states (verified, waiting_approval).
|
||||
*/
|
||||
private async handleFeatureStatusChanged(payload: FeatureStatusChangedPayload): Promise<void> {
|
||||
// Skip if this feature was already handled via auto_mode_feature_complete
|
||||
if (this.recentlyHandledFeatures.has(payload.featureId)) {
|
||||
return;
|
||||
}
|
||||
|
||||
let trigger: EventHookTrigger | null = null;
|
||||
|
||||
if (
|
||||
payload.status === EventHookService.STATUS_VERIFIED ||
|
||||
payload.status === EventHookService.STATUS_WAITING_APPROVAL
|
||||
) {
|
||||
trigger = 'feature_success';
|
||||
} else {
|
||||
// Only completion statuses trigger hooks from status changes
|
||||
return;
|
||||
}
|
||||
|
||||
// Load feature name
|
||||
let featureName: string | undefined = undefined;
|
||||
if (this.featureLoader) {
|
||||
try {
|
||||
const feature = await this.featureLoader.get(payload.projectPath, payload.featureId);
|
||||
if (feature?.title) {
|
||||
featureName = feature.title;
|
||||
}
|
||||
} catch (error) {
|
||||
logger.warn(`Failed to load feature ${payload.featureId} for status change hook:`, error);
|
||||
}
|
||||
}
|
||||
|
||||
const context: HookContext = {
|
||||
featureId: payload.featureId,
|
||||
featureName,
|
||||
projectPath: payload.projectPath,
|
||||
projectName: this.extractProjectName(payload.projectPath),
|
||||
timestamp: new Date().toISOString(),
|
||||
eventType: trigger,
|
||||
};
|
||||
|
||||
await this.executeHooksForTrigger(trigger, context, { passes: true });
|
||||
}
|
||||
|
||||
/**
|
||||
* Mark a feature as recently handled to prevent double-firing hooks.
|
||||
* Entries are cleaned up after 30 seconds.
|
||||
*/
|
||||
private markFeatureHandled(featureId: string): void {
|
||||
// Cancel any existing timer for this feature before setting a new one
|
||||
const existing = this.recentlyHandledTimers.get(featureId);
|
||||
if (existing !== undefined) {
|
||||
clearTimeout(existing);
|
||||
}
|
||||
this.recentlyHandledFeatures.add(featureId);
|
||||
const timerId = setTimeout(() => {
|
||||
this.recentlyHandledFeatures.delete(featureId);
|
||||
this.recentlyHandledTimers.delete(featureId);
|
||||
}, 30000);
|
||||
this.recentlyHandledTimers.set(featureId, timerId);
|
||||
}
|
||||
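The markFeatureHandled logic above is a general short-window dedup pattern. A self-contained sketch of the same idea, with the 30-second TTL as a default, might look like this:

```ts
// Remember an id for a short window so a second event source does not
// trigger the same side effect twice; timers are tracked so they can be
// cancelled on shutdown (mirroring destroy() above).
class RecentlyHandled {
  private ids = new Set<string>();
  private timers = new Map<string, ReturnType<typeof setTimeout>>();

  constructor(private ttlMs = 30_000) {}

  mark(id: string): void {
    const existing = this.timers.get(id);
    if (existing !== undefined) clearTimeout(existing);
    this.ids.add(id);
    this.timers.set(
      id,
      setTimeout(() => {
        this.ids.delete(id);
        this.timers.delete(id);
      }, this.ttlMs)
    );
  }

  has(id: string): boolean {
    return this.ids.has(id);
  }

  clear(): void {
    for (const t of this.timers.values()) clearTimeout(t);
    this.timers.clear();
    this.ids.clear();
  }
}
```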
|
||||
/**
|
||||
* Execute all enabled hooks matching the given trigger and store event to history
|
||||
*/
|
||||
|
||||
@@ -12,6 +12,7 @@ import * as secureFs from '../lib/secure-fs.js';
|
||||
import {
|
||||
getPromptCustomization,
|
||||
getAutoLoadClaudeMdSetting,
|
||||
getUseClaudeCodeSystemPromptSetting,
|
||||
filterClaudeMdFromContext,
|
||||
} from '../lib/settings-helpers.js';
|
||||
import { validateWorkingDirectory } from '../lib/sdk-options.js';
|
||||
@@ -59,6 +60,12 @@ import type {
|
||||
|
||||
const logger = createLogger('ExecutionService');
|
||||
|
||||
/** Marker written by agent-executor for each tool invocation. */
|
||||
const TOOL_USE_MARKER = '🔧 Tool:';
|
||||
|
||||
/** Minimum trimmed output length to consider agent work meaningful. */
|
||||
const MIN_MEANINGFUL_OUTPUT_LENGTH = 200;
|
||||
|
||||
export class ExecutionService {
|
||||
constructor(
|
||||
private eventBus: TypedEventBus,
|
||||
@@ -168,6 +175,20 @@ ${feature.spec}
|
||||
feature = await this.loadFeatureFn(projectPath, featureId);
|
||||
if (!feature) throw new Error(`Feature ${featureId} not found`);
|
||||
|
||||
// Update status to in_progress immediately after acquiring the feature.
|
||||
// This prevents a race condition where the UI reloads features and sees the
|
||||
// feature still in 'backlog' status while it's actually being executed.
|
||||
// Only do this for the initial call (not internal/recursive calls which would
|
||||
// redundantly update the status).
|
||||
if (
|
||||
!options?._calledInternally &&
|
||||
(feature.status === 'backlog' ||
|
||||
feature.status === 'ready' ||
|
||||
feature.status === 'interrupted')
|
||||
) {
|
||||
await this.updateFeatureStatusFn(projectPath, featureId, 'in_progress');
|
||||
}
|
||||
|
||||
if (!options?.continuationPrompt) {
|
||||
if (feature.planSpec?.status === 'approved') {
|
||||
const prompts = await getPromptCustomization(this.settingsService, '[ExecutionService]');
|
||||
@@ -199,7 +220,18 @@ ${feature.spec}
|
||||
validateWorkingDirectory(workDir);
|
||||
tempRunningFeature.worktreePath = worktreePath;
|
||||
tempRunningFeature.branchName = branchName ?? null;
|
||||
await this.updateFeatureStatusFn(projectPath, featureId, 'in_progress');
|
||||
// Ensure status is in_progress (may already be set from the early update above,
|
||||
// but internal/recursive calls skip the early update and need it here).
|
||||
// Mirror the external guard: only transition when the feature is still in
|
||||
// backlog, ready, or interrupted to avoid overwriting a concurrent terminal status.
|
||||
if (
|
||||
options?._calledInternally &&
|
||||
(feature.status === 'backlog' ||
|
||||
feature.status === 'ready' ||
|
||||
feature.status === 'interrupted')
|
||||
) {
|
||||
await this.updateFeatureStatusFn(projectPath, featureId, 'in_progress');
|
||||
}
|
||||
this.eventBus.emitAutoModeEvent('auto_mode_feature_start', {
|
||||
featureId,
|
||||
projectPath,
|
||||
@@ -216,6 +248,11 @@ ${feature.spec}
|
||||
this.settingsService,
|
||||
'[ExecutionService]'
|
||||
);
|
||||
const useClaudeCodeSystemPrompt = await getUseClaudeCodeSystemPromptSetting(
|
||||
projectPath,
|
||||
this.settingsService,
|
||||
'[ExecutionService]'
|
||||
);
|
||||
const prompts = await getPromptCustomization(this.settingsService, '[ExecutionService]');
|
||||
let prompt: string;
|
||||
const contextResult = await this.loadContextFilesFn({
|
||||
@@ -264,7 +301,9 @@ ${feature.spec}
|
||||
requirePlanApproval: feature.requirePlanApproval,
|
||||
systemPrompt: combinedSystemPrompt || undefined,
|
||||
autoLoadClaudeMd,
|
||||
useClaudeCodeSystemPrompt,
|
||||
thinkingLevel: feature.thinkingLevel,
|
||||
reasoningEffort: feature.reasoningEffort,
|
||||
branchName: feature.branchName ?? null,
|
||||
}
|
||||
);
|
||||
@@ -328,7 +367,9 @@ Please continue from where you left off and complete all remaining tasks. Use th
|
||||
requirePlanApproval: false,
|
||||
systemPrompt: combinedSystemPrompt || undefined,
|
||||
autoLoadClaudeMd,
|
||||
useClaudeCodeSystemPrompt,
|
||||
thinkingLevel: feature.thinkingLevel,
|
||||
reasoningEffort: feature.reasoningEffort,
|
||||
branchName: feature.branchName ?? null,
|
||||
}
|
||||
);
|
||||
@@ -363,6 +404,7 @@ Please continue from where you left off and complete all remaining tasks. Use th
|
||||
branchName: feature.branchName ?? null,
|
||||
abortController,
|
||||
autoLoadClaudeMd,
|
||||
useClaudeCodeSystemPrompt,
|
||||
testAttempts: 0,
|
||||
maxTestAttempts: 5,
|
||||
});
|
||||
@@ -373,7 +415,41 @@ Please continue from where you left off and complete all remaining tasks. Use th
|
||||
}
|
||||
}
|
||||
|
||||
const finalStatus = feature.skipTests ? 'waiting_approval' : 'verified';
// Read agent output before determining final status.
// CLI-based providers (Cursor, Codex, etc.) may exit quickly without doing
// meaningful work. Check output to avoid prematurely marking as 'verified'.
const outputPath = path.join(getFeatureDir(projectPath, featureId), 'agent-output.md');
let agentOutput = '';
try {
agentOutput = (await secureFs.readFile(outputPath, 'utf-8')) as string;
} catch {
/* */
}

// Determine if the agent did meaningful work by checking for tool usage
// indicators in the output. The agent executor writes "🔧 Tool:" markers
// each time a tool is invoked. No tool usage suggests the CLI exited
// without performing implementation work.
const hasToolUsage = agentOutput.includes(TOOL_USE_MARKER);
const isOutputTooShort = agentOutput.trim().length < MIN_MEANINGFUL_OUTPUT_LENGTH;
const agentDidWork = hasToolUsage && !isOutputTooShort;

let finalStatus: 'verified' | 'waiting_approval';
if (feature.skipTests) {
finalStatus = 'waiting_approval';
} else if (!agentDidWork) {
// Agent didn't produce meaningful output (e.g., CLI exited quickly).
// Route to waiting_approval so the user can review and re-run.
finalStatus = 'waiting_approval';
logger.warn(
`[executeFeature] Feature ${featureId}: agent produced insufficient output ` +
`(${agentOutput.trim().length}/${MIN_MEANINGFUL_OUTPUT_LENGTH} chars, toolUsage=${hasToolUsage}). ` +
`Setting status to waiting_approval instead of verified.`
);
} else {
finalStatus = 'verified';
}

await this.updateFeatureStatusFn(projectPath, featureId, finalStatus);
this.recordSuccessFn();

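The heuristic above boils down to a small pure function; the marker string and threshold mirror TOOL_USE_MARKER and MIN_MEANINGFUL_OUTPUT_LENGTH from this diff. A sketch:

```ts
const TOOL_MARKER = '🔧 Tool:';
const MIN_OUTPUT_CHARS = 200;

// True only when the agent both invoked tools and produced a non-trivial
// amount of output; anything else is routed to waiting_approval above.
function agentDidMeaningfulWork(agentOutput: string): boolean {
  const hasToolUsage = agentOutput.includes(TOOL_MARKER);
  const longEnough = agentOutput.trim().length >= MIN_OUTPUT_CHARS;
  return hasToolUsage && longEnough;
}

// A CLI provider that exits immediately produces neither signal:
console.log(agentDidMeaningfulWork('error: not logged in')); // false
```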
@@ -385,13 +461,6 @@ Please continue from where you left off and complete all remaining tasks. Use th
|
||||
const hasIncompleteTasks = totalTasks > 0 && completedTasks < totalTasks;
|
||||
|
||||
try {
|
||||
const outputPath = path.join(getFeatureDir(projectPath, featureId), 'agent-output.md');
|
||||
let agentOutput = '';
|
||||
try {
|
||||
agentOutput = (await secureFs.readFile(outputPath, 'utf-8')) as string;
|
||||
} catch {
|
||||
/* */
|
||||
}
|
||||
if (agentOutput) {
|
||||
const summary = extractSummary(agentOutput);
|
||||
if (summary) await this.saveFeatureSummaryFn(projectPath, featureId, summary);
|
||||
@@ -416,28 +485,34 @@ Please continue from where you left off and complete all remaining tasks. Use th
|
||||
if (hasIncompleteTasks)
|
||||
completionMessage += ` (${completedTasks}/${totalTasks} tasks completed)`;
|
||||
|
||||
this.eventBus.emitAutoModeEvent('auto_mode_feature_complete', {
|
||||
featureId,
|
||||
featureName: feature.title,
|
||||
branchName: feature.branchName ?? null,
|
||||
passes: true,
|
||||
message: completionMessage,
|
||||
projectPath,
|
||||
model: tempRunningFeature.model,
|
||||
provider: tempRunningFeature.provider,
|
||||
});
|
||||
if (isAutoMode) {
|
||||
this.eventBus.emitAutoModeEvent('auto_mode_feature_complete', {
|
||||
featureId,
|
||||
featureName: feature.title,
|
||||
branchName: feature.branchName ?? null,
|
||||
executionMode: 'auto',
|
||||
passes: true,
|
||||
message: completionMessage,
|
||||
projectPath,
|
||||
model: tempRunningFeature.model,
|
||||
provider: tempRunningFeature.provider,
|
||||
});
|
||||
}
|
||||
} catch (error) {
|
||||
const errorInfo = classifyError(error);
|
||||
if (errorInfo.isAbort) {
|
||||
await this.updateFeatureStatusFn(projectPath, featureId, 'interrupted');
|
||||
this.eventBus.emitAutoModeEvent('auto_mode_feature_complete', {
|
||||
featureId,
|
||||
featureName: feature?.title,
|
||||
branchName: feature?.branchName ?? null,
|
||||
passes: false,
|
||||
message: 'Feature stopped by user',
|
||||
projectPath,
|
||||
});
|
||||
if (isAutoMode) {
|
||||
this.eventBus.emitAutoModeEvent('auto_mode_feature_complete', {
|
||||
featureId,
|
||||
featureName: feature?.title,
|
||||
branchName: feature?.branchName ?? null,
|
||||
executionMode: 'auto',
|
||||
passes: false,
|
||||
message: 'Feature stopped by user',
|
||||
projectPath,
|
||||
});
|
||||
}
|
||||
} else {
|
||||
logger.error(`Feature ${featureId} failed:`, error);
|
||||
await this.updateFeatureStatusFn(projectPath, featureId, 'backlog');
|
||||
@@ -462,6 +537,22 @@ Please continue from where you left off and complete all remaining tasks. Use th
|
||||
async stopFeature(featureId: string): Promise<boolean> {
|
||||
const running = this.concurrencyManager.getRunningFeature(featureId);
|
||||
if (!running) return false;
|
||||
const { projectPath } = running;
|
||||
|
||||
// Immediately update feature status to 'interrupted' so the UI reflects
|
||||
// the stop right away. CLI-based providers can take seconds to terminate
|
||||
// their subprocess after the abort signal fires, leaving the feature stuck
|
||||
// in 'in_progress' on the Kanban board until the executeFeature catch block
|
||||
// eventually runs. By persisting and emitting the status change here, the
|
||||
// board updates immediately regardless of how long the subprocess takes to stop.
|
||||
try {
|
||||
await this.updateFeatureStatusFn(projectPath, featureId, 'interrupted');
|
||||
} catch (err) {
|
||||
// Non-fatal: the abort still proceeds and executeFeature's catch block
|
||||
// will attempt the same update once the subprocess terminates.
|
||||
logger.warn(`stopFeature: failed to immediately update status for ${featureId}:`, err);
|
||||
}
|
||||
|
||||
running.abortController.abort();
|
||||
this.releaseRunningFeature(featureId, { force: true });
|
||||
return true;
|
||||
|
||||
@@ -5,7 +5,7 @@
|
||||
* allowing the service to delegate to other services without circular dependencies.
|
||||
*/
|
||||
|
||||
import type { Feature, PlanningMode, ThinkingLevel } from '@automaker/types';
|
||||
import type { Feature, PlanningMode, ThinkingLevel, ReasoningEffort } from '@automaker/types';
|
||||
import type { loadContextFiles } from '@automaker/utils';
|
||||
import type { PipelineContext } from './pipeline-orchestrator.js';
|
||||
|
||||
@@ -31,7 +31,9 @@ export type RunAgentFn = (
|
||||
previousContent?: string;
|
||||
systemPrompt?: string;
|
||||
autoLoadClaudeMd?: boolean;
|
||||
useClaudeCodeSystemPrompt?: boolean;
|
||||
thinkingLevel?: ThinkingLevel;
|
||||
reasoningEffort?: ReasoningEffort;
|
||||
branchName?: string | null;
|
||||
}
|
||||
) => Promise<void>;
|
||||
|
||||
@@ -225,6 +225,14 @@ export class FeatureLoader {
|
||||
return null;
|
||||
}
|
||||
|
||||
// Clear transient runtime flag - titleGenerating is only meaningful during
|
||||
// the current session's async title generation. If it was persisted (e.g.,
|
||||
// app closed before generation completed), it would cause the UI to show
|
||||
// "Generating title..." indefinitely.
|
||||
if (feature.titleGenerating) {
|
||||
delete feature.titleGenerating;
|
||||
}
|
||||
|
||||
return feature;
|
||||
});
|
||||
|
||||
@@ -323,7 +331,14 @@ export class FeatureLoader {
|
||||
|
||||
logRecoveryWarning(result, `Feature ${featureId}`, logger);
|
||||
|
||||
return result.data;
|
||||
const feature = result.data;
|
||||
|
||||
// Clear transient runtime flag (same as in getAll)
|
||||
if (feature?.titleGenerating) {
|
||||
delete feature.titleGenerating;
|
||||
}
|
||||
|
||||
return feature;
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -367,8 +382,15 @@ export class FeatureLoader {
|
||||
descriptionHistory: initialHistory,
|
||||
};
|
||||
|
||||
// Remove transient runtime fields before persisting to disk.
|
||||
// titleGenerating is UI-only state that tracks in-flight async title generation.
|
||||
// Persisting it can cause cards to show "Generating title..." indefinitely
|
||||
// if the app restarts before generation completes.
|
||||
const featureToWrite = { ...feature };
|
||||
delete featureToWrite.titleGenerating;
|
||||
|
||||
// Write feature.json atomically with backup support
|
||||
await atomicWriteJson(featureJsonPath, feature, { backupCount: DEFAULT_BACKUP_COUNT });
|
||||
await atomicWriteJson(featureJsonPath, featureToWrite, { backupCount: DEFAULT_BACKUP_COUNT });
|
||||
|
||||
logger.info(`Created feature ${featureId}`);
|
||||
return feature;
|
||||
@@ -452,9 +474,13 @@ export class FeatureLoader {
|
||||
descriptionHistory: updatedHistory,
|
||||
};
|
||||
|
||||
// Remove transient runtime fields before persisting (same as create)
|
||||
const featureToWrite = { ...updatedFeature };
|
||||
delete featureToWrite.titleGenerating;
|
||||
|
||||
// Write back to file atomically with backup support
|
||||
const featureJsonPath = this.getFeatureJsonPath(projectPath, featureId);
|
||||
await atomicWriteJson(featureJsonPath, updatedFeature, { backupCount: DEFAULT_BACKUP_COUNT });
|
||||
await atomicWriteJson(featureJsonPath, featureToWrite, { backupCount: DEFAULT_BACKUP_COUNT });
|
||||
|
||||
logger.info(`Updated feature ${featureId}`);
|
||||
return updatedFeature;
|
||||
|
||||
103
apps/server/src/services/github-pr-comment.service.ts
Normal file
103
apps/server/src/services/github-pr-comment.service.ts
Normal file
@@ -0,0 +1,103 @@
|
||||
/**
|
||||
* GitHub PR Comment Service
|
||||
*
|
||||
* Domain logic for resolving/unresolving PR review threads via the
|
||||
* GitHub GraphQL API. Extracted from the route handler so the route
|
||||
* only deals with request/response plumbing.
|
||||
*/
|
||||
|
||||
import { spawn } from 'child_process';
|
||||
import { execEnv } from '../lib/exec-utils.js';
|
||||
|
||||
/** Timeout for GitHub GraphQL API requests in milliseconds */
|
||||
const GITHUB_API_TIMEOUT_MS = 30000;
|
||||
|
||||
interface GraphQLMutationResponse {
|
||||
data?: {
|
||||
resolveReviewThread?: {
|
||||
thread?: { isResolved: boolean; id: string } | null;
|
||||
} | null;
|
||||
unresolveReviewThread?: {
|
||||
thread?: { isResolved: boolean; id: string } | null;
|
||||
} | null;
|
||||
};
|
||||
errors?: Array<{ message: string }>;
|
||||
}
|
||||
|
||||
/**
|
||||
* Execute a GraphQL mutation to resolve or unresolve a review thread.
|
||||
*/
|
||||
export async function executeReviewThreadMutation(
|
||||
projectPath: string,
|
||||
threadId: string,
|
||||
resolve: boolean
|
||||
): Promise<{ isResolved: boolean }> {
|
||||
const mutationName = resolve ? 'resolveReviewThread' : 'unresolveReviewThread';
|
||||
|
||||
const mutation = `
|
||||
mutation ${resolve ? 'ResolveThread' : 'UnresolveThread'}($threadId: ID!) {
|
||||
${mutationName}(input: { threadId: $threadId }) {
|
||||
thread {
|
||||
id
|
||||
isResolved
|
||||
}
|
||||
}
|
||||
}`;
|
||||
|
||||
const variables = { threadId };
|
||||
const requestBody = JSON.stringify({ query: mutation, variables });
|
||||
|
||||
// Declare timeoutId before registering the error handler to avoid TDZ confusion
|
||||
let timeoutId: NodeJS.Timeout | undefined;
|
||||
|
||||
const response = await new Promise<GraphQLMutationResponse>((res, rej) => {
|
||||
const gh = spawn('gh', ['api', 'graphql', '--input', '-'], {
|
||||
cwd: projectPath,
|
||||
env: execEnv,
|
||||
});
|
||||
|
||||
gh.on('error', (err) => {
|
||||
clearTimeout(timeoutId);
|
||||
rej(err);
|
||||
});
|
||||
|
||||
timeoutId = setTimeout(() => {
|
||||
gh.kill();
|
||||
rej(new Error('GitHub GraphQL API request timed out'));
|
||||
}, GITHUB_API_TIMEOUT_MS);
|
||||
|
||||
let stdout = '';
|
||||
let stderr = '';
|
||||
gh.stdout.on('data', (data: Buffer) => (stdout += data.toString()));
|
||||
gh.stderr.on('data', (data: Buffer) => (stderr += data.toString()));
|
||||
|
||||
gh.on('close', (code) => {
|
||||
clearTimeout(timeoutId);
|
||||
if (code !== 0) {
|
||||
return rej(new Error(`gh process exited with code ${code}: ${stderr}`));
|
||||
}
|
||||
try {
|
||||
res(JSON.parse(stdout));
|
||||
} catch (e) {
|
||||
rej(e);
|
||||
}
|
||||
});
|
||||
|
||||
gh.stdin.write(requestBody);
|
||||
gh.stdin.end();
|
||||
});
|
||||
|
||||
if (response.errors && response.errors.length > 0) {
|
||||
throw new Error(response.errors[0].message);
|
||||
}
|
||||
|
||||
const threadData = resolve
|
||||
? response.data?.resolveReviewThread?.thread
|
||||
: response.data?.unresolveReviewThread?.thread;
|
||||
|
||||
if (!threadData) {
|
||||
throw new Error('No thread data returned from GitHub API');
|
||||
}
|
||||
|
||||
return { isResolved: threadData.isResolved };
|
||||
}
|
||||
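A minimal usage sketch for the new service (the import path and thread ID are illustrative; callers normally obtain the GraphQL node ID from an enriched comment's `threadId` field):

import { executeReviewThreadMutation } from './services/github-pr-comment.service.js';

// Resolve a review thread; pass `false` to unresolve it instead.
// projectPath must point at a checkout where the `gh` CLI is authenticated.
const { isResolved } = await executeReviewThreadMutation(
  '/path/to/project',      // placeholder project path
  'PRRT_exampleNodeId',    // placeholder GraphQL thread node ID
  true                     // true = resolve, false = unresolve
);
console.log(isResolved); // true once GitHub confirms the thread is resolved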
@@ -4,7 +4,7 @@
|
||||
* Extracted from worktree merge route to allow internal service calls.
|
||||
*/
|
||||
|
||||
import { createLogger, isValidBranchName } from '@automaker/utils';
|
||||
import { createLogger, isValidBranchName, isValidRemoteName } from '@automaker/utils';
|
||||
import { type EventEmitter } from '../lib/events.js';
|
||||
import { execGitCommand } from '@automaker/git-utils';
|
||||
const logger = createLogger('MergeService');
|
||||
@@ -13,6 +13,8 @@ export interface MergeOptions {
|
||||
squash?: boolean;
|
||||
message?: string;
|
||||
deleteWorktreeAndBranch?: boolean;
|
||||
/** Remote name to fetch from before merging (defaults to 'origin') */
|
||||
remote?: string;
|
||||
}
|
||||
|
||||
export interface MergeServiceResult {
|
||||
@@ -35,7 +37,11 @@ export interface MergeServiceResult {
|
||||
* @param branchName - Source branch to merge
|
||||
* @param worktreePath - Path to the worktree (used for deletion if requested)
|
||||
* @param targetBranch - Branch to merge into (defaults to 'main')
|
||||
* @param options - Merge options (squash, message, deleteWorktreeAndBranch)
|
||||
* @param options - Merge options
|
||||
* @param options.squash - If true, perform a squash merge
|
||||
* @param options.message - Custom merge commit message
|
||||
* @param options.deleteWorktreeAndBranch - If true, delete worktree and branch after merge
|
||||
* @param options.remote - Remote name to fetch from before merging (defaults to 'origin')
|
||||
*/
|
||||
export async function performMerge(
|
||||
projectPath: string,
|
||||
@@ -88,6 +94,33 @@ export async function performMerge(
|
||||
};
|
||||
}
|
||||
|
||||
// Validate the remote name to prevent git option injection.
|
||||
// Reject invalid remote names so the caller knows their input was wrong,
|
||||
// consistent with how invalid branch names are handled above.
|
||||
const remote = options?.remote || 'origin';
|
||||
if (!isValidRemoteName(remote)) {
|
||||
logger.warn('Invalid remote name supplied to merge-service', {
|
||||
remote,
|
||||
projectPath,
|
||||
});
|
||||
return {
|
||||
success: false,
|
||||
error: `Invalid remote name: "${remote}"`,
|
||||
};
|
||||
}
|
||||
|
||||
// Fetch latest from remote before merging to ensure we have up-to-date refs
|
||||
try {
|
||||
await execGitCommand(['fetch', remote], projectPath);
|
||||
} catch (fetchError) {
|
||||
logger.warn('Failed to fetch from remote before merge; proceeding with local refs', {
|
||||
remote,
|
||||
projectPath,
|
||||
error: (fetchError as Error).message,
|
||||
});
|
||||
// Non-fatal: proceed with local refs if fetch fails (e.g. offline)
|
||||
}
|
||||
|
||||
// Emit merge:start after validating inputs
|
||||
emitter?.emit('merge:start', { branchName, targetBranch: mergeTo, worktreePath });
|
||||
|
||||
|
||||
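A hedged sketch of the extended options shape for `performMerge`; only the options object is shown because the full positional signature is not part of this hunk, and the values are placeholders:

import type { MergeOptions } from './services/merge-service.js';

// Fetch from a non-default remote before merging. Remote names that fail
// isValidRemoteName (e.g. option-like strings) are now rejected before any
// git command runs.
const options: MergeOptions = {
  squash: true,
  deleteWorktreeAndBranch: false,
  remote: 'upstream', // defaults to 'origin' when omitted
};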
@@ -16,6 +16,7 @@ import * as secureFs from '../lib/secure-fs.js';
|
||||
import {
|
||||
getPromptCustomization,
|
||||
getAutoLoadClaudeMdSetting,
|
||||
getUseClaudeCodeSystemPromptSetting,
|
||||
filterClaudeMdFromContext,
|
||||
} from '../lib/settings-helpers.js';
|
||||
import { validateWorkingDirectory } from '../lib/sdk-options.js';
|
||||
@@ -70,8 +71,16 @@ export class PipelineOrchestrator {
|
||||
) {}
|
||||
|
||||
async executePipeline(ctx: PipelineContext): Promise<void> {
|
||||
const { projectPath, featureId, feature, steps, workDir, abortController, autoLoadClaudeMd } =
|
||||
ctx;
|
||||
const {
|
||||
projectPath,
|
||||
featureId,
|
||||
feature,
|
||||
steps,
|
||||
workDir,
|
||||
abortController,
|
||||
autoLoadClaudeMd,
|
||||
useClaudeCodeSystemPrompt,
|
||||
} = ctx;
|
||||
const prompts = await getPromptCustomization(this.settingsService, '[AutoMode]');
|
||||
const contextResult = await this.loadContextFilesFn({
|
||||
projectPath,
|
||||
@@ -121,7 +130,9 @@ export class PipelineOrchestrator {
|
||||
previousContent: previousContext,
|
||||
systemPrompt: contextFilesPrompt || undefined,
|
||||
autoLoadClaudeMd,
|
||||
useClaudeCodeSystemPrompt,
|
||||
thinkingLevel: feature.thinkingLevel,
|
||||
reasoningEffort: feature.reasoningEffort,
|
||||
}
|
||||
);
|
||||
try {
|
||||
@@ -226,14 +237,18 @@ export class PipelineOrchestrator {
|
||||
logger.warn(`Step ${pipelineInfo.stepId} no longer exists, completing feature`);
|
||||
const finalStatus = feature.skipTests ? 'waiting_approval' : 'verified';
|
||||
await this.updateFeatureStatusFn(projectPath, featureId, finalStatus);
|
||||
this.eventBus.emitAutoModeEvent('auto_mode_feature_complete', {
|
||||
featureId,
|
||||
featureName: feature.title,
|
||||
branchName: feature.branchName ?? null,
|
||||
passes: true,
|
||||
message: 'Pipeline step no longer exists',
|
||||
projectPath,
|
||||
});
|
||||
const runningEntryForStep = this.concurrencyManager.getRunningFeature(featureId);
|
||||
if (runningEntryForStep?.isAutoMode) {
|
||||
this.eventBus.emitAutoModeEvent('auto_mode_feature_complete', {
|
||||
featureId,
|
||||
featureName: feature.title,
|
||||
branchName: feature.branchName ?? null,
|
||||
executionMode: 'auto',
|
||||
passes: true,
|
||||
message: 'Pipeline step no longer exists',
|
||||
projectPath,
|
||||
});
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -272,14 +287,18 @@ export class PipelineOrchestrator {
|
||||
);
|
||||
if (!pipelineService.isPipelineStatus(nextStatus)) {
|
||||
await this.updateFeatureStatusFn(projectPath, featureId, nextStatus);
|
||||
this.eventBus.emitAutoModeEvent('auto_mode_feature_complete', {
|
||||
featureId,
|
||||
featureName: feature.title,
|
||||
branchName: feature.branchName ?? null,
|
||||
passes: true,
|
||||
message: 'Pipeline completed (remaining steps excluded)',
|
||||
projectPath,
|
||||
});
|
||||
const runningEntryForExcluded = this.concurrencyManager.getRunningFeature(featureId);
|
||||
if (runningEntryForExcluded?.isAutoMode) {
|
||||
this.eventBus.emitAutoModeEvent('auto_mode_feature_complete', {
|
||||
featureId,
|
||||
featureName: feature.title,
|
||||
branchName: feature.branchName ?? null,
|
||||
executionMode: 'auto',
|
||||
passes: true,
|
||||
message: 'Pipeline completed (remaining steps excluded)',
|
||||
projectPath,
|
||||
});
|
||||
}
|
||||
return;
|
||||
}
|
||||
const nextStepId = pipelineService.getStepIdFromStatus(nextStatus);
|
||||
@@ -294,14 +313,18 @@ export class PipelineOrchestrator {
|
||||
if (stepsToExecute.length === 0) {
|
||||
const finalStatus = feature.skipTests ? 'waiting_approval' : 'verified';
|
||||
await this.updateFeatureStatusFn(projectPath, featureId, finalStatus);
|
||||
this.eventBus.emitAutoModeEvent('auto_mode_feature_complete', {
|
||||
featureId,
|
||||
featureName: feature.title,
|
||||
branchName: feature.branchName ?? null,
|
||||
passes: true,
|
||||
message: 'Pipeline completed (all steps excluded)',
|
||||
projectPath,
|
||||
});
|
||||
const runningEntryForAllExcluded = this.concurrencyManager.getRunningFeature(featureId);
|
||||
if (runningEntryForAllExcluded?.isAutoMode) {
|
||||
this.eventBus.emitAutoModeEvent('auto_mode_feature_complete', {
|
||||
featureId,
|
||||
featureName: feature.title,
|
||||
branchName: feature.branchName ?? null,
|
||||
executionMode: 'auto',
|
||||
passes: true,
|
||||
message: 'Pipeline completed (all steps excluded)',
|
||||
projectPath,
|
||||
});
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -345,6 +368,11 @@ export class PipelineOrchestrator {
|
||||
this.settingsService,
|
||||
'[AutoMode]'
|
||||
);
|
||||
const useClaudeCodeSystemPrompt = await getUseClaudeCodeSystemPromptSetting(
|
||||
projectPath,
|
||||
this.settingsService,
|
||||
'[AutoMode]'
|
||||
);
|
||||
const context: PipelineContext = {
|
||||
projectPath,
|
||||
featureId,
|
||||
@@ -355,6 +383,7 @@ export class PipelineOrchestrator {
|
||||
branchName: branchName ?? null,
|
||||
abortController,
|
||||
autoLoadClaudeMd,
|
||||
useClaudeCodeSystemPrompt,
|
||||
testAttempts: 0,
|
||||
maxTestAttempts: 5,
|
||||
};
|
||||
@@ -370,25 +399,31 @@ export class PipelineOrchestrator {
|
||||
await this.updateFeatureStatusFn(projectPath, featureId, finalStatus);
|
||||
}
|
||||
logger.info(`Pipeline resume completed for feature ${featureId}`);
|
||||
this.eventBus.emitAutoModeEvent('auto_mode_feature_complete', {
|
||||
featureId,
|
||||
featureName: feature.title,
|
||||
branchName: feature.branchName ?? null,
|
||||
passes: true,
|
||||
message: 'Pipeline resumed successfully',
|
||||
projectPath,
|
||||
});
|
||||
} catch (error) {
|
||||
const errorInfo = classifyError(error);
|
||||
if (errorInfo.isAbort) {
|
||||
if (runningEntry.isAutoMode) {
|
||||
this.eventBus.emitAutoModeEvent('auto_mode_feature_complete', {
|
||||
featureId,
|
||||
featureName: feature.title,
|
||||
branchName: feature.branchName ?? null,
|
||||
passes: false,
|
||||
message: 'Pipeline stopped by user',
|
||||
executionMode: 'auto',
|
||||
passes: true,
|
||||
message: 'Pipeline resumed successfully',
|
||||
projectPath,
|
||||
});
|
||||
}
|
||||
} catch (error) {
|
||||
const errorInfo = classifyError(error);
|
||||
if (errorInfo.isAbort) {
|
||||
if (runningEntry.isAutoMode) {
|
||||
this.eventBus.emitAutoModeEvent('auto_mode_feature_complete', {
|
||||
featureId,
|
||||
featureName: feature.title,
|
||||
branchName: feature.branchName ?? null,
|
||||
executionMode: 'auto',
|
||||
passes: false,
|
||||
message: 'Pipeline stopped by user',
|
||||
projectPath,
|
||||
});
|
||||
}
|
||||
} else {
|
||||
logger.error(`Pipeline resume failed for ${featureId}:`, error);
|
||||
await this.updateFeatureStatusFn(projectPath, featureId, 'backlog');
|
||||
@@ -449,7 +484,14 @@ export class PipelineOrchestrator {
|
||||
projectPath,
|
||||
undefined,
|
||||
undefined,
|
||||
{ projectPath, planningMode: 'skip', requirePlanApproval: false }
|
||||
{
|
||||
projectPath,
|
||||
planningMode: 'skip',
|
||||
requirePlanApproval: false,
|
||||
useClaudeCodeSystemPrompt: context.useClaudeCodeSystemPrompt,
|
||||
autoLoadClaudeMd: context.autoLoadClaudeMd,
|
||||
reasoningEffort: context.feature.reasoningEffort,
|
||||
}
|
||||
);
|
||||
}
|
||||
}
|
||||
@@ -537,14 +579,18 @@ export class PipelineOrchestrator {
|
||||
}
|
||||
|
||||
logger.info(`Auto-merge successful for feature ${featureId}`);
|
||||
this.eventBus.emitAutoModeEvent('auto_mode_feature_complete', {
|
||||
featureId,
|
||||
featureName: feature.title,
|
||||
branchName,
|
||||
passes: true,
|
||||
message: 'Pipeline completed and merged',
|
||||
projectPath,
|
||||
});
|
||||
const runningEntryForMerge = this.concurrencyManager.getRunningFeature(featureId);
|
||||
if (runningEntryForMerge?.isAutoMode) {
|
||||
this.eventBus.emitAutoModeEvent('auto_mode_feature_complete', {
|
||||
featureId,
|
||||
featureName: feature.title,
|
||||
branchName,
|
||||
executionMode: 'auto',
|
||||
passes: true,
|
||||
message: 'Pipeline completed and merged',
|
||||
projectPath,
|
||||
});
|
||||
}
|
||||
return { success: true };
|
||||
} catch (error) {
|
||||
logger.error(`Merge failed for ${featureId}:`, error);
|
||||
@@ -580,7 +626,7 @@ export class PipelineOrchestrator {
|
||||
}
|
||||
// Only capture assertion details when they appear in failure context
|
||||
// or match explicit assertion error / expect patterns
|
||||
if (trimmed.includes('AssertionError') || trimmed.includes('AssertionError')) {
|
||||
if (trimmed.includes('AssertionError')) {
|
||||
failedTests.push(trimmed);
|
||||
} else if (
|
||||
inFailureContext &&
|
||||
|
||||
@@ -14,6 +14,7 @@ export interface PipelineContext {
|
||||
branchName: string | null;
|
||||
abortController: AbortController;
|
||||
autoLoadClaudeMd: boolean;
|
||||
useClaudeCodeSystemPrompt?: boolean;
|
||||
testAttempts: number;
|
||||
maxTestAttempts: number;
|
||||
}
|
||||
|
||||
431
apps/server/src/services/pr-review-comments.service.ts
Normal file
@@ -0,0 +1,431 @@
|
||||
/**
|
||||
* PR Review Comments Service
|
||||
*
|
||||
* Domain logic for fetching PR review comments, enriching them with
|
||||
* resolved-thread status, and sorting. Extracted from the route handler
|
||||
* so the route only deals with request/response plumbing.
|
||||
*/
|
||||
|
||||
import { spawn, execFile } from 'child_process';
|
||||
import { promisify } from 'util';
|
||||
import { createLogger } from '@automaker/utils';
|
||||
import { execEnv, logError } from '../lib/exec-utils.js';
|
||||
|
||||
const execFileAsync = promisify(execFile);
|
||||
|
||||
// ── Public types (re-exported for callers) ──
|
||||
|
||||
export interface PRReviewComment {
|
||||
id: string;
|
||||
author: string;
|
||||
avatarUrl?: string;
|
||||
body: string;
|
||||
path?: string;
|
||||
line?: number;
|
||||
createdAt: string;
|
||||
updatedAt?: string;
|
||||
isReviewComment: boolean;
|
||||
/** Whether this is an outdated review comment (code has changed since) */
|
||||
isOutdated?: boolean;
|
||||
/** Whether the review thread containing this comment has been resolved */
|
||||
isResolved?: boolean;
|
||||
/** The GraphQL node ID of the review thread (used for resolve/unresolve mutations) */
|
||||
threadId?: string;
|
||||
/** The diff hunk context for the comment */
|
||||
diffHunk?: string;
|
||||
/** The side of the diff (LEFT or RIGHT) */
|
||||
side?: string;
|
||||
/** The commit ID the comment was made on */
|
||||
commitId?: string;
|
||||
/** Whether the comment author is a bot/app account */
|
||||
isBot?: boolean;
|
||||
}
|
||||
|
||||
export interface ListPRReviewCommentsResult {
|
||||
success: boolean;
|
||||
comments?: PRReviewComment[];
|
||||
totalCount?: number;
|
||||
error?: string;
|
||||
}
|
||||
|
||||
// ── Internal types ──
|
||||
|
||||
/** Timeout for GitHub GraphQL API requests in milliseconds */
|
||||
const GITHUB_API_TIMEOUT_MS = 30000;
|
||||
|
||||
/** Maximum number of pagination pages to prevent infinite loops */
|
||||
const MAX_PAGINATION_PAGES = 20;
|
||||
|
||||
interface GraphQLReviewThreadComment {
|
||||
databaseId: number;
|
||||
}
|
||||
|
||||
interface GraphQLReviewThread {
|
||||
id: string;
|
||||
isResolved: boolean;
|
||||
comments: {
|
||||
pageInfo?: {
|
||||
hasNextPage: boolean;
|
||||
endCursor?: string | null;
|
||||
};
|
||||
nodes: GraphQLReviewThreadComment[];
|
||||
};
|
||||
}
|
||||
|
||||
interface GraphQLResponse {
|
||||
data?: {
|
||||
repository?: {
|
||||
pullRequest?: {
|
||||
reviewThreads?: {
|
||||
nodes: GraphQLReviewThread[];
|
||||
pageInfo?: {
|
||||
hasNextPage: boolean;
|
||||
endCursor?: string | null;
|
||||
};
|
||||
};
|
||||
} | null;
|
||||
};
|
||||
};
|
||||
errors?: Array<{ message: string }>;
|
||||
}
|
||||
|
||||
interface ReviewThreadInfo {
|
||||
isResolved: boolean;
|
||||
threadId: string;
|
||||
}
|
||||
|
||||
// ── Logger ──
|
||||
|
||||
const logger = createLogger('PRReviewCommentsService');
|
||||
|
||||
// ── Service functions ──
|
||||
|
||||
/**
|
||||
* Execute a GraphQL query via the `gh` CLI and return the parsed response.
|
||||
*/
|
||||
async function executeGraphQL(projectPath: string, requestBody: string): Promise<GraphQLResponse> {
|
||||
let timeoutId: NodeJS.Timeout | undefined;
|
||||
|
||||
const response = await new Promise<GraphQLResponse>((resolve, reject) => {
|
||||
const gh = spawn('gh', ['api', 'graphql', '--input', '-'], {
|
||||
cwd: projectPath,
|
||||
env: execEnv,
|
||||
});
|
||||
|
||||
gh.on('error', (err) => {
|
||||
clearTimeout(timeoutId);
|
||||
reject(err);
|
||||
});
|
||||
|
||||
timeoutId = setTimeout(() => {
|
||||
gh.kill();
|
||||
reject(new Error('GitHub GraphQL API request timed out'));
|
||||
}, GITHUB_API_TIMEOUT_MS);
|
||||
|
||||
let stdout = '';
|
||||
let stderr = '';
|
||||
gh.stdout.on('data', (data: Buffer) => (stdout += data.toString()));
|
||||
gh.stderr.on('data', (data: Buffer) => (stderr += data.toString()));
|
||||
|
||||
gh.on('close', (code) => {
|
||||
clearTimeout(timeoutId);
|
||||
if (code !== 0) {
|
||||
return reject(new Error(`gh process exited with code ${code}: ${stderr}`));
|
||||
}
|
||||
try {
|
||||
resolve(JSON.parse(stdout));
|
||||
} catch (e) {
|
||||
reject(e);
|
||||
}
|
||||
});
|
||||
|
||||
gh.stdin.on('error', () => {
|
||||
// Ignore stdin errors (e.g. when the child process is killed)
|
||||
});
|
||||
gh.stdin.write(requestBody);
|
||||
gh.stdin.end();
|
||||
});
|
||||
|
||||
if (response.errors && response.errors.length > 0) {
|
||||
throw new Error(response.errors[0].message);
|
||||
}
|
||||
|
||||
return response;
|
||||
}
|
||||
|
||||
/**
|
||||
* Fetch review thread resolved status and thread IDs using GitHub GraphQL API.
|
||||
* Uses cursor-based pagination to handle PRs with more than 100 review threads.
|
||||
* Returns a map of comment ID (string) -> { isResolved, threadId }.
|
||||
*/
|
||||
export async function fetchReviewThreadResolvedStatus(
|
||||
projectPath: string,
|
||||
owner: string,
|
||||
repo: string,
|
||||
prNumber: number
|
||||
): Promise<Map<string, ReviewThreadInfo>> {
|
||||
const resolvedMap = new Map<string, ReviewThreadInfo>();
|
||||
|
||||
const query = `
|
||||
query GetPRReviewThreads(
|
||||
$owner: String!
|
||||
$repo: String!
|
||||
$prNumber: Int!
|
||||
$cursor: String
|
||||
) {
|
||||
repository(owner: $owner, name: $repo) {
|
||||
pullRequest(number: $prNumber) {
|
||||
reviewThreads(first: 100, after: $cursor) {
|
||||
pageInfo {
|
||||
hasNextPage
|
||||
endCursor
|
||||
}
|
||||
nodes {
|
||||
id
|
||||
isResolved
|
||||
comments(first: 100) {
|
||||
pageInfo {
|
||||
hasNextPage
|
||||
endCursor
|
||||
}
|
||||
nodes {
|
||||
databaseId
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}`;
|
||||
|
||||
try {
|
||||
let cursor: string | null = null;
|
||||
let pageCount = 0;
|
||||
|
||||
do {
|
||||
const variables = { owner, repo, prNumber, cursor };
|
||||
const requestBody = JSON.stringify({ query, variables });
|
||||
const response = await executeGraphQL(projectPath, requestBody);
|
||||
|
||||
const reviewThreads = response.data?.repository?.pullRequest?.reviewThreads;
|
||||
const threads = reviewThreads?.nodes ?? [];
|
||||
|
||||
for (const thread of threads) {
|
||||
if (thread.comments.pageInfo?.hasNextPage) {
|
||||
logger.debug(
|
||||
`Review thread ${thread.id} in PR #${prNumber} has >100 comments — ` +
|
||||
'some comments may be missing resolved status'
|
||||
);
|
||||
}
|
||||
const info: ReviewThreadInfo = { isResolved: thread.isResolved, threadId: thread.id };
|
||||
for (const comment of thread.comments.nodes) {
|
||||
resolvedMap.set(String(comment.databaseId), info);
|
||||
}
|
||||
}
|
||||
|
||||
const pageInfo = reviewThreads?.pageInfo;
|
||||
if (pageInfo?.hasNextPage && pageInfo.endCursor) {
|
||||
cursor = pageInfo.endCursor;
|
||||
pageCount++;
|
||||
logger.debug(
|
||||
`Fetching next page of review threads for PR #${prNumber} (page ${pageCount + 1})`
|
||||
);
|
||||
} else {
|
||||
cursor = null;
|
||||
}
|
||||
} while (cursor && pageCount < MAX_PAGINATION_PAGES);
|
||||
|
||||
if (pageCount >= MAX_PAGINATION_PAGES) {
|
||||
logger.warn(
|
||||
`PR #${prNumber} in ${owner}/${repo} has more than ${MAX_PAGINATION_PAGES * 100} review threads — ` +
|
||||
'pagination limit reached. Some comments may be missing resolved status.'
|
||||
);
|
||||
}
|
||||
} catch (error) {
|
||||
// Log but don't fail — resolved status is best-effort
|
||||
logError(error, 'Failed to fetch PR review thread resolved status');
|
||||
}
|
||||
|
||||
return resolvedMap;
|
||||
}
|
||||
|
||||
/**
|
||||
* Fetch all comments for a PR (regular, inline review, and review body comments)
|
||||
*/
|
||||
export async function fetchPRReviewComments(
|
||||
projectPath: string,
|
||||
owner: string,
|
||||
repo: string,
|
||||
prNumber: number
|
||||
): Promise<PRReviewComment[]> {
|
||||
const allComments: PRReviewComment[] = [];
|
||||
|
||||
// Fetch review thread resolved status in parallel with comment fetching
|
||||
const resolvedStatusPromise = fetchReviewThreadResolvedStatus(projectPath, owner, repo, prNumber);
|
||||
|
||||
// 1. Fetch regular PR comments (issue-level comments)
|
||||
// Uses the REST API issues endpoint instead of `gh pr view --json comments`
|
||||
// because the latter uses GraphQL internally where bot/app authors can return
|
||||
// null, causing bot comments to be silently dropped or display as "unknown".
|
||||
try {
|
||||
const issueCommentsEndpoint = `repos/${owner}/${repo}/issues/${prNumber}/comments`;
|
||||
const { stdout: commentsOutput } = await execFileAsync(
|
||||
'gh',
|
||||
['api', issueCommentsEndpoint, '--paginate'],
|
||||
{
|
||||
cwd: projectPath,
|
||||
env: execEnv,
|
||||
maxBuffer: 1024 * 1024 * 10, // 10MB buffer for large PRs
|
||||
timeout: GITHUB_API_TIMEOUT_MS,
|
||||
}
|
||||
);
|
||||
|
||||
const commentsData = JSON.parse(commentsOutput);
|
||||
const regularComments = (Array.isArray(commentsData) ? commentsData : []).map(
|
||||
(c: {
|
||||
id: number;
|
||||
user: { login: string; avatar_url?: string; type?: string } | null;
|
||||
body: string;
|
||||
created_at: string;
|
||||
updated_at?: string;
|
||||
performed_via_github_app?: { slug: string } | null;
|
||||
}) => ({
|
||||
id: String(c.id),
|
||||
author: c.user?.login || c.performed_via_github_app?.slug || 'unknown',
|
||||
avatarUrl: c.user?.avatar_url,
|
||||
body: c.body,
|
||||
createdAt: c.created_at,
|
||||
updatedAt: c.updated_at,
|
||||
isReviewComment: false,
|
||||
isOutdated: false,
|
||||
isBot: c.user?.type === 'Bot' || !!c.performed_via_github_app,
|
||||
// Regular PR comments are not part of review threads, so not resolvable
|
||||
isResolved: false,
|
||||
})
|
||||
);
|
||||
|
||||
allComments.push(...regularComments);
|
||||
} catch (error) {
|
||||
logError(error, 'Failed to fetch regular PR comments');
|
||||
}
|
||||
|
||||
// 2. Fetch inline review comments (code-level comments with file/line info)
|
||||
try {
|
||||
const reviewsEndpoint = `repos/${owner}/${repo}/pulls/${prNumber}/comments`;
|
||||
const { stdout: reviewsOutput } = await execFileAsync(
|
||||
'gh',
|
||||
['api', reviewsEndpoint, '--paginate'],
|
||||
{
|
||||
cwd: projectPath,
|
||||
env: execEnv,
|
||||
maxBuffer: 1024 * 1024 * 10, // 10MB buffer for large PRs
|
||||
timeout: GITHUB_API_TIMEOUT_MS,
|
||||
}
|
||||
);
|
||||
|
||||
const reviewsData = JSON.parse(reviewsOutput);
|
||||
const reviewComments = (Array.isArray(reviewsData) ? reviewsData : []).map(
|
||||
(c: {
|
||||
id: number;
|
||||
user: { login: string; avatar_url?: string; type?: string } | null;
|
||||
body: string;
|
||||
path: string;
|
||||
line?: number;
|
||||
original_line?: number;
|
||||
created_at: string;
|
||||
updated_at?: string;
|
||||
diff_hunk?: string;
|
||||
side?: string;
|
||||
commit_id?: string;
|
||||
position?: number | null;
|
||||
performed_via_github_app?: { slug: string } | null;
|
||||
}) => ({
|
||||
id: String(c.id),
|
||||
author: c.user?.login || c.performed_via_github_app?.slug || 'unknown',
|
||||
avatarUrl: c.user?.avatar_url,
|
||||
body: c.body,
|
||||
path: c.path,
|
||||
line: c.line ?? c.original_line,
|
||||
createdAt: c.created_at,
|
||||
updatedAt: c.updated_at,
|
||||
isReviewComment: true,
|
||||
// A review comment is "outdated" if position is null (code has changed)
|
||||
isOutdated: c.position === null,
|
||||
// isResolved will be filled in below from GraphQL data
|
||||
isResolved: false,
|
||||
isBot: c.user?.type === 'Bot' || !!c.performed_via_github_app,
|
||||
diffHunk: c.diff_hunk,
|
||||
side: c.side,
|
||||
commitId: c.commit_id,
|
||||
})
|
||||
);
|
||||
|
||||
allComments.push(...reviewComments);
|
||||
} catch (error) {
|
||||
logError(error, 'Failed to fetch inline review comments');
|
||||
}
|
||||
|
||||
// 3. Fetch review body comments (summary text submitted with each review)
|
||||
// These are the top-level comments written when submitting a review
|
||||
// (Approve, Request Changes, Comment). They are separate from inline code comments
|
||||
// and issue-level comments. Only include reviews that have a non-empty body.
|
||||
try {
|
||||
const reviewsEndpoint = `repos/${owner}/${repo}/pulls/${prNumber}/reviews`;
|
||||
const { stdout: reviewBodiesOutput } = await execFileAsync(
|
||||
'gh',
|
||||
['api', reviewsEndpoint, '--paginate'],
|
||||
{
|
||||
cwd: projectPath,
|
||||
env: execEnv,
|
||||
maxBuffer: 1024 * 1024 * 10, // 10MB buffer for large PRs
|
||||
timeout: GITHUB_API_TIMEOUT_MS,
|
||||
}
|
||||
);
|
||||
|
||||
const reviewBodiesData = JSON.parse(reviewBodiesOutput);
|
||||
const reviewBodyComments = (Array.isArray(reviewBodiesData) ? reviewBodiesData : [])
|
||||
.filter(
|
||||
(r: { body?: string; state?: string }) =>
|
||||
r.body && r.body.trim().length > 0 && r.state !== 'PENDING'
|
||||
)
|
||||
.map(
|
||||
(r: {
|
||||
id: number;
|
||||
user: { login: string; avatar_url?: string; type?: string } | null;
|
||||
body: string;
|
||||
state: string;
|
||||
submitted_at: string;
|
||||
performed_via_github_app?: { slug: string } | null;
|
||||
}) => ({
|
||||
id: `review-${r.id}`,
|
||||
author: r.user?.login || r.performed_via_github_app?.slug || 'unknown',
|
||||
avatarUrl: r.user?.avatar_url,
|
||||
body: r.body,
|
||||
createdAt: r.submitted_at,
|
||||
isReviewComment: false,
|
||||
isOutdated: false,
|
||||
isResolved: false,
|
||||
isBot: r.user?.type === 'Bot' || !!r.performed_via_github_app,
|
||||
})
|
||||
);
|
||||
|
||||
allComments.push(...reviewBodyComments);
|
||||
} catch (error) {
|
||||
logError(error, 'Failed to fetch review body comments');
|
||||
}
|
||||
|
||||
// Wait for resolved status and apply to inline review comments
|
||||
const resolvedMap = await resolvedStatusPromise;
|
||||
for (const comment of allComments) {
|
||||
if (comment.isReviewComment && resolvedMap.has(comment.id)) {
|
||||
const info = resolvedMap.get(comment.id)!;
|
||||
comment.isResolved = info.isResolved;
|
||||
comment.threadId = info.threadId;
|
||||
}
|
||||
}
|
||||
|
||||
// Sort by createdAt descending (newest first)
|
||||
allComments.sort((a, b) => new Date(b.createdAt).getTime() - new Date(a.createdAt).getTime());
|
||||
|
||||
return allComments;
|
||||
}
|
||||
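A usage sketch for the comment-listing function, assuming the caller has already resolved the repository owner/name and PR number (the values below are placeholders):

import { fetchPRReviewComments } from './services/pr-review-comments.service.js';

const comments = await fetchPRReviewComments('/path/to/project', 'example-owner', 'example-repo', 123);
// Inline review comments carry resolved-thread status; issue-level and review-body
// comments always report isResolved: false because they are not part of a thread.
const unresolved = comments.filter((c) => c.isReviewComment && !c.isResolved);
console.log(`${unresolved.length} unresolved of ${comments.length} total comments`);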
225
apps/server/src/services/pr-service.ts
Normal file
@@ -0,0 +1,225 @@
|
||||
/**
|
||||
* Service for resolving PR target information from git remotes.
|
||||
*
|
||||
* Extracts remote-parsing and target-resolution logic that was previously
|
||||
* inline in the create-pr route handler.
|
||||
*/
|
||||
|
||||
// TODO: Move execAsync/execEnv to a shared lib (lib/exec.ts or @automaker/utils) so that
|
||||
// services no longer depend on route internals. Tracking issue: route-to-service dependency
|
||||
// inversion. For now, a local thin wrapper is used within the service boundary.
|
||||
import { exec } from 'child_process';
|
||||
import { promisify } from 'util';
|
||||
import { createLogger, isValidRemoteName } from '@automaker/utils';
|
||||
|
||||
// Thin local wrapper — duplicates the route-level execAsync/execEnv until a
|
||||
// shared lib/exec.ts (or @automaker/utils export) is created.
|
||||
const execAsync = promisify(exec);
|
||||
|
||||
const pathSeparator = process.platform === 'win32' ? ';' : ':';
|
||||
const _additionalPaths: string[] = [];
|
||||
if (process.platform === 'win32') {
|
||||
if (process.env.LOCALAPPDATA)
|
||||
_additionalPaths.push(`${process.env.LOCALAPPDATA}\\Programs\\Git\\cmd`);
|
||||
if (process.env.PROGRAMFILES) _additionalPaths.push(`${process.env.PROGRAMFILES}\\Git\\cmd`);
|
||||
if (process.env['ProgramFiles(x86)'])
|
||||
_additionalPaths.push(`${process.env['ProgramFiles(x86)']}\\Git\\cmd`);
|
||||
} else {
|
||||
_additionalPaths.push(
|
||||
'/opt/homebrew/bin',
|
||||
'/usr/local/bin',
|
||||
'/home/linuxbrew/.linuxbrew/bin',
|
||||
`${process.env.HOME}/.local/bin`
|
||||
);
|
||||
}
|
||||
const execEnv = {
|
||||
...process.env,
|
||||
PATH: [process.env.PATH, ..._additionalPaths.filter(Boolean)].filter(Boolean).join(pathSeparator),
|
||||
};
|
||||
|
||||
const logger = createLogger('PRService');
|
||||
|
||||
export interface ParsedRemote {
|
||||
owner: string;
|
||||
repo: string;
|
||||
}
|
||||
|
||||
export interface PrTargetResult {
|
||||
repoUrl: string | null;
|
||||
targetRepo: string | null;
|
||||
pushOwner: string | null;
|
||||
upstreamRepo: string | null;
|
||||
originOwner: string | null;
|
||||
parsedRemotes: Map<string, ParsedRemote>;
|
||||
}
|
||||
|
||||
/**
|
||||
* Parse all git remotes for the given repo path and resolve the PR target.
|
||||
*
|
||||
* @param worktreePath - Working directory of the repository / worktree
|
||||
* @param pushRemote - Remote used for pushing (e.g. "origin")
|
||||
* @param targetRemote - Explicit remote to target the PR against (optional)
|
||||
*
|
||||
* @throws {Error} When targetRemote is specified but not found among repository remotes
|
||||
* @throws {Error} When pushRemote is not found among parsed remotes (when targetRemote is specified)
|
||||
*/
|
||||
export async function resolvePrTarget({
|
||||
worktreePath,
|
||||
pushRemote,
|
||||
targetRemote,
|
||||
}: {
|
||||
worktreePath: string;
|
||||
pushRemote: string;
|
||||
targetRemote?: string;
|
||||
}): Promise<PrTargetResult> {
|
||||
// Validate remote names — pushRemote is a required string so the undefined
|
||||
// guard is unnecessary, but targetRemote is optional.
|
||||
if (!isValidRemoteName(pushRemote)) {
|
||||
throw new Error(`Invalid push remote name: "${pushRemote}"`);
|
||||
}
|
||||
if (targetRemote !== undefined && !isValidRemoteName(targetRemote)) {
|
||||
throw new Error(`Invalid target remote name: "${targetRemote}"`);
|
||||
}
|
||||
|
||||
let repoUrl: string | null = null;
|
||||
let upstreamRepo: string | null = null;
|
||||
let originOwner: string | null = null;
|
||||
const parsedRemotes: Map<string, ParsedRemote> = new Map();
|
||||
|
||||
try {
|
||||
const { stdout: remotes } = await execAsync('git remote -v', {
|
||||
cwd: worktreePath,
|
||||
env: execEnv,
|
||||
});
|
||||
|
||||
// Parse remotes to detect fork workflow and get repo URL
|
||||
const lines = remotes.split(/\r?\n/); // Handle both Unix and Windows line endings
|
||||
for (const line of lines) {
|
||||
// Try multiple patterns to match different remote URL formats
|
||||
// Pattern 1: git@github.com:owner/repo.git (fetch)
|
||||
// Pattern 2: https://github.com/owner/repo.git (fetch)
|
||||
// Pattern 3: https://github.com/owner/repo (fetch)
|
||||
let match = line.match(
|
||||
/^([a-zA-Z0-9._-]+)\s+.*[:/]([^/]+)\/([^/\s]+?)(?:\.git)?\s+\(fetch\)/
|
||||
);
|
||||
if (!match) {
|
||||
// Try SSH format: git@github.com:owner/repo.git
|
||||
match = line.match(
|
||||
/^([a-zA-Z0-9._-]+)\s+git@[^:]+:([^/]+)\/([^\s]+?)(?:\.git)?\s+\(fetch\)/
|
||||
);
|
||||
}
|
||||
if (!match) {
|
||||
// Try HTTPS format: https://github.com/owner/repo.git
|
||||
match = line.match(
|
||||
/^([a-zA-Z0-9._-]+)\s+https?:\/\/[^/]+\/([^/]+)\/([^\s]+?)(?:\.git)?\s+\(fetch\)/
|
||||
);
|
||||
}
|
||||
|
||||
if (match) {
|
||||
const [, remoteName, owner, repo] = match;
|
||||
parsedRemotes.set(remoteName, { owner, repo });
|
||||
if (remoteName === 'upstream') {
|
||||
upstreamRepo = `${owner}/${repo}`;
|
||||
repoUrl = `https://github.com/${owner}/${repo}`;
|
||||
} else if (remoteName === 'origin') {
|
||||
originOwner = owner;
|
||||
if (!repoUrl) {
|
||||
repoUrl = `https://github.com/${owner}/${repo}`;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
} catch (err) {
|
||||
// Log the failure for debugging — control flow falls through to auto-detection
|
||||
logger.debug('Failed to parse git remotes', { worktreePath, error: err });
|
||||
}
|
||||
|
||||
// When targetRemote is explicitly provided but remote parsing failed entirely
|
||||
// (parsedRemotes is empty), we cannot validate or resolve the requested remote.
|
||||
// Silently proceeding to auto-detection would ignore the caller's explicit intent,
|
||||
// so we fail fast with a clear error instead.
|
||||
if (targetRemote && parsedRemotes.size === 0) {
|
||||
throw new Error(
|
||||
`targetRemote "${targetRemote}" was specified but no remotes could be parsed from the repository. ` +
|
||||
`Ensure the repository has at least one configured remote (parsedRemotes is empty).`
|
||||
);
|
||||
}
|
||||
|
||||
// When a targetRemote is explicitly specified, validate that it is known
|
||||
// before using it. Silently falling back to auto-detection when the caller
|
||||
// explicitly requested a remote that doesn't exist is misleading, so we
|
||||
// fail fast here instead.
|
||||
if (targetRemote && parsedRemotes.size > 0 && !parsedRemotes.has(targetRemote)) {
|
||||
throw new Error(`targetRemote "${targetRemote}" not found in repository remotes`);
|
||||
}
|
||||
|
||||
// When a targetRemote is explicitly specified, override fork detection
|
||||
// to use the specified remote as the PR target
|
||||
let targetRepo: string | null = null;
|
||||
let pushOwner: string | null = null;
|
||||
if (targetRemote && parsedRemotes.size > 0) {
|
||||
const targetInfo = parsedRemotes.get(targetRemote);
|
||||
const pushInfo = parsedRemotes.get(pushRemote);
|
||||
|
||||
// If the push remote is not found in the parsed remotes, we cannot
|
||||
// determine the push owner and would build incorrect URLs. Fail fast
|
||||
// instead of silently proceeding with null values.
|
||||
if (!pushInfo) {
|
||||
logger.warn('Push remote not found in parsed remotes', {
|
||||
pushRemote,
|
||||
targetRemote,
|
||||
availableRemotes: [...parsedRemotes.keys()],
|
||||
});
|
||||
throw new Error(`Push remote "${pushRemote}" not found in repository remotes`);
|
||||
}
|
||||
|
||||
if (targetInfo) {
|
||||
targetRepo = `${targetInfo.owner}/${targetInfo.repo}`;
|
||||
repoUrl = `https://github.com/${targetInfo.owner}/${targetInfo.repo}`;
|
||||
}
|
||||
pushOwner = pushInfo.owner;
|
||||
|
||||
// Override the auto-detected upstream/origin with explicit targetRemote
|
||||
// Only treat as cross-remote if target differs from push remote
|
||||
if (targetRemote !== pushRemote && targetInfo) {
|
||||
upstreamRepo = targetRepo;
|
||||
originOwner = pushOwner;
|
||||
} else if (targetInfo) {
|
||||
// Same remote for push and target - regular (non-fork) workflow
|
||||
upstreamRepo = null;
|
||||
originOwner = targetInfo.owner;
|
||||
repoUrl = `https://github.com/${targetInfo.owner}/${targetInfo.repo}`;
|
||||
}
|
||||
}
|
||||
|
||||
// Fallback: Try to get repo URL from git config if remote parsing failed
|
||||
if (!repoUrl) {
|
||||
try {
|
||||
const { stdout: originUrl } = await execAsync('git config --get remote.origin.url', {
|
||||
cwd: worktreePath,
|
||||
env: execEnv,
|
||||
});
|
||||
const url = originUrl.trim();
|
||||
|
||||
// Parse URL to extract owner/repo
|
||||
// Handle both SSH (git@github.com:owner/repo.git) and HTTPS (https://github.com/owner/repo.git)
|
||||
const match = url.match(/[:/]([^/]+)\/([^/\s]+?)(?:\.git)?$/);
|
||||
if (match) {
|
||||
const [, owner, repo] = match;
|
||||
originOwner = owner;
|
||||
repoUrl = `https://github.com/${owner}/${repo}`;
|
||||
}
|
||||
} catch {
|
||||
// Failed to get repo URL from config
|
||||
}
|
||||
}
|
||||
|
||||
return {
|
||||
repoUrl,
|
||||
targetRepo,
|
||||
pushOwner,
|
||||
upstreamRepo,
|
||||
originOwner,
|
||||
parsedRemotes,
|
||||
};
|
||||
}
|
||||
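A fork-workflow sketch for `resolvePrTarget` (paths and remote names are placeholders; both remotes must pass isValidRemoteName or the call throws):

import { resolvePrTarget } from './services/pr-service.js';

const target = await resolvePrTarget({
  worktreePath: '/path/to/worktree',
  pushRemote: 'origin',      // the fork the branch is pushed to
  targetRemote: 'upstream',  // the repository the PR should target
});
// For a cross-remote target, upstreamRepo is the PR base and originOwner the fork owner.
console.log(target.targetRepo, target.pushOwner, target.upstreamRepo);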
@@ -46,6 +46,12 @@ export interface PullResult {
|
||||
conflictSource?: 'pull' | 'stash';
|
||||
conflictFiles?: string[];
|
||||
message?: string;
|
||||
/** Whether the pull resulted in a merge commit (not fast-forward) */
|
||||
isMerge?: boolean;
|
||||
/** Whether the pull was a fast-forward (no merge commit needed) */
|
||||
isFastForward?: boolean;
|
||||
/** Files affected by the merge (only present when isMerge is true) */
|
||||
mergeAffectedFiles?: string[];
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
@@ -178,6 +184,31 @@ function isConflictError(errorOutput: string): boolean {
|
||||
return errorOutput.includes('CONFLICT') || errorOutput.includes('Automatic merge failed');
|
||||
}
|
||||
|
||||
/**
|
||||
* Determine whether the current HEAD commit is a merge commit by checking
|
||||
* whether it has two or more parent hashes.
|
||||
*
|
||||
* Runs `git show -s --pretty=%P HEAD` which prints the parent SHAs separated
|
||||
* by spaces. A merge commit has at least two parents; a regular commit has one.
|
||||
*
|
||||
* @param worktreePath - Path to the git worktree
|
||||
* @returns true if HEAD is a merge commit, false otherwise
|
||||
*/
|
||||
async function isMergeCommit(worktreePath: string): Promise<boolean> {
|
||||
try {
|
||||
const output = await execGitCommand(['show', '-s', '--pretty=%P', 'HEAD'], worktreePath);
|
||||
// Each parent SHA is separated by a space; two or more means it's a merge
|
||||
const parents = output
|
||||
.trim()
|
||||
.split(/\s+/)
|
||||
.filter((p) => p.length > 0);
|
||||
return parents.length >= 2;
|
||||
} catch {
|
||||
// If the check fails for any reason, assume it is not a merge commit
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
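// Illustrative sketch of what the %P parent check sees (the SHAs are invented;
// the parsing mirrors isMergeCommit above).
//   `git show -s --pretty=%P HEAD` prints the parent SHAs on one line:
//     regular commit -> "a1b2c3d"           (1 parent,  not a merge)
//     merge commit   -> "a1b2c3d 9f8e7d6"   (2 parents, a merge)
const exampleParents = 'a1b2c3d 9f8e7d6'.trim().split(/\s+/).filter((p) => p.length > 0);
console.log(exampleParents.length >= 2); // true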
/**
|
||||
* Check whether an output string indicates a stash conflict.
|
||||
*/
|
||||
@@ -302,10 +333,39 @@ export async function performPull(
|
||||
const pullArgs = upstreamStatus === 'tracking' ? ['pull'] : ['pull', targetRemote, branchName];
|
||||
let pullConflict = false;
|
||||
let pullConflictFiles: string[] = [];
|
||||
|
||||
// Declare merge detection variables before the try block so they are accessible
|
||||
// in the stash reapplication path even when didStash is true.
|
||||
let isMerge = false;
|
||||
let isFastForward = false;
|
||||
let mergeAffectedFiles: string[] = [];
|
||||
|
||||
try {
|
||||
const pullOutput = await execGitCommand(pullArgs, worktreePath);
|
||||
|
||||
const alreadyUpToDate = pullOutput.includes('Already up to date');
|
||||
// Detect fast-forward from git pull output
|
||||
isFastForward = pullOutput.includes('Fast-forward') || pullOutput.includes('fast-forward');
|
||||
// Detect merge by checking whether the new HEAD has two parents (more reliable
|
||||
// than string-matching localised pull output which may not contain 'Merge').
|
||||
isMerge = !alreadyUpToDate && !isFastForward ? await isMergeCommit(worktreePath) : false;
|
||||
|
||||
// If it was a real merge (not fast-forward), get the affected files
|
||||
if (isMerge) {
|
||||
try {
|
||||
// Get files changed in the merge commit
|
||||
const diffOutput = await execGitCommand(
|
||||
['diff', '--name-only', 'HEAD~1', 'HEAD'],
|
||||
worktreePath
|
||||
);
|
||||
mergeAffectedFiles = diffOutput
|
||||
.trim()
|
||||
.split('\n')
|
||||
.filter((f: string) => f.trim().length > 0);
|
||||
} catch {
|
||||
// Ignore errors - this is best-effort
|
||||
}
|
||||
}
|
||||
|
||||
// If no stash to reapply, return success
|
||||
if (!didStash) {
|
||||
@@ -317,6 +377,8 @@ export async function performPull(
|
||||
stashed: false,
|
||||
stashRestored: false,
|
||||
message: alreadyUpToDate ? 'Already up to date' : 'Pulled latest changes',
|
||||
...(isMerge ? { isMerge: true, mergeAffectedFiles } : {}),
|
||||
...(isFastForward ? { isFastForward: true } : {}),
|
||||
};
|
||||
}
|
||||
} catch (pullError: unknown) {
|
||||
@@ -374,7 +436,11 @@ export async function performPull(
|
||||
|
||||
// 10. Pull succeeded, now try to reapply stash
|
||||
if (didStash) {
|
||||
return await reapplyStash(worktreePath, branchName);
|
||||
return await reapplyStash(worktreePath, branchName, {
|
||||
isMerge,
|
||||
isFastForward,
|
||||
mergeAffectedFiles,
|
||||
});
|
||||
}
|
||||
|
||||
// Shouldn't reach here, but return a safe default
|
||||
@@ -392,9 +458,21 @@ export async function performPull(
|
||||
*
|
||||
* @param worktreePath - Path to the git worktree
|
||||
* @param branchName - Current branch name
|
||||
* @param mergeInfo - Merge/fast-forward detection info from the pull step
|
||||
* @returns PullResult reflecting stash reapplication status
|
||||
*/
|
||||
async function reapplyStash(worktreePath: string, branchName: string): Promise<PullResult> {
|
||||
async function reapplyStash(
|
||||
worktreePath: string,
|
||||
branchName: string,
|
||||
mergeInfo: { isMerge: boolean; isFastForward: boolean; mergeAffectedFiles: string[] }
|
||||
): Promise<PullResult> {
|
||||
const mergeFields: Partial<PullResult> = {
|
||||
...(mergeInfo.isMerge
|
||||
? { isMerge: true, mergeAffectedFiles: mergeInfo.mergeAffectedFiles }
|
||||
: {}),
|
||||
...(mergeInfo.isFastForward ? { isFastForward: true } : {}),
|
||||
};
|
||||
|
||||
try {
|
||||
await popStash(worktreePath);
|
||||
|
||||
@@ -406,6 +484,7 @@ async function reapplyStash(worktreePath: string, branchName: string): Promise<P
|
||||
hasConflicts: false,
|
||||
stashed: true,
|
||||
stashRestored: true,
|
||||
...mergeFields,
|
||||
message: 'Pulled latest changes and restored your stashed changes.',
|
||||
};
|
||||
} catch (stashPopError: unknown) {
|
||||
@@ -431,6 +510,7 @@ async function reapplyStash(worktreePath: string, branchName: string): Promise<P
|
||||
conflictFiles: stashConflictFiles,
|
||||
stashed: true,
|
||||
stashRestored: false,
|
||||
...mergeFields,
|
||||
message: 'Pull succeeded but reapplying your stashed changes resulted in merge conflicts.',
|
||||
};
|
||||
}
|
||||
@@ -445,6 +525,7 @@ async function reapplyStash(worktreePath: string, branchName: string): Promise<P
|
||||
hasConflicts: false,
|
||||
stashed: true,
|
||||
stashRestored: false,
|
||||
...mergeFields,
|
||||
message:
|
||||
'Pull succeeded but failed to reapply stashed changes. Your changes are still in the stash list.',
|
||||
};
|
||||
|
||||
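A sketch of consuming the new merge-detection fields on PullResult (the worktree path is a placeholder):

import { performPull } from './services/pull-service.js';

const pull = await performPull('/path/to/worktree', { stashIfNeeded: true });
if (pull.success && pull.isMerge) {
  // mergeAffectedFiles is best-effort: the files changed between HEAD~1 and HEAD.
  console.log('Pull created a merge commit touching', pull.mergeAffectedFiles);
} else if (pull.success && pull.isFastForward) {
  console.log('Pull fast-forwarded the branch');
}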
258
apps/server/src/services/push-service.ts
Normal file
@@ -0,0 +1,258 @@
|
||||
/**
|
||||
* PushService - Push git operations without HTTP
|
||||
*
|
||||
* Encapsulates the full git push workflow including:
|
||||
* - Branch name and detached HEAD detection
|
||||
* - Safe array-based command execution (no shell interpolation)
|
||||
* - Divergent branch detection and auto-resolution via pull-then-retry
|
||||
* - Structured result reporting
|
||||
*
|
||||
* Mirrors the pull-service.ts pattern for consistency.
|
||||
*/
|
||||
|
||||
import { createLogger, getErrorMessage } from '@automaker/utils';
|
||||
import { execGitCommand } from '@automaker/git-utils';
|
||||
import { getCurrentBranch } from '../lib/git.js';
|
||||
import { performPull } from './pull-service.js';
|
||||
|
||||
const logger = createLogger('PushService');
|
||||
|
||||
// ============================================================================
|
||||
// Types
|
||||
// ============================================================================
|
||||
|
||||
export interface PushOptions {
|
||||
/** Remote name to push to (defaults to 'origin') */
|
||||
remote?: string;
|
||||
/** Force push */
|
||||
force?: boolean;
|
||||
/** When true and push is rejected due to divergence, pull then retry push */
|
||||
autoResolve?: boolean;
|
||||
}
|
||||
|
||||
export interface PushResult {
|
||||
success: boolean;
|
||||
error?: string;
|
||||
branch?: string;
|
||||
pushed?: boolean;
|
||||
/** Whether the push was initially rejected because the branches diverged */
|
||||
diverged?: boolean;
|
||||
/** Whether divergence was automatically resolved via pull-then-retry */
|
||||
autoResolved?: boolean;
|
||||
/** Whether the auto-resolve pull resulted in merge conflicts */
|
||||
hasConflicts?: boolean;
|
||||
/** Files with merge conflicts (only when hasConflicts is true) */
|
||||
conflictFiles?: string[];
|
||||
message?: string;
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// Helper Functions
|
||||
// ============================================================================
|
||||
|
||||
/**
|
||||
* Detect whether push error output indicates a diverged/non-fast-forward rejection.
|
||||
*/
|
||||
function isDivergenceError(errorOutput: string): boolean {
|
||||
const lower = errorOutput.toLowerCase();
|
||||
// Require specific divergence indicators rather than just 'rejected' alone,
|
||||
// which could match pre-receive hook rejections or protected branch errors.
|
||||
const hasNonFastForward = lower.includes('non-fast-forward');
|
||||
const hasFetchFirst = lower.includes('fetch first');
|
||||
const hasFailedToPush = lower.includes('failed to push some refs');
|
||||
const hasRejected = lower.includes('rejected');
|
||||
return hasNonFastForward || hasFetchFirst || (hasRejected && hasFailedToPush);
|
||||
}
|
||||
|
||||
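// Illustrative check (the stderr text is abridged from typical git output;
// exact wording can vary by git version):
const sampleStderr = '! [rejected] main -> main (non-fast-forward)\nerror: failed to push some refs';
console.log(isDivergenceError(sampleStderr)); // true
// A bare "rejected" from a pre-receive hook, without "failed to push some refs"
// or "non-fast-forward", would not match.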
// ============================================================================
|
||||
// Main Service Function
|
||||
// ============================================================================
|
||||
|
||||
/**
|
||||
* Perform a git push on the given worktree.
|
||||
*
|
||||
* The workflow:
|
||||
* 1. Get current branch name (detect detached HEAD)
|
||||
* 2. Attempt `git push <remote> <branch>` with safe array args
|
||||
* 3. If push fails with divergence and autoResolve is true:
|
||||
* a. Pull from the same remote (with stash support)
|
||||
* b. If pull succeeds without conflicts, retry push
|
||||
* 4. If push fails with "no upstream" error, retry with --set-upstream
|
||||
* 5. Return structured result
|
||||
*
|
||||
* @param worktreePath - Path to the git worktree
|
||||
* @param options - Push options (remote, force, autoResolve)
|
||||
* @returns PushResult with detailed status information
|
||||
*/
|
||||
export async function performPush(
|
||||
worktreePath: string,
|
||||
options?: PushOptions
|
||||
): Promise<PushResult> {
|
||||
const targetRemote = options?.remote || 'origin';
|
||||
const force = options?.force ?? false;
|
||||
const autoResolve = options?.autoResolve ?? false;
|
||||
|
||||
// 1. Get current branch name
|
||||
let branchName: string;
|
||||
try {
|
||||
branchName = await getCurrentBranch(worktreePath);
|
||||
} catch (err) {
|
||||
return {
|
||||
success: false,
|
||||
error: `Failed to get current branch: ${getErrorMessage(err)}`,
|
||||
};
|
||||
}
|
||||
|
||||
// 2. Check for detached HEAD state
|
||||
if (branchName === 'HEAD') {
|
||||
return {
|
||||
success: false,
|
||||
error: 'Cannot push in detached HEAD state. Please checkout a branch first.',
|
||||
};
|
||||
}
|
||||
|
||||
// 3. Build push args (no -u flag; upstream is set in the fallback path only when needed)
|
||||
const pushArgs = ['push', targetRemote, branchName];
|
||||
if (force) {
|
||||
pushArgs.push('--force');
|
||||
}
|
||||
|
||||
// 4. Attempt push
|
||||
try {
|
||||
await execGitCommand(pushArgs, worktreePath);
|
||||
|
||||
return {
|
||||
success: true,
|
||||
branch: branchName,
|
||||
pushed: true,
|
||||
message: `Successfully pushed ${branchName} to ${targetRemote}`,
|
||||
};
|
||||
} catch (pushError: unknown) {
|
||||
const err = pushError as { stderr?: string; stdout?: string; message?: string };
|
||||
const errorOutput = `${err.stderr || ''} ${err.stdout || ''} ${err.message || ''}`;
|
||||
|
||||
// 5. Check if the error is a divergence rejection
|
||||
if (isDivergenceError(errorOutput)) {
|
||||
if (!autoResolve) {
|
||||
return {
|
||||
success: false,
|
||||
branch: branchName,
|
||||
pushed: false,
|
||||
diverged: true,
|
||||
error: `Push rejected: remote has changes not present locally. Use sync or pull first, or enable auto-resolve.`,
|
||||
message: `Push to ${targetRemote} was rejected because the remote branch has diverged.`,
|
||||
};
|
||||
}
|
||||
|
||||
// 6. Auto-resolve: pull then retry push
|
||||
logger.info('Push rejected due to divergence, attempting auto-resolve via pull', {
|
||||
worktreePath,
|
||||
remote: targetRemote,
|
||||
branch: branchName,
|
||||
});
|
||||
|
||||
try {
|
||||
const pullResult = await performPull(worktreePath, {
|
||||
remote: targetRemote,
|
||||
stashIfNeeded: true,
|
||||
});
|
||||
|
||||
if (!pullResult.success) {
|
||||
return {
|
||||
success: false,
|
||||
branch: branchName,
|
||||
pushed: false,
|
||||
diverged: true,
|
||||
autoResolved: false,
|
||||
error: `Auto-resolve failed during pull: ${pullResult.error}`,
|
||||
};
|
||||
}
|
||||
|
||||
if (pullResult.hasConflicts) {
|
||||
return {
|
||||
success: false,
|
||||
branch: branchName,
|
||||
pushed: false,
|
||||
diverged: true,
|
||||
autoResolved: false,
|
||||
hasConflicts: true,
|
||||
conflictFiles: pullResult.conflictFiles,
|
||||
error:
|
||||
'Auto-resolve pull resulted in merge conflicts. Resolve conflicts and push again.',
|
||||
};
|
||||
}
|
||||
|
||||
// 7. Retry push after successful pull
|
||||
try {
|
||||
await execGitCommand(pushArgs, worktreePath);
|
||||
|
||||
return {
|
||||
success: true,
|
||||
branch: branchName,
|
||||
pushed: true,
|
||||
diverged: true,
|
||||
autoResolved: true,
|
||||
message: `Push succeeded after auto-resolving divergence (pulled from ${targetRemote} first).`,
|
||||
};
|
||||
} catch (retryError: unknown) {
|
||||
const retryErr = retryError as { stderr?: string; message?: string };
|
||||
return {
|
||||
success: false,
|
||||
branch: branchName,
|
||||
pushed: false,
|
||||
diverged: true,
|
||||
autoResolved: false,
|
||||
error: `Push failed after auto-resolve pull: ${retryErr.stderr || retryErr.message || 'Unknown error'}`,
|
||||
};
|
||||
}
|
||||
} catch (pullError) {
|
||||
return {
|
||||
success: false,
|
||||
branch: branchName,
|
||||
pushed: false,
|
||||
diverged: true,
|
||||
autoResolved: false,
|
||||
error: `Auto-resolve pull failed: ${getErrorMessage(pullError)}`,
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
// 6b. Non-divergence error (e.g. no upstream configured) - retry with --set-upstream
|
||||
const isNoUpstreamError =
|
||||
errorOutput.toLowerCase().includes('no upstream') ||
|
||||
errorOutput.toLowerCase().includes('has no upstream branch') ||
|
||||
errorOutput.toLowerCase().includes('set-upstream');
|
||||
if (isNoUpstreamError) {
|
||||
try {
|
||||
const setUpstreamArgs = ['push', '--set-upstream', targetRemote, branchName];
|
||||
if (force) {
|
||||
setUpstreamArgs.push('--force');
|
||||
}
|
||||
await execGitCommand(setUpstreamArgs, worktreePath);
|
||||
|
||||
return {
|
||||
success: true,
|
||||
branch: branchName,
|
||||
pushed: true,
|
||||
message: `Successfully pushed ${branchName} to ${targetRemote} (set upstream)`,
|
||||
};
|
||||
} catch (upstreamError: unknown) {
|
||||
const upstreamErr = upstreamError as { stderr?: string; message?: string };
|
||||
return {
|
||||
success: false,
|
||||
branch: branchName,
|
||||
pushed: false,
|
||||
error: upstreamErr.stderr || upstreamErr.message || getErrorMessage(pushError),
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
// 6c. Other push error - return as-is
|
||||
return {
|
||||
success: false,
|
||||
branch: branchName,
|
||||
pushed: false,
|
||||
error: err.stderr || err.message || getErrorMessage(pushError),
|
||||
};
|
||||
}
|
||||
}
|
||||
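A usage sketch for `performPush` with auto-resolve enabled (the worktree path is a placeholder):

import { performPush } from './services/push-service.js';

const push = await performPush('/path/to/worktree', {
  remote: 'origin',
  autoResolve: true, // on divergence: pull (stashing if needed), then retry the push once
});
if (!push.success && push.hasConflicts) {
  console.log('Resolve these files, then push again:', push.conflictFiles);
}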
@@ -7,7 +7,7 @@
|
||||
|
||||
import fs from 'fs/promises';
|
||||
import path from 'path';
|
||||
import { createLogger, getErrorMessage } from '@automaker/utils';
|
||||
import { createLogger, getErrorMessage, isValidRemoteName } from '@automaker/utils';
|
||||
import { execGitCommand, getCurrentBranch, getConflictFiles } from '@automaker/git-utils';
|
||||
|
||||
const logger = createLogger('RebaseService');
|
||||
@@ -16,6 +16,11 @@ const logger = createLogger('RebaseService');
|
||||
// Types
|
||||
// ============================================================================
|
||||
|
||||
export interface RebaseOptions {
|
||||
/** Remote name to fetch from before rebasing (defaults to 'origin') */
|
||||
remote?: string;
|
||||
}
|
||||
|
||||
export interface RebaseResult {
|
||||
success: boolean;
|
||||
error?: string;
|
||||
@@ -36,9 +41,14 @@ export interface RebaseResult {
|
||||
*
|
||||
* @param worktreePath - Path to the git worktree
|
||||
* @param ontoBranch - The branch to rebase onto (e.g., 'origin/main')
|
||||
* @param options - Optional rebase options (remote name for fetch)
|
||||
* @returns RebaseResult with success/failure information
|
||||
*/
|
||||
export async function runRebase(worktreePath: string, ontoBranch: string): Promise<RebaseResult> {
|
||||
export async function runRebase(
|
||||
worktreePath: string,
|
||||
ontoBranch: string,
|
||||
options?: RebaseOptions
|
||||
): Promise<RebaseResult> {
|
||||
// Reject empty, whitespace-only, or dash-prefixed branch names.
|
||||
const normalizedOntoBranch = ontoBranch?.trim() ?? '';
|
||||
if (normalizedOntoBranch === '' || normalizedOntoBranch.startsWith('-')) {
|
||||
@@ -59,6 +69,33 @@ export async function runRebase(worktreePath: string, ontoBranch: string): Promi
|
||||
};
|
||||
}
|
||||
|
||||
// Validate the remote name to prevent git option injection.
|
||||
// Reject invalid remote names so the caller knows their input was wrong,
|
||||
// consistent with how invalid branch names are handled above.
|
||||
const remote = options?.remote || 'origin';
|
||||
if (!isValidRemoteName(remote)) {
|
||||
logger.warn('Invalid remote name supplied to rebase-service', {
|
||||
remote,
|
||||
worktreePath,
|
||||
});
|
||||
return {
|
||||
success: false,
|
||||
error: `Invalid remote name: "${remote}"`,
|
||||
};
|
||||
}
|
||||
|
||||
// Fetch latest from remote before rebasing to ensure we have up-to-date refs
|
||||
try {
|
||||
await execGitCommand(['fetch', remote], worktreePath);
|
||||
} catch (fetchError) {
|
||||
logger.warn('Failed to fetch from remote before rebase; proceeding with local refs', {
|
||||
remote,
|
||||
worktreePath,
|
||||
error: getErrorMessage(fetchError),
|
||||
});
|
||||
// Non-fatal: proceed with local refs if fetch fails (e.g. offline)
|
||||
}
|
||||
|
||||
try {
|
||||
// Pass ontoBranch after '--' so git treats it as a ref, not an option.
|
||||
// Set LC_ALL=C so git always emits English output regardless of the system
|
||||
|
||||
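A sketch of the extended `runRebase` call with the new options argument (values are placeholders):

import { runRebase } from './services/rebase-service.js';

// Fetch from "upstream" first, then rebase the worktree's branch onto upstream/main.
const rebase = await runRebase('/path/to/worktree', 'upstream/main', { remote: 'upstream' });
if (!rebase.success) {
  console.log('Rebase failed:', rebase.error);
}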
@@ -31,6 +31,7 @@ import type {
|
||||
WorktreeInfo,
|
||||
PhaseModelConfig,
|
||||
PhaseModelEntry,
|
||||
FeatureTemplate,
|
||||
ClaudeApiProfile,
|
||||
ClaudeCompatibleProvider,
|
||||
ProviderModel,
|
||||
@@ -40,6 +41,7 @@ import {
|
||||
DEFAULT_CREDENTIALS,
|
||||
DEFAULT_PROJECT_SETTINGS,
|
||||
DEFAULT_PHASE_MODELS,
|
||||
DEFAULT_FEATURE_TEMPLATES,
|
||||
SETTINGS_VERSION,
|
||||
CREDENTIALS_VERSION,
|
||||
PROJECT_SETTINGS_VERSION,
|
||||
@@ -139,6 +141,11 @@ export class SettingsService {
|
||||
// Migrate model IDs to canonical format
|
||||
const migratedModelSettings = this.migrateModelSettings(settings);
|
||||
|
||||
// Merge built-in feature templates: ensure all built-in templates exist in user settings.
|
||||
// User customizations (enabled/disabled state, order overrides) are preserved.
|
||||
// New built-in templates added in code updates are injected for existing users.
|
||||
const mergedFeatureTemplates = this.mergeBuiltInTemplates(settings.featureTemplates);
|
||||
|
||||
// Apply any missing defaults (for backwards compatibility)
|
||||
let result: GlobalSettings = {
|
||||
...DEFAULT_GLOBAL_SETTINGS,
|
||||
@@ -149,6 +156,7 @@ export class SettingsService {
|
||||
...settings.keyboardShortcuts,
|
||||
},
|
||||
phaseModels: migratedPhaseModels,
|
||||
featureTemplates: mergedFeatureTemplates,
|
||||
};
|
||||
|
||||
// Version-based migrations
|
||||
@@ -250,6 +258,32 @@ export class SettingsService {
|
||||
return result;
|
||||
}
|
||||
|
||||
/**
|
||||
* Merge built-in feature templates with user's stored templates.
|
||||
*
|
||||
* Ensures new built-in templates added in code updates are available to existing users
|
||||
* without overwriting their customizations (e.g., enabled/disabled state, custom order).
|
||||
* Built-in templates missing from stored settings are appended with their defaults.
|
||||
*
|
||||
* @param storedTemplates - Templates from user's settings file (may be undefined for new installs)
|
||||
* @returns Merged template list with all built-in templates present
|
||||
*/
|
||||
private mergeBuiltInTemplates(storedTemplates: FeatureTemplate[] | undefined): FeatureTemplate[] {
|
||||
if (!storedTemplates) {
|
||||
return DEFAULT_FEATURE_TEMPLATES;
|
||||
}
|
||||
|
||||
const storedIds = new Set(storedTemplates.map((t) => t.id));
|
||||
const missingBuiltIns = DEFAULT_FEATURE_TEMPLATES.filter((t) => !storedIds.has(t.id));
|
||||
|
||||
if (missingBuiltIns.length === 0) {
|
||||
return storedTemplates;
|
||||
}
|
||||
|
||||
// Append missing built-in templates after existing ones
|
||||
return [...storedTemplates, ...missingBuiltIns];
|
||||
}
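
As a quick illustration of the merge semantics, here is a sketch with a simplified template shape; the template ids are hypothetical and the real FeatureTemplate type carries more fields.

// Illustrative only: shows how user customizations survive while a new
// built-in template is appended. Ids and shape are assumptions.
interface TemplateSketch {
  id: string;
  enabled: boolean;
}

const builtIns: TemplateSketch[] = [
  { id: 'built-in-refactor', enabled: true },
  { id: 'built-in-bugfix', enabled: true }, // hypothetical template added in a newer release
];
const stored: TemplateSketch[] = [
  { id: 'built-in-refactor', enabled: false }, // user disabled this one
];

const storedIds = new Set(stored.map((t) => t.id));
const merged = [...stored, ...builtIns.filter((t) => !storedIds.has(t.id))];
// merged keeps the disabled 'built-in-refactor' first and appends 'built-in-bugfix'.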
|
||||
|
||||
/**
|
||||
* Migrate legacy enhancementModel/validationModel fields to phaseModels structure
|
||||
*
|
||||
@@ -573,6 +607,17 @@ export class SettingsService {
|
||||
ignoreEmptyArrayOverwrite('claudeApiProfiles');
|
||||
// Note: claudeCompatibleProviders intentionally NOT guarded - users should be able to delete all providers
|
||||
|
||||
// Check for explicit permission to clear eventHooks (escape hatch for intentional clearing)
|
||||
const allowEmptyEventHooks =
|
||||
(sanitizedUpdates as Record<string, unknown>).__allowEmptyEventHooks === true;
|
||||
// Remove the flag so it doesn't get persisted
|
||||
delete (sanitizedUpdates as Record<string, unknown>).__allowEmptyEventHooks;
|
||||
|
||||
// Only guard eventHooks if explicit permission wasn't granted
|
||||
if (!allowEmptyEventHooks) {
|
||||
ignoreEmptyArrayOverwrite('eventHooks');
|
||||
}
|
||||
|
||||
// Empty object overwrite guard
|
||||
const ignoreEmptyObjectOverwrite = <K extends keyof GlobalSettings>(key: K): void => {
|
||||
const nextVal = sanitizedUpdates[key] as unknown;
|
||||
|
||||
209
apps/server/src/services/sync-service.ts
Normal file
@@ -0,0 +1,209 @@
/**
|
||||
* SyncService - Pull then push in a single operation
|
||||
*
|
||||
* Composes performPull() and performPush() to synchronize a branch
|
||||
* with its remote. Always uses stashIfNeeded for the pull step.
|
||||
* If push fails with divergence after pull, retries once.
|
||||
*
|
||||
* Follows the same pattern as pull-service.ts and push-service.ts.
|
||||
*/
|
||||
|
||||
import { createLogger, getErrorMessage } from '@automaker/utils';
|
||||
import { performPull } from './pull-service.js';
|
||||
import { performPush } from './push-service.js';
|
||||
import type { PullResult } from './pull-service.js';
|
||||
import type { PushResult } from './push-service.js';
|
||||
|
||||
const logger = createLogger('SyncService');
|
||||
|
||||
// ============================================================================
|
||||
// Types
|
||||
// ============================================================================
|
||||
|
||||
export interface SyncOptions {
|
||||
/** Remote name (defaults to 'origin') */
|
||||
remote?: string;
|
||||
}
|
||||
|
||||
export interface SyncResult {
|
||||
success: boolean;
|
||||
error?: string;
|
||||
branch?: string;
|
||||
/** Whether the pull step was performed */
|
||||
pulled?: boolean;
|
||||
/** Whether the push step was performed */
|
||||
pushed?: boolean;
|
||||
/** Pull resulted in conflicts */
|
||||
hasConflicts?: boolean;
|
||||
/** Files with merge conflicts */
|
||||
conflictFiles?: string[];
|
||||
/** Source of conflicts ('pull' | 'stash') */
|
||||
conflictSource?: 'pull' | 'stash';
|
||||
/** Whether the pull was a fast-forward */
|
||||
isFastForward?: boolean;
|
||||
/** Whether the pull resulted in a merge commit */
|
||||
isMerge?: boolean;
|
||||
/** Whether push divergence was auto-resolved */
|
||||
autoResolved?: boolean;
|
||||
message?: string;
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// Main Service Function
|
||||
// ============================================================================
|
||||
|
||||
/**
|
||||
* Perform a sync operation (pull then push) on the given worktree.
|
||||
*
|
||||
* The workflow:
|
||||
* 1. Pull from remote with stashIfNeeded: true
|
||||
* 2. If pull has conflicts, stop and return conflict info
|
||||
* 3. Push to remote
|
||||
* 4. If push fails with divergence after pull, retry once
|
||||
*
|
||||
* @param worktreePath - Path to the git worktree
|
||||
* @param options - Sync options (remote)
|
||||
* @returns SyncResult with detailed status information
|
||||
*/
|
||||
export async function performSync(
|
||||
worktreePath: string,
|
||||
options?: SyncOptions
|
||||
): Promise<SyncResult> {
|
||||
const targetRemote = options?.remote || 'origin';
|
||||
|
||||
// 1. Pull from remote
|
||||
logger.info('Sync: starting pull', { worktreePath, remote: targetRemote });
|
||||
|
||||
let pullResult: PullResult;
|
||||
try {
|
||||
pullResult = await performPull(worktreePath, {
|
||||
remote: targetRemote,
|
||||
stashIfNeeded: true,
|
||||
});
|
||||
} catch (pullError) {
|
||||
return {
|
||||
success: false,
|
||||
error: `Sync pull failed: ${getErrorMessage(pullError)}`,
|
||||
};
|
||||
}
|
||||
|
||||
if (!pullResult.success) {
|
||||
return {
|
||||
success: false,
|
||||
branch: pullResult.branch,
|
||||
pulled: false,
|
||||
pushed: false,
|
||||
error: `Sync pull failed: ${pullResult.error}`,
|
||||
hasConflicts: pullResult.hasConflicts,
|
||||
conflictFiles: pullResult.conflictFiles,
|
||||
conflictSource: pullResult.conflictSource,
|
||||
};
|
||||
}
|
||||
|
||||
// 2. If pull had conflicts, stop and return conflict info
|
||||
if (pullResult.hasConflicts) {
|
||||
return {
|
||||
success: false,
|
||||
branch: pullResult.branch,
|
||||
pulled: true,
|
||||
pushed: false,
|
||||
hasConflicts: true,
|
||||
conflictFiles: pullResult.conflictFiles,
|
||||
conflictSource: pullResult.conflictSource,
|
||||
isFastForward: pullResult.isFastForward,
|
||||
isMerge: pullResult.isMerge,
|
||||
error: 'Sync stopped: pull resulted in merge conflicts. Resolve conflicts and try again.',
|
||||
message: pullResult.message,
|
||||
};
|
||||
}
|
||||
|
||||
// 3. Push to remote
|
||||
logger.info('Sync: pull succeeded, starting push', { worktreePath, remote: targetRemote });
|
||||
|
||||
let pushResult: PushResult;
|
||||
try {
|
||||
pushResult = await performPush(worktreePath, {
|
||||
remote: targetRemote,
|
||||
});
|
||||
} catch (pushError) {
|
||||
return {
|
||||
success: false,
|
||||
branch: pullResult.branch,
|
||||
pulled: true,
|
||||
pushed: false,
|
||||
isFastForward: pullResult.isFastForward,
|
||||
isMerge: pullResult.isMerge,
|
||||
error: `Sync push failed: ${getErrorMessage(pushError)}`,
|
||||
};
|
||||
}
|
||||
|
||||
if (!pushResult.success) {
|
||||
// 4. If push diverged after pull, retry once with autoResolve
|
||||
if (pushResult.diverged) {
|
||||
logger.info('Sync: push diverged after pull, retrying with autoResolve', {
|
||||
worktreePath,
|
||||
remote: targetRemote,
|
||||
});
|
||||
|
||||
try {
|
||||
const retryResult = await performPush(worktreePath, {
|
||||
remote: targetRemote,
|
||||
autoResolve: true,
|
||||
});
|
||||
|
||||
if (retryResult.success) {
|
||||
return {
|
||||
success: true,
|
||||
branch: retryResult.branch,
|
||||
pulled: true,
|
||||
pushed: true,
|
||||
autoResolved: true,
|
||||
isFastForward: pullResult.isFastForward,
|
||||
isMerge: pullResult.isMerge,
|
||||
message: 'Sync completed (push required auto-resolve).',
|
||||
};
|
||||
}
|
||||
|
||||
return {
|
||||
success: false,
|
||||
branch: retryResult.branch,
|
||||
pulled: true,
|
||||
pushed: false,
|
||||
hasConflicts: retryResult.hasConflicts,
|
||||
conflictFiles: retryResult.conflictFiles,
|
||||
error: retryResult.error,
|
||||
};
|
||||
} catch (retryError) {
|
||||
return {
|
||||
success: false,
|
||||
branch: pullResult.branch,
|
||||
pulled: true,
|
||||
pushed: false,
|
||||
error: `Sync push retry failed: ${getErrorMessage(retryError)}`,
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
return {
|
||||
success: false,
|
||||
branch: pushResult.branch,
|
||||
pulled: true,
|
||||
pushed: false,
|
||||
isFastForward: pullResult.isFastForward,
|
||||
isMerge: pullResult.isMerge,
|
||||
error: `Sync push failed: ${pushResult.error}`,
|
||||
};
|
||||
}
|
||||
|
||||
return {
|
||||
success: true,
|
||||
branch: pushResult.branch,
|
||||
pulled: pullResult.pulled ?? true,
|
||||
pushed: true,
|
||||
isFastForward: pullResult.isFastForward,
|
||||
isMerge: pullResult.isMerge,
|
||||
message: pullResult.pulled
|
||||
? 'Sync completed: pulled latest changes and pushed.'
|
||||
: 'Sync completed: already up to date, pushed local commits.',
|
||||
};
|
||||
}
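
A minimal usage sketch for the new service, assuming a caller that maps the result to HTTP-style responses; everything beyond the SyncResult fields shown above is an assumption.

// Illustrative only: how a route handler might consume performSync.
import { performSync } from './sync-service.js';

export async function syncWorktree(worktreePath: string) {
  const result = await performSync(worktreePath, { remote: 'origin' });

  if (!result.success && result.hasConflicts) {
    // Pull produced merge conflicts; surface the files so the user can resolve them.
    return { status: 409, conflictFiles: result.conflictFiles ?? [], source: result.conflictSource };
  }
  if (!result.success) {
    return { status: 500, error: result.error };
  }
  // Pulled (fast-forward or merge) and pushed; autoResolved marks the divergence retry path.
  return { status: 200, message: result.message, autoResolved: result.autoResolved ?? false };
}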
|
||||
@@ -9,7 +9,8 @@
|
||||
* For remote branches (e.g., "origin/feature"), automatically creates a
|
||||
* local tracking branch and checks it out.
|
||||
*
|
||||
* Also fetches the latest remote refs after switching.
|
||||
* Fetches the latest remote refs before switching to ensure remote branch
|
||||
* references are up-to-date for accurate detection and checkout.
|
||||
*
|
||||
* Extracted from the worktree switch-branch route to improve organization
|
||||
* and testability. Follows the same pattern as pull-service.ts and
|
||||
@@ -57,7 +58,8 @@ const FETCH_TIMEOUT_MS = 30_000;
|
||||
* slow or unresponsive remote does not block the branch-switch flow
|
||||
* indefinitely. Timeout errors are logged and treated as non-fatal
|
||||
* (the same as network-unavailable errors) so the rest of the workflow
|
||||
* continues normally.
|
||||
* continues normally. This is called before the branch switch to
|
||||
* ensure remote refs are up-to-date for branch detection and checkout.
|
||||
*/
|
||||
async function fetchRemotes(cwd: string): Promise<void> {
|
||||
const controller = new AbortController();
|
||||
@@ -66,15 +68,15 @@ async function fetchRemotes(cwd: string): Promise<void> {
|
||||
try {
|
||||
await execGitCommand(['fetch', '--all', '--quiet'], cwd, undefined, controller);
|
||||
} catch (error) {
|
||||
if (error instanceof Error && error.message === 'Process aborted') {
|
||||
if (controller.signal.aborted) {
|
||||
// Fetch timed out - log and continue; callers should not be blocked by a slow remote
|
||||
logger.warn(
|
||||
`fetchRemotes timed out after ${FETCH_TIMEOUT_MS}ms - continuing without latest remote refs`
|
||||
);
|
||||
} else {
|
||||
logger.warn(`fetchRemotes failed: ${getErrorMessage(error)} - continuing with local refs`);
|
||||
}
|
||||
// Ignore all fetch errors (timeout or otherwise) - we may be offline or the
|
||||
// remote may be temporarily unavailable. The branch switch itself has
|
||||
// already succeeded at this point.
|
||||
// Non-fatal: continue with locally available refs regardless of failure type
|
||||
} finally {
|
||||
clearTimeout(timerId);
|
||||
}
|
||||
@@ -126,13 +128,13 @@ async function isRemoteBranch(cwd: string, branchName: string): Promise<boolean>
|
||||
* Perform a full branch switch workflow on the given worktree.
|
||||
*
|
||||
* The workflow:
|
||||
* 1. Get current branch name
|
||||
* 2. Detect remote vs local branch and determine target
|
||||
* 3. Return early if already on target branch
|
||||
* 4. Validate branch existence
|
||||
* 5. Stash local changes if any
|
||||
* 6. Checkout the target branch
|
||||
* 7. Fetch latest from remotes
|
||||
* 1. Fetch latest from all remotes (ensures remote refs are up-to-date)
|
||||
* 2. Get current branch name
|
||||
* 3. Detect remote vs local branch and determine target
|
||||
* 4. Return early if already on target branch
|
||||
* 5. Validate branch existence
|
||||
* 6. Stash local changes if any
|
||||
* 7. Checkout the target branch
|
||||
* 8. Reapply stashed changes (detect conflicts)
|
||||
* 9. Handle error recovery (restore stash if checkout fails)
|
||||
*
|
||||
@@ -149,14 +151,20 @@ export async function performSwitchBranch(
|
||||
// Emit start event
|
||||
events?.emit('switch:start', { worktreePath, branchName });
|
||||
|
||||
// 1. Get current branch
|
||||
// 1. Fetch latest from all remotes before switching
|
||||
// This ensures remote branch refs are up-to-date so that isRemoteBranch()
|
||||
// can detect newly created remote branches and local tracking branches
|
||||
// are aware of upstream changes.
|
||||
await fetchRemotes(worktreePath);
|
||||
|
||||
// 2. Get current branch
|
||||
const currentBranchOutput = await execGitCommand(
|
||||
['rev-parse', '--abbrev-ref', 'HEAD'],
|
||||
worktreePath
|
||||
);
|
||||
const previousBranch = currentBranchOutput.trim();
|
||||
|
||||
// 2. Determine the actual target branch name for checkout
|
||||
// 3. Determine the actual target branch name for checkout
|
||||
let targetBranch = branchName;
|
||||
let isRemote = false;
|
||||
|
||||
@@ -180,7 +188,7 @@ export async function performSwitchBranch(
|
||||
}
|
||||
}
|
||||
|
||||
// 3. Return early if already on the target branch
|
||||
// 4. Return early if already on the target branch
|
||||
if (previousBranch === targetBranch) {
|
||||
events?.emit('switch:done', {
|
||||
worktreePath,
|
||||
@@ -198,7 +206,7 @@ export async function performSwitchBranch(
|
||||
};
|
||||
}
|
||||
|
||||
// 4. Check if target branch exists as a local branch
|
||||
// 5. Check if target branch exists as a local branch
|
||||
if (!isRemote) {
|
||||
if (!(await localBranchExists(worktreePath, branchName))) {
|
||||
events?.emit('switch:error', {
|
||||
@@ -213,7 +221,7 @@ export async function performSwitchBranch(
|
||||
}
|
||||
}
|
||||
|
||||
// 5. Stash local changes if any exist
|
||||
// 6. Stash local changes if any exist
|
||||
const hadChanges = await hasAnyChanges(worktreePath, { excludeWorktreePaths: true });
|
||||
let didStash = false;
|
||||
|
||||
@@ -242,7 +250,7 @@ export async function performSwitchBranch(
|
||||
}
|
||||
|
||||
try {
|
||||
// 6. Switch to the target branch
|
||||
// 7. Switch to the target branch
|
||||
events?.emit('switch:checkout', {
|
||||
worktreePath,
|
||||
targetBranch,
|
||||
@@ -265,9 +273,6 @@ export async function performSwitchBranch(
|
||||
await execGitCommand(['checkout', targetBranch], worktreePath);
|
||||
}
|
||||
|
||||
// 7. Fetch latest from remotes after switching
|
||||
await fetchRemotes(worktreePath);
|
||||
|
||||
// 8. Reapply stashed changes if we stashed earlier
|
||||
let hasConflicts = false;
|
||||
let conflictMessage = '';
|
||||
@@ -347,7 +352,7 @@ export async function performSwitchBranch(
|
||||
};
|
||||
}
|
||||
} catch (checkoutError) {
|
||||
// 9. If checkout failed and we stashed, try to restore the stash
|
||||
// 9. Error recovery: if checkout failed and we stashed, try to restore the stash
|
||||
if (didStash) {
|
||||
const popResult = await popStash(worktreePath);
|
||||
if (popResult.hasConflicts) {
|
||||
|
||||
@@ -8,9 +8,60 @@
|
||||
|
||||
import path from 'path';
|
||||
import fs from 'fs/promises';
|
||||
import { execGitCommand } from '@automaker/git-utils';
|
||||
import type { EventEmitter } from '../lib/events.js';
|
||||
import type { SettingsService } from './settings-service.js';
|
||||
|
||||
/**
|
||||
* Get the list of remote names that have a branch matching the given branch name.
|
||||
*
|
||||
* Uses `git for-each-ref` to check cached remote refs, returning the names of
|
||||
* any remotes that already have a branch with the same name as `currentBranch`.
|
||||
* Returns an empty array when `hasAnyRemotes` is false or when no matching
|
||||
* remote refs are found.
|
||||
*
|
||||
* This helps the UI distinguish between "branch exists on the tracking remote"
|
||||
* vs "branch was pushed to a different remote".
|
||||
*
|
||||
* @param worktreePath - Path to the git worktree
|
||||
* @param currentBranch - Branch name to search for on remotes
|
||||
* @param hasAnyRemotes - Whether the repository has any remotes configured
|
||||
* @returns Array of remote names (e.g. ["origin", "upstream"]) that contain the branch
|
||||
*/
|
||||
export async function getRemotesWithBranch(
|
||||
worktreePath: string,
|
||||
currentBranch: string,
|
||||
hasAnyRemotes: boolean
|
||||
): Promise<string[]> {
|
||||
if (!hasAnyRemotes) {
|
||||
return [];
|
||||
}
|
||||
|
||||
try {
|
||||
const remoteRefsOutput = await execGitCommand(
|
||||
['for-each-ref', '--format=%(refname:short)', `refs/remotes/*/${currentBranch}`],
|
||||
worktreePath
|
||||
);
|
||||
|
||||
if (!remoteRefsOutput.trim()) {
|
||||
return [];
|
||||
}
|
||||
|
||||
return remoteRefsOutput
|
||||
.trim()
|
||||
.split('\n')
|
||||
.map((ref) => {
|
||||
// Extract remote name from "remote/branch" format
|
||||
const slashIdx = ref.indexOf('/');
|
||||
return slashIdx !== -1 ? ref.slice(0, slashIdx) : ref;
|
||||
})
|
||||
.filter((name) => name.length > 0);
|
||||
} catch {
|
||||
// Ignore errors - return empty array
|
||||
return [];
|
||||
}
|
||||
}
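
A short sketch of the expected output shape, assuming cached refs for two hypothetical remotes named origin and upstream.

// Illustrative only: remote names and paths are hypothetical.
async function exampleRemotesWithBranch(worktreePath: string) {
  // With refs/remotes/origin/feature and refs/remotes/upstream/feature cached locally,
  // the helper reduces the matching refs to their remote names.
  const remotes = await getRemotesWithBranch(worktreePath, 'feature', true);
  // remotes => ['origin', 'upstream']

  // With hasAnyRemotes = false it short-circuits and never shells out to git.
  const none = await getRemotesWithBranch(worktreePath, 'feature', false);
  // none => []
}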
|
||||
|
||||
/**
|
||||
* Error thrown when one or more file copy operations fail during
|
||||
* `copyConfiguredFiles`. The caller can inspect `failures` for details.
|
||||
|
||||
@@ -23,6 +23,7 @@ export type {
|
||||
PhaseModelConfig,
|
||||
PhaseModelKey,
|
||||
PhaseModelEntry,
|
||||
FeatureTemplate,
|
||||
// Claude-compatible provider types
|
||||
ApiKeySource,
|
||||
ClaudeCompatibleProviderType,
|
||||
@@ -41,6 +42,7 @@ export {
|
||||
DEFAULT_CREDENTIALS,
|
||||
DEFAULT_PROJECT_SETTINGS,
|
||||
DEFAULT_PHASE_MODELS,
|
||||
DEFAULT_FEATURE_TEMPLATES,
|
||||
SETTINGS_VERSION,
|
||||
CREDENTIALS_VERSION,
|
||||
PROJECT_SETTINGS_VERSION,
|
||||
|
||||
@@ -168,7 +168,7 @@ describe('enhancement-prompts.ts', () => {
|
||||
const prompt = buildUserPrompt('improve', testText);
|
||||
expect(prompt).toContain('Example 1:');
|
||||
expect(prompt).toContain(testText);
|
||||
expect(prompt).toContain('Now, please enhance the following task description:');
|
||||
expect(prompt).toContain('Please enhance the following task description:');
|
||||
});
|
||||
|
||||
it('should build prompt without examples when includeExamples is false', () => {
|
||||
|
||||
@@ -0,0 +1,20 @@
|
||||
import { describe, it, expect } from 'vitest';
|
||||
import { normalizeThinkingLevelForModel } from '@automaker/types';
|
||||
|
||||
describe('normalizeThinkingLevelForModel', () => {
|
||||
it('preserves explicitly selected none for Opus models', () => {
|
||||
expect(normalizeThinkingLevelForModel('claude-opus', 'none')).toBe('none');
|
||||
});
|
||||
|
||||
it('falls back to none when Opus receives an unsupported manual thinking level', () => {
|
||||
expect(normalizeThinkingLevelForModel('claude-opus', 'medium')).toBe('none');
|
||||
});
|
||||
|
||||
it('keeps adaptive for Opus when adaptive is selected', () => {
|
||||
expect(normalizeThinkingLevelForModel('claude-opus', 'adaptive')).toBe('adaptive');
|
||||
});
|
||||
|
||||
it('preserves supported manual levels for non-Opus models', () => {
|
||||
expect(normalizeThinkingLevelForModel('claude-sonnet', 'high')).toBe('high');
|
||||
});
|
||||
});
|
||||
@@ -198,7 +198,7 @@ describe('claude-provider.ts', () => {
|
||||
expect(typeof callArgs.prompt).not.toBe('string');
|
||||
});
|
||||
|
||||
it('should use maxTurns default of 100', async () => {
|
||||
it('should use maxTurns default of 1000', async () => {
|
||||
vi.mocked(sdk.query).mockReturnValue(
|
||||
(async function* () {
|
||||
yield { type: 'text', text: 'test' };
|
||||
@@ -216,7 +216,7 @@ describe('claude-provider.ts', () => {
|
||||
expect(sdk.query).toHaveBeenCalledWith({
|
||||
prompt: 'Test',
|
||||
options: expect.objectContaining({
|
||||
maxTurns: 100,
|
||||
maxTurns: 1000,
|
||||
}),
|
||||
});
|
||||
});
|
||||
|
||||
@@ -170,6 +170,30 @@ describe('codex-provider.ts', () => {
|
||||
expect(call.args).toContain('--json');
|
||||
});
|
||||
|
||||
it('uses exec resume when sdkSessionId is provided', async () => {
|
||||
vi.mocked(spawnJSONLProcess).mockReturnValue((async function* () {})());
|
||||
|
||||
await collectAsyncGenerator(
|
||||
provider.executeQuery({
|
||||
prompt: 'Continue',
|
||||
model: 'gpt-5.2',
|
||||
cwd: '/tmp',
|
||||
sdkSessionId: 'codex-session-123',
|
||||
outputFormat: { type: 'json_schema', schema: { type: 'object', properties: {} } },
|
||||
codexSettings: { additionalDirs: ['/extra/dir'] },
|
||||
})
|
||||
);
|
||||
|
||||
const call = vi.mocked(spawnJSONLProcess).mock.calls[0][0];
|
||||
expect(call.args[0]).toBe('exec');
|
||||
expect(call.args[1]).toBe('resume');
|
||||
expect(call.args).toContain('codex-session-123');
|
||||
expect(call.args).toContain('--json');
|
||||
// Resume queries must not include --output-schema or --add-dir
|
||||
expect(call.args).not.toContain('--output-schema');
|
||||
expect(call.args).not.toContain('--add-dir');
|
||||
});
|
||||
|
||||
it('overrides approval policy when MCP auto-approval is enabled', async () => {
|
||||
// Note: With full-permissions always on (--dangerously-bypass-approvals-and-sandbox),
|
||||
// approval policy is bypassed, not configured via --config
|
||||
@@ -320,8 +344,10 @@ describe('codex-provider.ts', () => {
|
||||
);
|
||||
|
||||
const call = vi.mocked(spawnJSONLProcess).mock.calls[0][0];
|
||||
// High reasoning effort should have 3x the default timeout (90000ms)
|
||||
expect(call.timeout).toBe(DEFAULT_TIMEOUT_MS * REASONING_TIMEOUT_MULTIPLIERS.high);
|
||||
// High reasoning effort should have 3x the CLI base timeout (120000ms)
|
||||
// CODEX_CLI_TIMEOUT_MS = 120000, multiplier for 'high' = 3.0 → 360000ms
|
||||
const CODEX_CLI_TIMEOUT_MS = 120000;
|
||||
expect(call.timeout).toBe(CODEX_CLI_TIMEOUT_MS * REASONING_TIMEOUT_MULTIPLIERS.high);
|
||||
});
|
||||
|
||||
it('passes extended timeout for xhigh reasoning effort', async () => {
|
||||
@@ -357,8 +383,10 @@ describe('codex-provider.ts', () => {
|
||||
);
|
||||
|
||||
const call = vi.mocked(spawnJSONLProcess).mock.calls[0][0];
|
||||
// No reasoning effort should use the default timeout
|
||||
expect(call.timeout).toBe(DEFAULT_TIMEOUT_MS);
|
||||
// No reasoning effort should use the CLI base timeout (2 minutes)
|
||||
// CODEX_CLI_TIMEOUT_MS = 120000ms, no multiplier applied
|
||||
const CODEX_CLI_TIMEOUT_MS = 120000;
|
||||
expect(call.timeout).toBe(CODEX_CLI_TIMEOUT_MS);
|
||||
});
|
||||
});
|
||||
|
||||
|
||||
@@ -1,17 +1,35 @@
|
||||
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
|
||||
import { CopilotProvider, CopilotErrorCode } from '@/providers/copilot-provider.js';
|
||||
import { collectAsyncGenerator } from '../../utils/helpers.js';
|
||||
import { CopilotClient } from '@github/copilot-sdk';
|
||||
|
||||
const createSessionMock = vi.fn();
|
||||
const resumeSessionMock = vi.fn();
|
||||
|
||||
function createMockSession(sessionId = 'test-session') {
|
||||
let eventHandler: ((event: any) => void) | null = null;
|
||||
return {
|
||||
sessionId,
|
||||
send: vi.fn().mockImplementation(async () => {
|
||||
if (eventHandler) {
|
||||
eventHandler({ type: 'assistant.message', data: { content: 'hello' } });
|
||||
eventHandler({ type: 'session.idle' });
|
||||
}
|
||||
}),
|
||||
destroy: vi.fn().mockResolvedValue(undefined),
|
||||
on: vi.fn().mockImplementation((handler: (event: any) => void) => {
|
||||
eventHandler = handler;
|
||||
}),
|
||||
};
|
||||
}
|
||||
|
||||
// Mock the Copilot SDK
|
||||
vi.mock('@github/copilot-sdk', () => ({
|
||||
CopilotClient: vi.fn().mockImplementation(() => ({
|
||||
start: vi.fn().mockResolvedValue(undefined),
|
||||
stop: vi.fn().mockResolvedValue(undefined),
|
||||
createSession: vi.fn().mockResolvedValue({
|
||||
sessionId: 'test-session',
|
||||
send: vi.fn().mockResolvedValue(undefined),
|
||||
destroy: vi.fn().mockResolvedValue(undefined),
|
||||
on: vi.fn(),
|
||||
}),
|
||||
createSession: createSessionMock,
|
||||
resumeSession: resumeSessionMock,
|
||||
})),
|
||||
}));
|
||||
|
||||
@@ -49,6 +67,16 @@ describe('copilot-provider.ts', () => {
|
||||
|
||||
beforeEach(() => {
|
||||
vi.clearAllMocks();
|
||||
vi.mocked(CopilotClient).mockImplementation(function () {
|
||||
return {
|
||||
start: vi.fn().mockResolvedValue(undefined),
|
||||
stop: vi.fn().mockResolvedValue(undefined),
|
||||
createSession: createSessionMock,
|
||||
resumeSession: resumeSessionMock,
|
||||
} as any;
|
||||
});
|
||||
createSessionMock.mockResolvedValue(createMockSession());
|
||||
resumeSessionMock.mockResolvedValue(createMockSession('resumed-session'));
|
||||
|
||||
// Mock fs.existsSync for CLI path validation
|
||||
vi.mocked(fs.existsSync).mockReturnValue(true);
|
||||
@@ -369,6 +397,45 @@ describe('copilot-provider.ts', () => {
|
||||
});
|
||||
});
|
||||
|
||||
it('should use error code in fallback when session.error message is empty', () => {
|
||||
const event = {
|
||||
type: 'session.error',
|
||||
data: { message: '', code: 'RATE_LIMIT_EXCEEDED' },
|
||||
};
|
||||
|
||||
const result = provider.normalizeEvent(event);
|
||||
expect(result).not.toBeNull();
|
||||
expect(result!.type).toBe('error');
|
||||
expect(result!.error).toContain('RATE_LIMIT_EXCEEDED');
|
||||
expect(result!.error).not.toBe('Unknown error');
|
||||
});
|
||||
|
||||
it('should return generic "Copilot agent error" fallback when both message and code are empty', () => {
|
||||
const event = {
|
||||
type: 'session.error',
|
||||
data: { message: '', code: '' },
|
||||
};
|
||||
|
||||
const result = provider.normalizeEvent(event);
|
||||
expect(result).not.toBeNull();
|
||||
expect(result!.type).toBe('error');
|
||||
expect(result!.error).toBe('Copilot agent error');
|
||||
// Must NOT be the old opaque 'Unknown error'
|
||||
expect(result!.error).not.toBe('Unknown error');
|
||||
});
|
||||
|
||||
it('should return generic "Copilot agent error" fallback when data has no code field', () => {
|
||||
const event = {
|
||||
type: 'session.error',
|
||||
data: { message: '' },
|
||||
};
|
||||
|
||||
const result = provider.normalizeEvent(event);
|
||||
expect(result).not.toBeNull();
|
||||
expect(result!.type).toBe('error');
|
||||
expect(result!.error).toBe('Copilot agent error');
|
||||
});
|
||||
|
||||
it('should return null for unknown event types', () => {
|
||||
const event = { type: 'unknown.event' };
|
||||
|
||||
@@ -514,4 +581,45 @@ describe('copilot-provider.ts', () => {
|
||||
expect(todoInput.todos[0].status).toBe('completed');
|
||||
});
|
||||
});
|
||||
|
||||
describe('executeQuery resume behavior', () => {
|
||||
it('uses resumeSession when sdkSessionId is provided', async () => {
|
||||
const results = await collectAsyncGenerator(
|
||||
provider.executeQuery({
|
||||
prompt: 'Hello',
|
||||
model: 'claude-sonnet-4.6',
|
||||
cwd: '/tmp/project',
|
||||
sdkSessionId: 'session-123',
|
||||
})
|
||||
);
|
||||
|
||||
expect(resumeSessionMock).toHaveBeenCalledWith(
|
||||
'session-123',
|
||||
expect.objectContaining({ model: 'claude-sonnet-4.6', streaming: true })
|
||||
);
|
||||
expect(createSessionMock).not.toHaveBeenCalled();
|
||||
expect(results.some((msg) => msg.session_id === 'resumed-session')).toBe(true);
|
||||
});
|
||||
|
||||
it('falls back to createSession when resumeSession fails', async () => {
|
||||
resumeSessionMock.mockRejectedValueOnce(new Error('session not found'));
|
||||
createSessionMock.mockResolvedValueOnce(createMockSession('fresh-session'));
|
||||
|
||||
const results = await collectAsyncGenerator(
|
||||
provider.executeQuery({
|
||||
prompt: 'Hello',
|
||||
model: 'claude-sonnet-4.6',
|
||||
cwd: '/tmp/project',
|
||||
sdkSessionId: 'stale-session',
|
||||
})
|
||||
);
|
||||
|
||||
expect(resumeSessionMock).toHaveBeenCalledWith(
|
||||
'stale-session',
|
||||
expect.objectContaining({ model: 'claude-sonnet-4.6', streaming: true })
|
||||
);
|
||||
expect(createSessionMock).toHaveBeenCalledTimes(1);
|
||||
expect(results.some((msg) => msg.session_id === 'fresh-session')).toBe(true);
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
157
apps/server/tests/unit/providers/cursor-provider.test.ts
Normal file
@@ -0,0 +1,157 @@
import { describe, it, expect, beforeEach } from 'vitest';
|
||||
import { CursorProvider } from '@/providers/cursor-provider.js';
|
||||
|
||||
describe('cursor-provider.ts', () => {
|
||||
describe('buildCliArgs', () => {
|
||||
it('adds --resume when sdkSessionId is provided', () => {
|
||||
const provider = Object.create(CursorProvider.prototype) as CursorProvider & {
|
||||
cliPath?: string;
|
||||
};
|
||||
provider.cliPath = '/usr/local/bin/cursor-agent';
|
||||
|
||||
const args = provider.buildCliArgs({
|
||||
prompt: 'Continue the task',
|
||||
model: 'gpt-5',
|
||||
cwd: '/tmp/project',
|
||||
sdkSessionId: 'cursor-session-123',
|
||||
});
|
||||
|
||||
const resumeIndex = args.indexOf('--resume');
|
||||
expect(resumeIndex).toBeGreaterThan(-1);
|
||||
expect(args[resumeIndex + 1]).toBe('cursor-session-123');
|
||||
});
|
||||
|
||||
it('does not add --resume when sdkSessionId is omitted', () => {
|
||||
const provider = Object.create(CursorProvider.prototype) as CursorProvider & {
|
||||
cliPath?: string;
|
||||
};
|
||||
provider.cliPath = '/usr/local/bin/cursor-agent';
|
||||
|
||||
const args = provider.buildCliArgs({
|
||||
prompt: 'Start a new task',
|
||||
model: 'gpt-5',
|
||||
cwd: '/tmp/project',
|
||||
});
|
||||
|
||||
expect(args).not.toContain('--resume');
|
||||
});
|
||||
});
|
||||
|
||||
describe('normalizeEvent - result error handling', () => {
|
||||
let provider: CursorProvider;
|
||||
|
||||
beforeEach(() => {
|
||||
provider = Object.create(CursorProvider.prototype) as CursorProvider;
|
||||
});
|
||||
|
||||
it('returns error message from resultEvent.error when is_error=true', () => {
|
||||
const event = {
|
||||
type: 'result',
|
||||
is_error: true,
|
||||
error: 'Rate limit exceeded',
|
||||
result: '',
|
||||
subtype: 'error',
|
||||
duration_ms: 3000,
|
||||
session_id: 'sess-123',
|
||||
};
|
||||
|
||||
const msg = provider.normalizeEvent(event);
|
||||
|
||||
expect(msg).not.toBeNull();
|
||||
expect(msg!.type).toBe('error');
|
||||
expect(msg!.error).toBe('Rate limit exceeded');
|
||||
});
|
||||
|
||||
it('falls back to resultEvent.result when error field is empty and is_error=true', () => {
|
||||
const event = {
|
||||
type: 'result',
|
||||
is_error: true,
|
||||
error: '',
|
||||
result: 'Process terminated unexpectedly',
|
||||
subtype: 'error',
|
||||
duration_ms: 5000,
|
||||
session_id: 'sess-456',
|
||||
};
|
||||
|
||||
const msg = provider.normalizeEvent(event);
|
||||
|
||||
expect(msg).not.toBeNull();
|
||||
expect(msg!.type).toBe('error');
|
||||
expect(msg!.error).toBe('Process terminated unexpectedly');
|
||||
});
|
||||
|
||||
it('builds diagnostic fallback when both error and result are empty and is_error=true', () => {
|
||||
const event = {
|
||||
type: 'result',
|
||||
is_error: true,
|
||||
error: '',
|
||||
result: '',
|
||||
subtype: 'error',
|
||||
duration_ms: 5000,
|
||||
session_id: 'sess-789',
|
||||
};
|
||||
|
||||
const msg = provider.normalizeEvent(event);
|
||||
|
||||
expect(msg).not.toBeNull();
|
||||
expect(msg!.type).toBe('error');
|
||||
// Should contain diagnostic info rather than 'Unknown error'
|
||||
expect(msg!.error).toContain('5000ms');
|
||||
expect(msg!.error).toContain('sess-789');
|
||||
expect(msg!.error).not.toBe('Unknown error');
|
||||
});
|
||||
|
||||
it('preserves session_id in error message', () => {
|
||||
const event = {
|
||||
type: 'result',
|
||||
is_error: true,
|
||||
error: 'Timeout occurred',
|
||||
result: '',
|
||||
subtype: 'error',
|
||||
duration_ms: 30000,
|
||||
session_id: 'my-session-id',
|
||||
};
|
||||
|
||||
const msg = provider.normalizeEvent(event);
|
||||
|
||||
expect(msg!.session_id).toBe('my-session-id');
|
||||
});
|
||||
|
||||
it('uses "none" when session_id is missing from diagnostic fallback', () => {
|
||||
const event = {
|
||||
type: 'result',
|
||||
is_error: true,
|
||||
error: '',
|
||||
result: '',
|
||||
subtype: 'error',
|
||||
duration_ms: 5000,
|
||||
// session_id intentionally omitted
|
||||
};
|
||||
|
||||
const msg = provider.normalizeEvent(event);
|
||||
|
||||
expect(msg).not.toBeNull();
|
||||
expect(msg!.type).toBe('error');
|
||||
expect(msg!.error).toContain('none');
|
||||
expect(msg!.error).not.toContain('undefined');
|
||||
});
|
||||
|
||||
it('returns success result when is_error=false', () => {
|
||||
const event = {
|
||||
type: 'result',
|
||||
is_error: false,
|
||||
error: '',
|
||||
result: 'Completed successfully',
|
||||
subtype: 'success',
|
||||
duration_ms: 2000,
|
||||
session_id: 'sess-ok',
|
||||
};
|
||||
|
||||
const msg = provider.normalizeEvent(event);
|
||||
|
||||
expect(msg).not.toBeNull();
|
||||
expect(msg!.type).toBe('result');
|
||||
expect(msg!.subtype).toBe('success');
|
||||
});
|
||||
});
|
||||
});
|
||||
256
apps/server/tests/unit/providers/gemini-provider.test.ts
Normal file
@@ -0,0 +1,256 @@
import { describe, it, expect, beforeEach } from 'vitest';
|
||||
import { GeminiProvider } from '@/providers/gemini-provider.js';
|
||||
import type { ProviderMessage } from '@automaker/types';
|
||||
|
||||
describe('gemini-provider.ts', () => {
|
||||
let provider: GeminiProvider;
|
||||
|
||||
beforeEach(() => {
|
||||
provider = new GeminiProvider();
|
||||
});
|
||||
|
||||
describe('buildCliArgs', () => {
|
||||
it('should include --prompt with empty string to force headless mode', () => {
|
||||
const args = provider.buildCliArgs({
|
||||
prompt: 'Hello from Gemini',
|
||||
model: '2.5-flash',
|
||||
cwd: '/tmp/project',
|
||||
});
|
||||
|
||||
const promptIndex = args.indexOf('--prompt');
|
||||
expect(promptIndex).toBeGreaterThan(-1);
|
||||
expect(args[promptIndex + 1]).toBe('');
|
||||
});
|
||||
|
||||
it('should include --resume when sdkSessionId is provided', () => {
|
||||
const args = provider.buildCliArgs({
|
||||
prompt: 'Hello',
|
||||
model: '2.5-flash',
|
||||
cwd: '/tmp/project',
|
||||
sdkSessionId: 'gemini-session-123',
|
||||
});
|
||||
|
||||
const resumeIndex = args.indexOf('--resume');
|
||||
expect(resumeIndex).toBeGreaterThan(-1);
|
||||
expect(args[resumeIndex + 1]).toBe('gemini-session-123');
|
||||
});
|
||||
|
||||
it('should not include --resume when sdkSessionId is missing', () => {
|
||||
const args = provider.buildCliArgs({
|
||||
prompt: 'Hello',
|
||||
model: '2.5-flash',
|
||||
cwd: '/tmp/project',
|
||||
});
|
||||
|
||||
expect(args).not.toContain('--resume');
|
||||
});
|
||||
|
||||
it('should include --sandbox false for faster execution', () => {
|
||||
const args = provider.buildCliArgs({
|
||||
prompt: 'Hello',
|
||||
model: '2.5-flash',
|
||||
cwd: '/tmp/project',
|
||||
});
|
||||
|
||||
const sandboxIndex = args.indexOf('--sandbox');
|
||||
expect(sandboxIndex).toBeGreaterThan(-1);
|
||||
expect(args[sandboxIndex + 1]).toBe('false');
|
||||
});
|
||||
|
||||
it('should include --approval-mode yolo for non-interactive use', () => {
|
||||
const args = provider.buildCliArgs({
|
||||
prompt: 'Hello',
|
||||
model: '2.5-flash',
|
||||
cwd: '/tmp/project',
|
||||
});
|
||||
|
||||
const approvalIndex = args.indexOf('--approval-mode');
|
||||
expect(approvalIndex).toBeGreaterThan(-1);
|
||||
expect(args[approvalIndex + 1]).toBe('yolo');
|
||||
});
|
||||
|
||||
it('should include --output-format stream-json', () => {
|
||||
const args = provider.buildCliArgs({
|
||||
prompt: 'Hello',
|
||||
model: '2.5-flash',
|
||||
cwd: '/tmp/project',
|
||||
});
|
||||
|
||||
const formatIndex = args.indexOf('--output-format');
|
||||
expect(formatIndex).toBeGreaterThan(-1);
|
||||
expect(args[formatIndex + 1]).toBe('stream-json');
|
||||
});
|
||||
|
||||
it('should include --include-directories with cwd', () => {
|
||||
const args = provider.buildCliArgs({
|
||||
prompt: 'Hello',
|
||||
model: '2.5-flash',
|
||||
cwd: '/tmp/my-project',
|
||||
});
|
||||
|
||||
const dirIndex = args.indexOf('--include-directories');
|
||||
expect(dirIndex).toBeGreaterThan(-1);
|
||||
expect(args[dirIndex + 1]).toBe('/tmp/my-project');
|
||||
});
|
||||
|
||||
it('should add gemini- prefix to bare model names', () => {
|
||||
const args = provider.buildCliArgs({
|
||||
prompt: 'Hello',
|
||||
model: '2.5-flash',
|
||||
cwd: '/tmp/project',
|
||||
});
|
||||
|
||||
const modelIndex = args.indexOf('--model');
|
||||
expect(modelIndex).toBeGreaterThan(-1);
|
||||
expect(args[modelIndex + 1]).toBe('gemini-2.5-flash');
|
||||
});
|
||||
|
||||
it('should not double-prefix model names that already have gemini-', () => {
|
||||
const args = provider.buildCliArgs({
|
||||
prompt: 'Hello',
|
||||
model: 'gemini-2.5-pro',
|
||||
cwd: '/tmp/project',
|
||||
});
|
||||
|
||||
const modelIndex = args.indexOf('--model');
|
||||
expect(modelIndex).toBeGreaterThan(-1);
|
||||
expect(args[modelIndex + 1]).toBe('gemini-2.5-pro');
|
||||
});
|
||||
});
|
||||
|
||||
describe('normalizeEvent - error handling', () => {
|
||||
it('returns error from result event when status=error and error field is set', () => {
|
||||
const event = {
|
||||
type: 'result',
|
||||
status: 'error',
|
||||
error: 'Model overloaded',
|
||||
session_id: 'sess-gemini-1',
|
||||
stats: { duration_ms: 4000, total_tokens: 0 },
|
||||
};
|
||||
|
||||
const msg = provider.normalizeEvent(event) as ProviderMessage;
|
||||
|
||||
expect(msg).not.toBeNull();
|
||||
expect(msg.type).toBe('error');
|
||||
expect(msg.error).toBe('Model overloaded');
|
||||
expect(msg.session_id).toBe('sess-gemini-1');
|
||||
});
|
||||
|
||||
it('builds diagnostic fallback when result event has status=error but empty error field', () => {
|
||||
const event = {
|
||||
type: 'result',
|
||||
status: 'error',
|
||||
error: '',
|
||||
session_id: 'sess-gemini-2',
|
||||
stats: { duration_ms: 7500, total_tokens: 0 },
|
||||
};
|
||||
|
||||
const msg = provider.normalizeEvent(event) as ProviderMessage;
|
||||
|
||||
expect(msg).not.toBeNull();
|
||||
expect(msg.type).toBe('error');
|
||||
// Diagnostic info should be present instead of 'Unknown error'
|
||||
expect(msg.error).toContain('7500ms');
|
||||
expect(msg.error).toContain('sess-gemini-2');
|
||||
expect(msg.error).not.toBe('Unknown error');
|
||||
});
|
||||
|
||||
it('builds fallback with "unknown" duration when stats are missing', () => {
|
||||
const event = {
|
||||
type: 'result',
|
||||
status: 'error',
|
||||
error: '',
|
||||
session_id: 'sess-gemini-nostats',
|
||||
// no stats field
|
||||
};
|
||||
|
||||
const msg = provider.normalizeEvent(event) as ProviderMessage;
|
||||
|
||||
expect(msg).not.toBeNull();
|
||||
expect(msg.type).toBe('error');
|
||||
expect(msg.error).toContain('unknown');
|
||||
});
|
||||
|
||||
it('returns error from standalone error event with error field set', () => {
|
||||
const event = {
|
||||
type: 'error',
|
||||
error: 'API key invalid',
|
||||
session_id: 'sess-gemini-3',
|
||||
};
|
||||
|
||||
const msg = provider.normalizeEvent(event) as ProviderMessage;
|
||||
|
||||
expect(msg).not.toBeNull();
|
||||
expect(msg.type).toBe('error');
|
||||
expect(msg.error).toBe('API key invalid');
|
||||
});
|
||||
|
||||
it('builds diagnostic fallback when standalone error event has empty error field', () => {
|
||||
const event = {
|
||||
type: 'error',
|
||||
error: '',
|
||||
session_id: 'sess-gemini-empty',
|
||||
};
|
||||
|
||||
const msg = provider.normalizeEvent(event) as ProviderMessage;
|
||||
|
||||
expect(msg).not.toBeNull();
|
||||
expect(msg.type).toBe('error');
|
||||
// Should include session_id, not just 'Unknown error'
|
||||
expect(msg.error).toContain('sess-gemini-empty');
|
||||
expect(msg.error).not.toBe('Unknown error');
|
||||
});
|
||||
|
||||
it('builds fallback mentioning "none" when session_id is missing from error event', () => {
|
||||
const event = {
|
||||
type: 'error',
|
||||
error: '',
|
||||
// no session_id
|
||||
};
|
||||
|
||||
const msg = provider.normalizeEvent(event) as ProviderMessage;
|
||||
|
||||
expect(msg).not.toBeNull();
|
||||
expect(msg.type).toBe('error');
|
||||
expect(msg.error).toContain('none');
|
||||
});
|
||||
|
||||
it('uses consistent "Gemini agent failed" label for both result and error event fallbacks', () => {
|
||||
const resultEvent = {
|
||||
type: 'result',
|
||||
status: 'error',
|
||||
error: '',
|
||||
session_id: 'sess-r',
|
||||
stats: { duration_ms: 1000 },
|
||||
};
|
||||
const errorEvent = {
|
||||
type: 'error',
|
||||
error: '',
|
||||
session_id: 'sess-e',
|
||||
};
|
||||
|
||||
const resultMsg = provider.normalizeEvent(resultEvent) as ProviderMessage;
|
||||
const errorMsg = provider.normalizeEvent(errorEvent) as ProviderMessage;
|
||||
|
||||
// Both fallback messages should use the same "Gemini agent failed" prefix
|
||||
expect(resultMsg.error).toContain('Gemini agent failed');
|
||||
expect(errorMsg.error).toContain('Gemini agent failed');
|
||||
});
|
||||
|
||||
it('returns success result when result event has status=success', () => {
|
||||
const event = {
|
||||
type: 'result',
|
||||
status: 'success',
|
||||
error: '',
|
||||
session_id: 'sess-gemini-ok',
|
||||
stats: { duration_ms: 1200, total_tokens: 500 },
|
||||
};
|
||||
|
||||
const msg = provider.normalizeEvent(event) as ProviderMessage;
|
||||
|
||||
expect(msg).not.toBeNull();
|
||||
expect(msg.type).toBe('result');
|
||||
expect(msg.subtype).toBe('success');
|
||||
});
|
||||
});
|
||||
});
|
||||
218
apps/server/tests/unit/routes/backlog-plan/generate-plan.test.ts
Normal file
@@ -0,0 +1,218 @@
import { beforeEach, describe, expect, it, vi } from 'vitest';
|
||||
import type { BacklogPlanResult, ProviderMessage } from '@automaker/types';
|
||||
|
||||
const {
|
||||
mockGetAll,
|
||||
mockExecuteQuery,
|
||||
mockSaveBacklogPlan,
|
||||
mockSetRunningState,
|
||||
mockSetRunningDetails,
|
||||
mockGetPromptCustomization,
|
||||
mockGetAutoLoadClaudeMdSetting,
|
||||
mockGetUseClaudeCodeSystemPromptSetting,
|
||||
} = vi.hoisted(() => ({
|
||||
mockGetAll: vi.fn(),
|
||||
mockExecuteQuery: vi.fn(),
|
||||
mockSaveBacklogPlan: vi.fn(),
|
||||
mockSetRunningState: vi.fn(),
|
||||
mockSetRunningDetails: vi.fn(),
|
||||
mockGetPromptCustomization: vi.fn(),
|
||||
mockGetAutoLoadClaudeMdSetting: vi.fn(),
|
||||
mockGetUseClaudeCodeSystemPromptSetting: vi.fn(),
|
||||
}));
|
||||
|
||||
vi.mock('@/services/feature-loader.js', () => ({
|
||||
FeatureLoader: class {
|
||||
getAll = mockGetAll;
|
||||
},
|
||||
}));
|
||||
|
||||
vi.mock('@/providers/provider-factory.js', () => ({
|
||||
ProviderFactory: {
|
||||
getProviderForModel: vi.fn(() => ({
|
||||
executeQuery: mockExecuteQuery,
|
||||
})),
|
||||
},
|
||||
}));
|
||||
|
||||
vi.mock('@/routes/backlog-plan/common.js', () => ({
|
||||
logger: {
|
||||
debug: vi.fn(),
|
||||
info: vi.fn(),
|
||||
warn: vi.fn(),
|
||||
error: vi.fn(),
|
||||
},
|
||||
setRunningState: mockSetRunningState,
|
||||
setRunningDetails: mockSetRunningDetails,
|
||||
getErrorMessage: (error: unknown) => (error instanceof Error ? error.message : String(error)),
|
||||
saveBacklogPlan: mockSaveBacklogPlan,
|
||||
}));
|
||||
|
||||
vi.mock('@/lib/settings-helpers.js', () => ({
|
||||
getPromptCustomization: mockGetPromptCustomization,
|
||||
getAutoLoadClaudeMdSetting: mockGetAutoLoadClaudeMdSetting,
|
||||
getUseClaudeCodeSystemPromptSetting: mockGetUseClaudeCodeSystemPromptSetting,
|
||||
getPhaseModelWithOverrides: vi.fn(),
|
||||
}));
|
||||
|
||||
import { generateBacklogPlan } from '@/routes/backlog-plan/generate-plan.js';
|
||||
|
||||
function createMockEvents() {
|
||||
return {
|
||||
emit: vi.fn(),
|
||||
};
|
||||
}
|
||||
|
||||
describe('generateBacklogPlan', () => {
|
||||
beforeEach(() => {
|
||||
vi.clearAllMocks();
|
||||
|
||||
mockGetAll.mockResolvedValue([]);
|
||||
mockGetPromptCustomization.mockResolvedValue({
|
||||
backlogPlan: {
|
||||
systemPrompt: 'System instructions',
|
||||
userPromptTemplate:
|
||||
'Current features:\n{{currentFeatures}}\n\nUser request:\n{{userRequest}}',
|
||||
},
|
||||
});
|
||||
mockGetAutoLoadClaudeMdSetting.mockResolvedValue(false);
|
||||
mockGetUseClaudeCodeSystemPromptSetting.mockResolvedValue(true);
|
||||
});
|
||||
|
||||
it('salvages valid streamed JSON when Claude process exits with code 1', async () => {
|
||||
const partialResult: BacklogPlanResult = {
|
||||
changes: [
|
||||
{
|
||||
type: 'add',
|
||||
feature: {
|
||||
title: 'Add signup form',
|
||||
description: 'Create signup UI and validation',
|
||||
category: 'frontend',
|
||||
},
|
||||
reason: 'Required for user onboarding',
|
||||
},
|
||||
],
|
||||
summary: 'Adds signup feature to the backlog',
|
||||
dependencyUpdates: [],
|
||||
};
|
||||
|
||||
const responseJson = JSON.stringify(partialResult);
|
||||
|
||||
async function* streamWithExitError(): AsyncGenerator<ProviderMessage> {
|
||||
yield {
|
||||
type: 'assistant',
|
||||
message: {
|
||||
role: 'assistant',
|
||||
content: [{ type: 'text', text: responseJson }],
|
||||
},
|
||||
};
|
||||
throw new Error('Claude Code process exited with code 1');
|
||||
}
|
||||
|
||||
mockExecuteQuery.mockReturnValueOnce(streamWithExitError());
|
||||
|
||||
const events = createMockEvents();
|
||||
const abortController = new AbortController();
|
||||
|
||||
const result = await generateBacklogPlan(
|
||||
'/tmp/project',
|
||||
'Please add a signup feature',
|
||||
events as any,
|
||||
abortController,
|
||||
undefined,
|
||||
'claude-opus'
|
||||
);
|
||||
|
||||
expect(mockExecuteQuery).toHaveBeenCalledTimes(1);
|
||||
expect(result).toEqual(partialResult);
|
||||
expect(mockSaveBacklogPlan).toHaveBeenCalledWith(
|
||||
'/tmp/project',
|
||||
expect.objectContaining({
|
||||
prompt: 'Please add a signup feature',
|
||||
model: 'claude-opus-4-6',
|
||||
result: partialResult,
|
||||
})
|
||||
);
|
||||
expect(events.emit).toHaveBeenCalledWith('backlog-plan:event', {
|
||||
type: 'backlog_plan_complete',
|
||||
result: partialResult,
|
||||
});
|
||||
expect(mockSetRunningState).toHaveBeenCalledWith(false, null);
|
||||
expect(mockSetRunningDetails).toHaveBeenCalledWith(null);
|
||||
});
|
||||
|
||||
it('prefers parseable provider result over longer non-JSON accumulated text on exit', async () => {
|
||||
const recoveredResult: BacklogPlanResult = {
|
||||
changes: [
|
||||
{
|
||||
type: 'add',
|
||||
feature: {
|
||||
title: 'Add reset password flow',
|
||||
description: 'Implement reset password request and token validation UI',
|
||||
category: 'frontend',
|
||||
},
|
||||
reason: 'Supports account recovery',
|
||||
},
|
||||
],
|
||||
summary: 'Adds password reset capability',
|
||||
dependencyUpdates: [],
|
||||
};
|
||||
|
||||
const validProviderResult = JSON.stringify(recoveredResult);
|
||||
const invalidAccumulatedText = `${validProviderResult}\n\nAdditional commentary that breaks raw JSON parsing.`;
|
||||
|
||||
async function* streamWithResultThenExit(): AsyncGenerator<ProviderMessage> {
|
||||
yield {
|
||||
type: 'assistant',
|
||||
message: {
|
||||
role: 'assistant',
|
||||
content: [{ type: 'text', text: invalidAccumulatedText }],
|
||||
},
|
||||
};
|
||||
yield {
|
||||
type: 'result',
|
||||
subtype: 'success',
|
||||
duration_ms: 10,
|
||||
duration_api_ms: 10,
|
||||
is_error: false,
|
||||
num_turns: 1,
|
||||
result: validProviderResult,
|
||||
session_id: 'session-1',
|
||||
total_cost_usd: 0,
|
||||
usage: {
|
||||
input_tokens: 10,
|
||||
cache_creation_input_tokens: 0,
|
||||
cache_read_input_tokens: 0,
|
||||
output_tokens: 10,
|
||||
server_tool_use: {
|
||||
web_search_requests: 0,
|
||||
},
|
||||
service_tier: 'standard',
|
||||
},
|
||||
};
|
||||
throw new Error('Claude Code process exited with code 1');
|
||||
}
|
||||
|
||||
mockExecuteQuery.mockReturnValueOnce(streamWithResultThenExit());
|
||||
|
||||
const events = createMockEvents();
|
||||
const abortController = new AbortController();
|
||||
|
||||
const result = await generateBacklogPlan(
|
||||
'/tmp/project',
|
||||
'Add password reset support',
|
||||
events as any,
|
||||
abortController,
|
||||
undefined,
|
||||
'claude-opus'
|
||||
);
|
||||
|
||||
expect(result).toEqual(recoveredResult);
|
||||
expect(mockSaveBacklogPlan).toHaveBeenCalledWith(
|
||||
'/tmp/project',
|
||||
expect.objectContaining({
|
||||
result: recoveredResult,
|
||||
})
|
||||
);
|
||||
});
|
||||
});
|
||||
@@ -685,6 +685,309 @@ describe('AgentExecutor', () => {
|
||||
await expect(executor.execute(options, callbacks)).rejects.toThrow('API rate limit exceeded');
|
||||
});
|
||||
|
||||
it('should throw "Unknown error" when provider stream yields error with empty message', async () => {
|
||||
const executor = new AgentExecutor(
|
||||
mockEventBus,
|
||||
mockFeatureStateManager,
|
||||
mockPlanApprovalService,
|
||||
mockSettingsService
|
||||
);
|
||||
|
||||
const mockProvider = {
|
||||
getName: () => 'mock',
|
||||
executeQuery: vi.fn().mockImplementation(function* () {
|
||||
yield {
|
||||
type: 'error',
|
||||
error: '',
|
||||
session_id: 'sess-123',
|
||||
};
|
||||
}),
|
||||
} as unknown as BaseProvider;
|
||||
|
||||
const options: AgentExecutionOptions = {
|
||||
workDir: '/test',
|
||||
featureId: 'test-feature',
|
||||
prompt: 'Test prompt',
|
||||
projectPath: '/project',
|
||||
abortController: new AbortController(),
|
||||
provider: mockProvider,
|
||||
effectiveBareModel: 'claude-sonnet-4-6',
|
||||
planningMode: 'skip',
|
||||
};
|
||||
|
||||
const callbacks = {
|
||||
waitForApproval: vi.fn().mockResolvedValue({ approved: true }),
|
||||
saveFeatureSummary: vi.fn(),
|
||||
updateFeatureSummary: vi.fn(),
|
||||
buildTaskPrompt: vi.fn().mockReturnValue('task prompt'),
|
||||
};
|
||||
|
||||
await expect(executor.execute(options, callbacks)).rejects.toThrow('Unknown error');
|
||||
});
|
||||
|
||||
it('should throw with sanitized error when provider yields ANSI-decorated error', async () => {
|
||||
const executor = new AgentExecutor(
|
||||
mockEventBus,
|
||||
mockFeatureStateManager,
|
||||
mockPlanApprovalService,
|
||||
mockSettingsService
|
||||
);
|
||||
|
||||
const mockProvider = {
|
||||
getName: () => 'mock',
|
||||
executeQuery: vi.fn().mockImplementation(function* () {
|
||||
yield {
|
||||
type: 'error',
|
||||
// ANSI color codes + "Error: " prefix that should be stripped
|
||||
error: '\x1b[31mError: Connection refused\x1b[0m',
|
||||
};
|
||||
}),
|
||||
} as unknown as BaseProvider;
|
||||
|
||||
const options: AgentExecutionOptions = {
|
||||
workDir: '/test',
|
||||
featureId: 'test-feature',
|
||||
prompt: 'Test prompt',
|
||||
projectPath: '/project',
|
||||
abortController: new AbortController(),
|
||||
provider: mockProvider,
|
||||
effectiveBareModel: 'claude-sonnet-4-6',
|
||||
planningMode: 'skip',
|
||||
};
|
||||
|
||||
const callbacks = {
|
||||
waitForApproval: vi.fn().mockResolvedValue({ approved: true }),
|
||||
saveFeatureSummary: vi.fn(),
|
||||
updateFeatureSummary: vi.fn(),
|
||||
buildTaskPrompt: vi.fn().mockReturnValue('task prompt'),
|
||||
};
|
||||
|
||||
// Should strip ANSI codes and "Error: " prefix
|
||||
await expect(executor.execute(options, callbacks)).rejects.toThrow('Connection refused');
|
||||
});
|
||||
|
||||
it('should throw when result subtype is error_max_turns', async () => {
|
||||
const executor = new AgentExecutor(
|
||||
mockEventBus,
|
||||
mockFeatureStateManager,
|
||||
mockPlanApprovalService,
|
||||
mockSettingsService
|
||||
);
|
||||
|
||||
const mockProvider = {
|
||||
getName: () => 'mock',
|
||||
executeQuery: vi.fn().mockImplementation(function* () {
|
||||
yield {
|
||||
type: 'assistant',
|
||||
message: {
|
||||
content: [{ type: 'text', text: 'Working on it...' }],
|
||||
},
|
||||
};
|
||||
yield {
|
||||
type: 'result',
|
||||
subtype: 'error_max_turns',
|
||||
session_id: 'sess-456',
|
||||
};
|
||||
}),
|
||||
} as unknown as BaseProvider;
|
||||
|
||||
const options: AgentExecutionOptions = {
|
||||
workDir: '/test',
|
||||
featureId: 'test-feature',
|
||||
prompt: 'Test prompt',
|
||||
projectPath: '/project',
|
||||
abortController: new AbortController(),
|
||||
provider: mockProvider,
|
||||
effectiveBareModel: 'claude-sonnet-4-6',
|
||||
planningMode: 'skip',
|
||||
};
|
||||
|
||||
const callbacks = {
|
||||
waitForApproval: vi.fn().mockResolvedValue({ approved: true }),
|
||||
saveFeatureSummary: vi.fn(),
|
||||
updateFeatureSummary: vi.fn(),
|
||||
buildTaskPrompt: vi.fn().mockReturnValue('task prompt'),
|
||||
};
|
||||
|
||||
await expect(executor.execute(options, callbacks)).rejects.toThrow(
|
||||
'Agent execution ended with: error_max_turns'
|
||||
);
|
||||
});
|
||||
|
||||
it('should throw when result subtype is error_during_execution', async () => {
|
||||
const executor = new AgentExecutor(
|
||||
mockEventBus,
|
||||
mockFeatureStateManager,
|
||||
mockPlanApprovalService,
|
||||
mockSettingsService
|
||||
);
|
||||
|
||||
const mockProvider = {
|
||||
getName: () => 'mock',
|
||||
executeQuery: vi.fn().mockImplementation(function* () {
|
||||
yield {
|
||||
type: 'result',
|
||||
subtype: 'error_during_execution',
|
||||
session_id: 'sess-789',
|
||||
};
|
||||
}),
|
||||
} as unknown as BaseProvider;
|
||||
|
||||
const options: AgentExecutionOptions = {
|
||||
workDir: '/test',
|
||||
featureId: 'test-feature',
|
||||
prompt: 'Test prompt',
|
||||
projectPath: '/project',
|
||||
abortController: new AbortController(),
|
||||
provider: mockProvider,
|
||||
effectiveBareModel: 'claude-sonnet-4-6',
|
||||
planningMode: 'skip',
|
||||
};
|
||||
|
||||
const callbacks = {
|
||||
waitForApproval: vi.fn().mockResolvedValue({ approved: true }),
|
||||
saveFeatureSummary: vi.fn(),
|
||||
updateFeatureSummary: vi.fn(),
|
||||
buildTaskPrompt: vi.fn().mockReturnValue('task prompt'),
|
||||
};
|
||||
|
||||
await expect(executor.execute(options, callbacks)).rejects.toThrow(
|
||||
'Agent execution ended with: error_during_execution'
|
||||
);
|
||||
});
|
||||
|
||||
it('should throw when result subtype is error_max_structured_output_retries', async () => {
|
||||
const executor = new AgentExecutor(
|
||||
mockEventBus,
|
||||
mockFeatureStateManager,
|
||||
mockPlanApprovalService,
|
||||
mockSettingsService
|
||||
);
|
||||
|
||||
const mockProvider = {
|
||||
getName: () => 'mock',
|
||||
executeQuery: vi.fn().mockImplementation(function* () {
|
||||
yield {
|
||||
type: 'result',
|
||||
subtype: 'error_max_structured_output_retries',
|
||||
};
|
||||
}),
|
||||
} as unknown as BaseProvider;
|
||||
|
||||
const options: AgentExecutionOptions = {
|
||||
workDir: '/test',
|
||||
featureId: 'test-feature',
|
||||
prompt: 'Test prompt',
|
||||
projectPath: '/project',
|
||||
abortController: new AbortController(),
|
||||
provider: mockProvider,
|
||||
effectiveBareModel: 'claude-sonnet-4-6',
|
||||
planningMode: 'skip',
|
||||
};
|
||||
|
||||
const callbacks = {
|
||||
waitForApproval: vi.fn().mockResolvedValue({ approved: true }),
|
||||
saveFeatureSummary: vi.fn(),
|
||||
updateFeatureSummary: vi.fn(),
|
||||
buildTaskPrompt: vi.fn().mockReturnValue('task prompt'),
|
||||
};
|
||||
|
||||
await expect(executor.execute(options, callbacks)).rejects.toThrow(
|
||||
'Agent execution ended with: error_max_structured_output_retries'
|
||||
);
|
||||
});
|
||||
|
||||
it('should throw when result subtype is error_max_budget_usd', async () => {
|
||||
const executor = new AgentExecutor(
|
||||
mockEventBus,
|
||||
mockFeatureStateManager,
|
||||
mockPlanApprovalService,
|
||||
mockSettingsService
|
||||
);
|
||||
|
||||
const mockProvider = {
|
||||
getName: () => 'mock',
|
||||
executeQuery: vi.fn().mockImplementation(function* () {
|
||||
yield {
|
||||
type: 'result',
|
||||
subtype: 'error_max_budget_usd',
|
||||
session_id: 'sess-budget',
|
||||
};
|
||||
}),
|
||||
} as unknown as BaseProvider;
|
||||
|
||||
const options: AgentExecutionOptions = {
|
||||
workDir: '/test',
|
||||
featureId: 'test-feature',
|
||||
prompt: 'Test prompt',
|
||||
projectPath: '/project',
|
||||
abortController: new AbortController(),
|
||||
provider: mockProvider,
|
||||
effectiveBareModel: 'claude-sonnet-4-6',
|
||||
planningMode: 'skip',
|
||||
};
|
||||
|
||||
const callbacks = {
|
||||
waitForApproval: vi.fn().mockResolvedValue({ approved: true }),
|
||||
saveFeatureSummary: vi.fn(),
|
||||
updateFeatureSummary: vi.fn(),
|
||||
buildTaskPrompt: vi.fn().mockReturnValue('task prompt'),
|
||||
};
|
||||
|
||||
await expect(executor.execute(options, callbacks)).rejects.toThrow(
|
||||
'Agent execution ended with: error_max_budget_usd'
|
||||
);
|
||||
});
|
||||
|
||||
it('should NOT throw when result subtype is success', async () => {
|
||||
const executor = new AgentExecutor(
|
||||
mockEventBus,
|
||||
mockFeatureStateManager,
|
||||
mockPlanApprovalService,
|
||||
mockSettingsService
|
||||
);
|
||||
|
||||
const mockProvider = {
|
||||
getName: () => 'mock',
|
||||
executeQuery: vi.fn().mockImplementation(function* () {
|
||||
yield {
|
||||
type: 'assistant',
|
||||
message: {
|
||||
content: [{ type: 'text', text: 'Done!' }],
|
||||
},
|
||||
};
|
||||
yield {
|
||||
type: 'result',
|
||||
subtype: 'success',
|
||||
session_id: 'sess-ok',
|
||||
};
|
||||
}),
|
||||
} as unknown as BaseProvider;
|
||||
|
||||
const options: AgentExecutionOptions = {
|
||||
workDir: '/test',
|
||||
featureId: 'test-feature',
|
||||
prompt: 'Test prompt',
|
||||
projectPath: '/project',
|
||||
abortController: new AbortController(),
|
||||
provider: mockProvider,
|
||||
effectiveBareModel: 'claude-sonnet-4-6',
|
||||
planningMode: 'skip',
|
||||
};
|
||||
|
||||
const callbacks = {
|
||||
waitForApproval: vi.fn().mockResolvedValue({ approved: true }),
|
||||
saveFeatureSummary: vi.fn(),
|
||||
updateFeatureSummary: vi.fn(),
|
||||
buildTaskPrompt: vi.fn().mockReturnValue('task prompt'),
|
||||
};
|
||||
|
||||
// Should resolve without throwing
|
||||
const result = await executor.execute(options, callbacks);
|
||||
expect(result.aborted).toBe(false);
|
||||
expect(result.responseText).toContain('Done!');
|
||||
});
|
||||
|
||||
it('should throw error when authentication fails in response', async () => {
|
||||
const executor = new AgentExecutor(
|
||||
mockEventBus,
|
||||
|
||||
192
apps/server/tests/unit/services/agent-output-validation.test.ts
Normal file
192
apps/server/tests/unit/services/agent-output-validation.test.ts
Normal file
@@ -0,0 +1,192 @@
|
||||
import { describe, it, expect } from 'vitest';
|
||||
|
||||
/**
|
||||
* Contract tests verifying the tool marker format used by agent-executor
|
||||
* (which writes agent output) and execution-service (which reads it to
|
||||
* determine if the agent did meaningful work).
|
||||
*
|
||||
* The agent-executor writes: `\n🔧 Tool: ${block.name}\n`
|
||||
* The execution-service checks: `agentOutput.includes('🔧 Tool:')`
|
||||
*
|
||||
* These tests ensure the marker format contract stays consistent and
|
||||
* document the exact detection logic used for status determination.
|
||||
*/
|
||||
|
||||
// The exact marker prefix that execution-service searches for
|
||||
const TOOL_MARKER = '🔧 Tool:';
|
||||
|
||||
// Minimum output length threshold for "meaningful work"
|
||||
const MIN_OUTPUT_LENGTH = 200;
|
||||
|
||||
/**
|
||||
* Simulates the agent-executor's tool_use output format.
|
||||
* See: agent-executor.ts line ~293
|
||||
*/
|
||||
function formatToolUseBlock(toolName: string, input?: Record<string, unknown>): string {
|
||||
let output = `\n${TOOL_MARKER} ${toolName}\n`;
|
||||
if (input) output += `Input: ${JSON.stringify(input, null, 2)}\n`;
|
||||
return output;
|
||||
}
|
||||
|
||||
/**
|
||||
* Simulates the execution-service's output validation logic.
|
||||
* See: execution-service.ts lines ~427-429
|
||||
*/
|
||||
function validateAgentOutput(
|
||||
agentOutput: string,
|
||||
skipTests: boolean
|
||||
): 'verified' | 'waiting_approval' {
|
||||
const hasToolUsage = agentOutput.includes(TOOL_MARKER);
|
||||
const hasMinimalOutput = agentOutput.trim().length < MIN_OUTPUT_LENGTH;
|
||||
const agentDidWork = hasToolUsage && !hasMinimalOutput;
|
||||
|
||||
if (skipTests) return 'waiting_approval';
|
||||
if (!agentDidWork) return 'waiting_approval';
|
||||
return 'verified';
|
||||
}
|
||||
|
||||
describe('Agent Output Validation - Contract Tests', () => {
|
||||
describe('tool marker format contract', () => {
|
||||
it('agent-executor tool format contains the expected marker', () => {
|
||||
const toolOutput = formatToolUseBlock('Read', { file_path: '/src/index.ts' });
|
||||
expect(toolOutput).toContain(TOOL_MARKER);
|
||||
});
|
||||
|
||||
it('agent-executor tool format includes tool name after marker', () => {
|
||||
const toolOutput = formatToolUseBlock('Edit', {
|
||||
file_path: '/src/app.ts',
|
||||
old_string: 'foo',
|
||||
new_string: 'bar',
|
||||
});
|
||||
expect(toolOutput).toContain('🔧 Tool: Edit');
|
||||
});
|
||||
|
||||
it('agent-executor tool format includes JSON input', () => {
|
||||
const input = { file_path: '/src/index.ts' };
|
||||
const toolOutput = formatToolUseBlock('Read', input);
|
||||
expect(toolOutput).toContain('Input: ');
|
||||
expect(toolOutput).toContain('"file_path": "/src/index.ts"');
|
||||
});
|
||||
|
||||
it('agent-executor tool format works without input', () => {
|
||||
const toolOutput = formatToolUseBlock('Bash');
|
||||
expect(toolOutput).toContain('🔧 Tool: Bash');
|
||||
expect(toolOutput).not.toContain('Input:');
|
||||
});
|
||||
|
||||
it('marker includes colon and space to avoid false positives', () => {
|
||||
// Ensure the marker is specific enough to avoid matching other emoji patterns
|
||||
expect(TOOL_MARKER).toBe('🔧 Tool:');
|
||||
expect(TOOL_MARKER).toContain(':');
|
||||
});
|
||||
});
|
||||
|
||||
describe('output validation logic', () => {
|
||||
it('verified: tool usage + sufficient output', () => {
|
||||
const output =
|
||||
'Starting implementation of the new feature...\n' +
|
||||
formatToolUseBlock('Read', { file_path: '/src/index.ts' }) +
|
||||
'I can see the existing code. Let me make the needed changes.\n' +
|
||||
formatToolUseBlock('Edit', { file_path: '/src/index.ts' }) +
|
||||
'Changes complete. The implementation adds new validation logic and tests.';
|
||||
expect(output.trim().length).toBeGreaterThanOrEqual(MIN_OUTPUT_LENGTH);
|
||||
|
||||
expect(validateAgentOutput(output, false)).toBe('verified');
|
||||
});
|
||||
|
||||
it('waiting_approval: no tool markers regardless of length', () => {
|
||||
const longOutput = 'I analyzed the codebase. '.repeat(50);
|
||||
expect(longOutput.trim().length).toBeGreaterThan(MIN_OUTPUT_LENGTH);
|
||||
|
||||
expect(validateAgentOutput(longOutput, false)).toBe('waiting_approval');
|
||||
});
|
||||
|
||||
it('waiting_approval: tool markers but insufficient length', () => {
|
||||
const shortOutput = formatToolUseBlock('Read', { file_path: '/src/a.ts' });
|
||||
expect(shortOutput.trim().length).toBeLessThan(MIN_OUTPUT_LENGTH);
|
||||
|
||||
expect(validateAgentOutput(shortOutput, false)).toBe('waiting_approval');
|
||||
});
|
||||
|
||||
it('waiting_approval: empty output', () => {
|
||||
expect(validateAgentOutput('', false)).toBe('waiting_approval');
|
||||
});
|
||||
|
||||
it('waiting_approval: skipTests always overrides', () => {
|
||||
const goodOutput =
|
||||
'Starting...\n' +
|
||||
formatToolUseBlock('Read', { file_path: '/src/index.ts' }) +
|
||||
formatToolUseBlock('Edit', { file_path: '/src/index.ts' }) +
|
||||
'Done implementing. '.repeat(15);
|
||||
expect(goodOutput.trim().length).toBeGreaterThanOrEqual(MIN_OUTPUT_LENGTH);
|
||||
|
||||
expect(validateAgentOutput(goodOutput, true)).toBe('waiting_approval');
|
||||
});
|
||||
|
||||
it('boundary: exactly MIN_OUTPUT_LENGTH chars with tool is verified', () => {
|
||||
const tool = formatToolUseBlock('Read');
|
||||
const padding = 'x'.repeat(MIN_OUTPUT_LENGTH - tool.trim().length);
|
||||
const output = tool + padding;
|
||||
expect(output.trim().length).toBeGreaterThanOrEqual(MIN_OUTPUT_LENGTH);
|
||||
|
||||
expect(validateAgentOutput(output, false)).toBe('verified');
|
||||
});
|
||||
|
||||
it('boundary: MIN_OUTPUT_LENGTH - 1 chars with tool is waiting_approval', () => {
|
||||
const marker = `${TOOL_MARKER} Read\n`;
|
||||
const padding = 'x'.repeat(MIN_OUTPUT_LENGTH - 1 - marker.length);
|
||||
const output = marker + padding;
|
||||
expect(output.trim().length).toBe(MIN_OUTPUT_LENGTH - 1);
|
||||
|
||||
expect(validateAgentOutput(output, false)).toBe('waiting_approval');
|
||||
});
|
||||
});
|
||||
|
||||
describe('realistic provider scenarios', () => {
|
||||
it('Claude SDK agent with multiple tools → verified', () => {
|
||||
let output = "I'll implement the feature.\n\n";
|
||||
output += formatToolUseBlock('Read', { file_path: '/src/components/App.tsx' });
|
||||
output += 'I see the component. Let me update it.\n\n';
|
||||
output += formatToolUseBlock('Edit', {
|
||||
file_path: '/src/components/App.tsx',
|
||||
old_string: 'const App = () => {',
|
||||
new_string: 'const App: React.FC = () => {',
|
||||
});
|
||||
output += 'Done. The component is now typed correctly.\n';
|
||||
|
||||
expect(validateAgentOutput(output, false)).toBe('verified');
|
||||
});
|
||||
|
||||
it('Cursor CLI quick exit (no tools) → waiting_approval', () => {
|
||||
const output = 'Task received. Processing...\nResult: completed successfully.';
|
||||
expect(validateAgentOutput(output, false)).toBe('waiting_approval');
|
||||
});
|
||||
|
||||
it('Codex CLI with brief acknowledgment → waiting_approval', () => {
|
||||
const output = 'Understood the task. Starting implementation.\nDone.';
|
||||
expect(validateAgentOutput(output, false)).toBe('waiting_approval');
|
||||
});
|
||||
|
||||
it('Agent that only reads but makes no edits (single Read tool, short output) → waiting_approval', () => {
|
||||
const output = formatToolUseBlock('Read', { file_path: '/src/index.ts' }) + 'File read.';
|
||||
expect(output.trim().length).toBeLessThan(MIN_OUTPUT_LENGTH);
|
||||
expect(validateAgentOutput(output, false)).toBe('waiting_approval');
|
||||
});
|
||||
|
||||
it('Agent with extensive tool usage and explanation → verified', () => {
|
||||
let output = 'Analyzing the codebase for the authentication feature.\n\n';
|
||||
for (let i = 0; i < 5; i++) {
|
||||
output += formatToolUseBlock('Read', { file_path: `/src/auth/handler${i}.ts` });
|
||||
output += `Found handler ${i}. `;
|
||||
}
|
||||
output += formatToolUseBlock('Edit', {
|
||||
file_path: '/src/auth/handler0.ts',
|
||||
old_string: 'function login() {}',
|
||||
new_string: 'async function login(creds: Credentials) { ... }',
|
||||
});
|
||||
output += 'Implementation complete with all authentication changes applied.\n';
|
||||
|
||||
expect(validateAgentOutput(output, false)).toBe('verified');
|
||||
});
|
||||
});
|
||||
});
|
||||
@@ -188,6 +188,125 @@ describe('agent-service.ts', () => {
|
||||
expect(mockEvents.emit).toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it('should emit tool_result events from provider stream', async () => {
|
||||
const mockProvider = {
|
||||
getName: () => 'gemini',
|
||||
executeQuery: async function* () {
|
||||
yield {
|
||||
type: 'assistant',
|
||||
message: {
|
||||
role: 'assistant',
|
||||
content: [
|
||||
{
|
||||
type: 'tool_use',
|
||||
name: 'Read',
|
||||
tool_use_id: 'tool-1',
|
||||
input: { file_path: 'README.md' },
|
||||
},
|
||||
],
|
||||
},
|
||||
};
|
||||
yield {
|
||||
type: 'assistant',
|
||||
message: {
|
||||
role: 'assistant',
|
||||
content: [
|
||||
{
|
||||
type: 'tool_result',
|
||||
tool_use_id: 'tool-1',
|
||||
content: 'File contents here',
|
||||
},
|
||||
],
|
||||
},
|
||||
};
|
||||
yield {
|
||||
type: 'result',
|
||||
subtype: 'success',
|
||||
};
|
||||
},
|
||||
};
|
||||
|
||||
vi.mocked(ProviderFactory.getProviderForModel).mockReturnValue(mockProvider as any);
|
||||
|
||||
vi.mocked(promptBuilder.buildPromptWithImages).mockResolvedValue({
|
||||
content: 'Hello',
|
||||
hasImages: false,
|
||||
});
|
||||
|
||||
await service.sendMessage({
|
||||
sessionId: 'session-1',
|
||||
message: 'Hello',
|
||||
});
|
||||
|
||||
expect(mockEvents.emit).toHaveBeenCalledWith(
|
||||
'agent:stream',
|
||||
expect.objectContaining({
|
||||
sessionId: 'session-1',
|
||||
type: 'tool_result',
|
||||
tool: {
|
||||
name: 'Read',
|
||||
input: {
|
||||
toolUseId: 'tool-1',
|
||||
content: 'File contents here',
|
||||
},
|
||||
},
|
||||
})
|
||||
);
|
||||
});
|
||||
|
||||
it('should emit tool_result with unknown tool name for unregistered tool_use_id', async () => {
|
||||
const mockProvider = {
|
||||
getName: () => 'gemini',
|
||||
executeQuery: async function* () {
|
||||
// Yield tool_result WITHOUT a preceding tool_use (unregistered tool_use_id)
|
||||
yield {
|
||||
type: 'assistant',
|
||||
message: {
|
||||
role: 'assistant',
|
||||
content: [
|
||||
{
|
||||
type: 'tool_result',
|
||||
tool_use_id: 'unregistered-id',
|
||||
content: 'Some result content',
|
||||
},
|
||||
],
|
||||
},
|
||||
};
|
||||
yield {
|
||||
type: 'result',
|
||||
subtype: 'success',
|
||||
};
|
||||
},
|
||||
};
|
||||
|
||||
vi.mocked(ProviderFactory.getProviderForModel).mockReturnValue(mockProvider as any);
|
||||
|
||||
vi.mocked(promptBuilder.buildPromptWithImages).mockResolvedValue({
|
||||
content: 'Hello',
|
||||
hasImages: false,
|
||||
});
|
||||
|
||||
await service.sendMessage({
|
||||
sessionId: 'session-1',
|
||||
message: 'Hello',
|
||||
});
|
||||
|
||||
expect(mockEvents.emit).toHaveBeenCalledWith(
|
||||
'agent:stream',
|
||||
expect.objectContaining({
|
||||
sessionId: 'session-1',
|
||||
type: 'tool_result',
|
||||
tool: {
|
||||
name: 'unknown',
|
||||
input: {
|
||||
toolUseId: 'unregistered-id',
|
||||
content: 'Some result content',
|
||||
},
|
||||
},
|
||||
})
|
||||
);
|
||||
});
|
||||
|
||||
it('should handle images in message', async () => {
|
||||
const mockProvider = {
|
||||
getName: () => 'claude',
|
||||
@@ -303,6 +422,36 @@ describe('agent-service.ts', () => {
|
||||
|
||||
expect(fs.writeFile).toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it('should include context/history preparation for Gemini requests', async () => {
|
||||
let capturedOptions: any;
|
||||
const mockProvider = {
|
||||
getName: () => 'gemini',
|
||||
executeQuery: async function* (options: any) {
|
||||
capturedOptions = options;
|
||||
yield {
|
||||
type: 'result',
|
||||
subtype: 'success',
|
||||
};
|
||||
},
|
||||
};
|
||||
|
||||
vi.mocked(ProviderFactory.getProviderForModelName).mockReturnValue('gemini');
|
||||
vi.mocked(ProviderFactory.getProviderForModel).mockReturnValue(mockProvider as any);
|
||||
vi.mocked(promptBuilder.buildPromptWithImages).mockResolvedValue({
|
||||
content: 'Hello',
|
||||
hasImages: false,
|
||||
});
|
||||
|
||||
await service.sendMessage({
|
||||
sessionId: 'session-1',
|
||||
message: 'Hello',
|
||||
model: 'gemini-2.5-flash',
|
||||
});
|
||||
|
||||
expect(contextLoader.loadContextFiles).toHaveBeenCalled();
|
||||
expect(capturedOptions).toBeDefined();
|
||||
});
|
||||
});
|
||||
|
||||
describe('stopExecution', () => {
|
||||
|
||||
@@ -328,6 +328,86 @@ describe('auto-loop-coordinator.ts', () => {
|
||||
// Should not have executed features because at capacity
|
||||
expect(mockExecuteFeature).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it('counts all running features (auto + manual) against concurrency limit', async () => {
|
||||
vi.mocked(mockLoadPendingFeatures).mockResolvedValue([testFeature]);
|
||||
// 2 manual features running — total count is 2
|
||||
vi.mocked(mockConcurrencyManager.getRunningCountForWorktree).mockResolvedValue(2);
|
||||
|
||||
await coordinator.startAutoLoopForProject('/test/project', null, 2);
|
||||
|
||||
await vi.advanceTimersByTimeAsync(6000);
|
||||
|
||||
await coordinator.stopAutoLoopForProject('/test/project', null);
|
||||
|
||||
// Should NOT execute because total running count (2) meets the concurrency limit (2)
|
||||
expect(mockExecuteFeature).not.toHaveBeenCalled();
|
||||
// Verify it was called WITHOUT autoModeOnly (counts all tasks)
|
||||
// The coordinator's wrapper passes options through as undefined when not specified
|
||||
expect(mockConcurrencyManager.getRunningCountForWorktree).toHaveBeenCalledWith(
|
||||
'/test/project',
|
||||
null,
|
||||
undefined
|
||||
);
|
||||
});
|
||||
|
||||
it('allows auto dispatch when manual tasks finish and capacity becomes available', async () => {
|
||||
vi.mocked(mockLoadPendingFeatures).mockResolvedValue([testFeature]);
|
||||
// First call: at capacity (2 manual features running)
|
||||
// Second call: capacity freed (1 feature running)
|
||||
vi.mocked(mockConcurrencyManager.getRunningCountForWorktree)
|
||||
.mockResolvedValueOnce(2) // at capacity
|
||||
.mockResolvedValueOnce(1); // capacity available after manual task completes
|
||||
|
||||
await coordinator.startAutoLoopForProject('/test/project', null, 2);
|
||||
|
||||
// First iteration: at capacity, should wait
|
||||
await vi.advanceTimersByTimeAsync(5000);
|
||||
|
||||
// Second iteration: capacity available, should execute
|
||||
await vi.advanceTimersByTimeAsync(6000);
|
||||
|
||||
await coordinator.stopAutoLoopForProject('/test/project', null);
|
||||
|
||||
// Should execute after capacity freed
|
||||
expect(mockExecuteFeature).toHaveBeenCalledWith('/test/project', 'feature-1', true, true);
|
||||
});
|
||||
|
||||
it('waits when manually started tasks already fill concurrency limit at auto mode activation', async () => {
|
||||
vi.mocked(mockLoadPendingFeatures).mockResolvedValue([testFeature]);
|
||||
// Manual tasks already fill the limit
|
||||
vi.mocked(mockConcurrencyManager.getRunningCountForWorktree).mockResolvedValue(3);
|
||||
|
||||
await coordinator.startAutoLoopForProject('/test/project', null, 3);
|
||||
|
||||
await vi.advanceTimersByTimeAsync(6000);
|
||||
|
||||
await coordinator.stopAutoLoopForProject('/test/project', null);
|
||||
|
||||
// Auto mode should remain waiting, not dispatch
|
||||
expect(mockExecuteFeature).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it('resumes dispatching when all running tasks complete simultaneously', async () => {
|
||||
vi.mocked(mockLoadPendingFeatures).mockResolvedValue([testFeature]);
|
||||
// First check: all 3 slots occupied
|
||||
// Second check: all tasks completed simultaneously
|
||||
vi.mocked(mockConcurrencyManager.getRunningCountForWorktree)
|
||||
.mockResolvedValueOnce(3) // all slots full
|
||||
.mockResolvedValueOnce(0); // all tasks completed at once
|
||||
|
||||
await coordinator.startAutoLoopForProject('/test/project', null, 3);
|
||||
|
||||
// First iteration: at capacity
|
||||
await vi.advanceTimersByTimeAsync(5000);
|
||||
// Second iteration: all freed
|
||||
await vi.advanceTimersByTimeAsync(6000);
|
||||
|
||||
await coordinator.stopAutoLoopForProject('/test/project', null);
|
||||
|
||||
// Should execute after all tasks freed capacity
|
||||
expect(mockExecuteFeature).toHaveBeenCalledWith('/test/project', 'feature-1', true, true);
|
||||
});
|
||||
});
|
||||
|
||||
describe('priority-based feature selection', () => {
|
||||
@@ -788,7 +868,23 @@ describe('auto-loop-coordinator.ts', () => {
|
||||
expect(count).toBe(3);
|
||||
expect(mockConcurrencyManager.getRunningCountForWorktree).toHaveBeenCalledWith(
|
||||
'/test/project',
|
||||
null
|
||||
null,
|
||||
undefined
|
||||
);
|
||||
});
|
||||
|
||||
it('passes autoModeOnly option to ConcurrencyManager', async () => {
|
||||
vi.mocked(mockConcurrencyManager.getRunningCountForWorktree).mockResolvedValue(1);
|
||||
|
||||
const count = await coordinator.getRunningCountForWorktree('/test/project', null, {
|
||||
autoModeOnly: true,
|
||||
});
|
||||
|
||||
expect(count).toBe(1);
|
||||
expect(mockConcurrencyManager.getRunningCountForWorktree).toHaveBeenCalledWith(
|
||||
'/test/project',
|
||||
null,
|
||||
{ autoModeOnly: true }
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
@@ -416,6 +416,90 @@ describe('ConcurrencyManager', () => {
|
||||
expect(mainCount).toBe(2);
|
||||
});
|
||||
|
||||
it('should count only auto-mode features when autoModeOnly is true', async () => {
|
||||
// Auto-mode feature on main worktree
|
||||
manager.acquire({
|
||||
featureId: 'feature-auto',
|
||||
projectPath: '/test/project',
|
||||
isAutoMode: true,
|
||||
});
|
||||
|
||||
// Manual feature on main worktree
|
||||
manager.acquire({
|
||||
featureId: 'feature-manual',
|
||||
projectPath: '/test/project',
|
||||
isAutoMode: false,
|
||||
});
|
||||
|
||||
// Without autoModeOnly: counts both
|
||||
const totalCount = await manager.getRunningCountForWorktree('/test/project', null);
|
||||
expect(totalCount).toBe(2);
|
||||
|
||||
// With autoModeOnly: counts only auto-mode features
|
||||
const autoModeCount = await manager.getRunningCountForWorktree('/test/project', null, {
|
||||
autoModeOnly: true,
|
||||
});
|
||||
expect(autoModeCount).toBe(1);
|
||||
});
|
||||
|
||||
it('should count only auto-mode features on specific worktree when autoModeOnly is true', async () => {
|
||||
// Auto-mode feature on feature branch
|
||||
manager.acquire({
|
||||
featureId: 'feature-auto',
|
||||
projectPath: '/test/project',
|
||||
isAutoMode: true,
|
||||
});
|
||||
manager.updateRunningFeature('feature-auto', { branchName: 'feature-branch' });
|
||||
|
||||
// Manual feature on same feature branch
|
||||
manager.acquire({
|
||||
featureId: 'feature-manual',
|
||||
projectPath: '/test/project',
|
||||
isAutoMode: false,
|
||||
});
|
||||
manager.updateRunningFeature('feature-manual', { branchName: 'feature-branch' });
|
||||
|
||||
// Another auto-mode feature on different branch (should not be counted)
|
||||
manager.acquire({
|
||||
featureId: 'feature-other',
|
||||
projectPath: '/test/project',
|
||||
isAutoMode: true,
|
||||
});
|
||||
manager.updateRunningFeature('feature-other', { branchName: 'other-branch' });
|
||||
|
||||
const autoModeCount = await manager.getRunningCountForWorktree(
|
||||
'/test/project',
|
||||
'feature-branch',
|
||||
{ autoModeOnly: true }
|
||||
);
|
||||
expect(autoModeCount).toBe(1);
|
||||
|
||||
const totalCount = await manager.getRunningCountForWorktree(
|
||||
'/test/project',
|
||||
'feature-branch'
|
||||
);
|
||||
expect(totalCount).toBe(2);
|
||||
});
|
||||
|
||||
it('should return 0 when autoModeOnly is true and only manual features are running', async () => {
|
||||
manager.acquire({
|
||||
featureId: 'feature-manual-1',
|
||||
projectPath: '/test/project',
|
||||
isAutoMode: false,
|
||||
});
|
||||
|
||||
manager.acquire({
|
||||
featureId: 'feature-manual-2',
|
||||
projectPath: '/test/project',
|
||||
isAutoMode: false,
|
||||
});
|
||||
|
||||
const autoModeCount = await manager.getRunningCountForWorktree('/test/project', null, {
|
||||
autoModeOnly: true,
|
||||
});
|
||||
expect(autoModeCount).toBe(0);
|
||||
});
|
||||
|
||||
it('should filter by both projectPath and branchName', async () => {
|
||||
manager.acquire({
|
||||
featureId: 'feature-1',
|
||||
|
||||
@@ -486,7 +486,7 @@ describe('dev-server-service.ts', () => {
|
||||
await service.startDevServer(testDir, testDir);
|
||||
|
||||
// Simulate HTTPS dev server
|
||||
mockProcess.stdout.emit('data', Buffer.from('Server at https://localhost:3443\n'));
|
||||
mockProcess.stdout.emit('data', Buffer.from('Server listening at https://localhost:3443\n'));
|
||||
|
||||
await new Promise((resolve) => setTimeout(resolve, 50));
|
||||
|
||||
@@ -521,6 +521,368 @@ describe('dev-server-service.ts', () => {
|
||||
expect(serverInfo?.url).toBe(firstUrl);
|
||||
expect(serverInfo?.url).toBe('http://localhost:5173/');
|
||||
});
|
||||
|
||||
it('should detect Astro format URL', async () => {
|
||||
vi.mocked(secureFs.access).mockResolvedValue(undefined);
|
||||
|
||||
const mockProcess = createMockProcess();
|
||||
vi.mocked(spawn).mockReturnValue(mockProcess as any);
|
||||
|
||||
const { getDevServerService } = await import('@/services/dev-server-service.js');
|
||||
const service = getDevServerService();
|
||||
|
||||
await service.startDevServer(testDir, testDir);
|
||||
|
||||
// Astro uses the same "Local:" prefix as Vite
|
||||
mockProcess.stdout.emit('data', Buffer.from(' 🚀 astro v4.0.0 started in 200ms\n'));
|
||||
mockProcess.stdout.emit('data', Buffer.from(' ┃ Local http://localhost:4321/\n'));
|
||||
|
||||
await new Promise((resolve) => setTimeout(resolve, 50));
|
||||
|
||||
const serverInfo = service.getServerInfo(testDir);
|
||||
// Astro doesn't use "Local:" with colon, so it should be caught by the localhost URL pattern
|
||||
expect(serverInfo?.url).toBe('http://localhost:4321/');
|
||||
expect(serverInfo?.urlDetected).toBe(true);
|
||||
});
|
||||
|
||||
it('should detect Remix format URL', async () => {
|
||||
vi.mocked(secureFs.access).mockResolvedValue(undefined);
|
||||
|
||||
const mockProcess = createMockProcess();
|
||||
vi.mocked(spawn).mockReturnValue(mockProcess as any);
|
||||
|
||||
const { getDevServerService } = await import('@/services/dev-server-service.js');
|
||||
const service = getDevServerService();
|
||||
|
||||
await service.startDevServer(testDir, testDir);
|
||||
|
||||
mockProcess.stdout.emit(
|
||||
'data',
|
||||
Buffer.from('Remix App Server started at http://localhost:3000\n')
|
||||
);
|
||||
|
||||
await new Promise((resolve) => setTimeout(resolve, 50));
|
||||
|
||||
const serverInfo = service.getServerInfo(testDir);
|
||||
expect(serverInfo?.url).toBe('http://localhost:3000');
|
||||
expect(serverInfo?.urlDetected).toBe(true);
|
||||
});
|
||||
|
||||
it('should detect Django format URL', async () => {
|
||||
vi.mocked(secureFs.access).mockResolvedValue(undefined);
|
||||
|
||||
const mockProcess = createMockProcess();
|
||||
vi.mocked(spawn).mockReturnValue(mockProcess as any);
|
||||
|
||||
const { getDevServerService } = await import('@/services/dev-server-service.js');
|
||||
const service = getDevServerService();
|
||||
|
||||
await service.startDevServer(testDir, testDir);
|
||||
|
||||
mockProcess.stdout.emit(
|
||||
'data',
|
||||
Buffer.from('Starting development server at http://127.0.0.1:8000/\n')
|
||||
);
|
||||
|
||||
await new Promise((resolve) => setTimeout(resolve, 50));
|
||||
|
||||
const serverInfo = service.getServerInfo(testDir);
|
||||
expect(serverInfo?.url).toBe('http://127.0.0.1:8000/');
|
||||
expect(serverInfo?.urlDetected).toBe(true);
|
||||
});
|
||||
|
||||
it('should detect Webpack Dev Server format URL', async () => {
|
||||
vi.mocked(secureFs.access).mockResolvedValue(undefined);
|
||||
|
||||
const mockProcess = createMockProcess();
|
||||
vi.mocked(spawn).mockReturnValue(mockProcess as any);
|
||||
|
||||
const { getDevServerService } = await import('@/services/dev-server-service.js');
|
||||
const service = getDevServerService();
|
||||
|
||||
await service.startDevServer(testDir, testDir);
|
||||
|
||||
mockProcess.stdout.emit(
|
||||
'data',
|
||||
Buffer.from('<i> [webpack-dev-server] Project is running at http://localhost:8080/\n')
|
||||
);
|
||||
|
||||
await new Promise((resolve) => setTimeout(resolve, 50));
|
||||
|
||||
const serverInfo = service.getServerInfo(testDir);
|
||||
expect(serverInfo?.url).toBe('http://localhost:8080/');
|
||||
expect(serverInfo?.urlDetected).toBe(true);
|
||||
});
|
||||
|
||||
it('should detect PHP built-in server format URL', async () => {
|
||||
vi.mocked(secureFs.access).mockResolvedValue(undefined);
|
||||
|
||||
const mockProcess = createMockProcess();
|
||||
vi.mocked(spawn).mockReturnValue(mockProcess as any);
|
||||
|
||||
const { getDevServerService } = await import('@/services/dev-server-service.js');
|
||||
const service = getDevServerService();
|
||||
|
||||
await service.startDevServer(testDir, testDir);
|
||||
|
||||
mockProcess.stdout.emit(
|
||||
'data',
|
||||
Buffer.from('Development Server (http://localhost:8000) started\n')
|
||||
);
|
||||
|
||||
await new Promise((resolve) => setTimeout(resolve, 50));
|
||||
|
||||
const serverInfo = service.getServerInfo(testDir);
|
||||
expect(serverInfo?.url).toBe('http://localhost:8000');
|
||||
expect(serverInfo?.urlDetected).toBe(true);
|
||||
});
|
||||
|
||||
it('should detect "listening on port" format (port-only detection)', async () => {
|
||||
vi.mocked(secureFs.access).mockResolvedValue(undefined);
|
||||
|
||||
const mockProcess = createMockProcess();
|
||||
vi.mocked(spawn).mockReturnValue(mockProcess as any);
|
||||
|
||||
const { getDevServerService } = await import('@/services/dev-server-service.js');
|
||||
const service = getDevServerService();
|
||||
|
||||
await service.startDevServer(testDir, testDir);
|
||||
|
||||
// Some servers only print the port number, not a full URL
|
||||
mockProcess.stdout.emit('data', Buffer.from('Server listening on port 4000\n'));
|
||||
|
||||
await new Promise((resolve) => setTimeout(resolve, 50));
|
||||
|
||||
const serverInfo = service.getServerInfo(testDir);
|
||||
expect(serverInfo?.url).toBe('http://localhost:4000');
|
||||
expect(serverInfo?.urlDetected).toBe(true);
|
||||
});
|
||||
|
||||
it('should detect "running on port" format (port-only detection)', async () => {
|
||||
vi.mocked(secureFs.access).mockResolvedValue(undefined);
|
||||
|
||||
const mockProcess = createMockProcess();
|
||||
vi.mocked(spawn).mockReturnValue(mockProcess as any);
|
||||
|
||||
const { getDevServerService } = await import('@/services/dev-server-service.js');
|
||||
const service = getDevServerService();
|
||||
|
||||
await service.startDevServer(testDir, testDir);
|
||||
|
||||
mockProcess.stdout.emit('data', Buffer.from('Application running on port 9000\n'));
|
||||
|
||||
await new Promise((resolve) => setTimeout(resolve, 50));
|
||||
|
||||
const serverInfo = service.getServerInfo(testDir);
|
||||
expect(serverInfo?.url).toBe('http://localhost:9000');
|
||||
expect(serverInfo?.urlDetected).toBe(true);
|
||||
});
|
||||
|
||||
it('should strip ANSI escape codes before detecting URL', async () => {
|
||||
vi.mocked(secureFs.access).mockResolvedValue(undefined);
|
||||
|
||||
const mockProcess = createMockProcess();
|
||||
vi.mocked(spawn).mockReturnValue(mockProcess as any);
|
||||
|
||||
const { getDevServerService } = await import('@/services/dev-server-service.js');
|
||||
const service = getDevServerService();
|
||||
|
||||
await service.startDevServer(testDir, testDir);
|
||||
|
||||
// Simulate Vite output with ANSI color codes
|
||||
mockProcess.stdout.emit(
|
||||
'data',
|
||||
Buffer.from(
|
||||
' \x1B[32m➜\x1B[0m \x1B[1mLocal:\x1B[0m \x1B[36mhttp://localhost:5173/\x1B[0m\n'
|
||||
)
|
||||
);
|
||||
|
||||
await new Promise((resolve) => setTimeout(resolve, 50));
|
||||
|
||||
const serverInfo = service.getServerInfo(testDir);
|
||||
expect(serverInfo?.url).toBe('http://localhost:5173/');
|
||||
expect(serverInfo?.urlDetected).toBe(true);
|
||||
});
|
||||
|
||||
it('should normalize 0.0.0.0 to localhost', async () => {
|
||||
vi.mocked(secureFs.access).mockResolvedValue(undefined);
|
||||
|
||||
const mockProcess = createMockProcess();
|
||||
vi.mocked(spawn).mockReturnValue(mockProcess as any);
|
||||
|
||||
const { getDevServerService } = await import('@/services/dev-server-service.js');
|
||||
const service = getDevServerService();
|
||||
|
||||
await service.startDevServer(testDir, testDir);
|
||||
|
||||
mockProcess.stdout.emit('data', Buffer.from('Server listening at http://0.0.0.0:3000\n'));
|
||||
|
||||
await new Promise((resolve) => setTimeout(resolve, 50));
|
||||
|
||||
const serverInfo = service.getServerInfo(testDir);
|
||||
expect(serverInfo?.url).toBe('http://localhost:3000');
|
||||
expect(serverInfo?.urlDetected).toBe(true);
|
||||
});
|
||||
|
||||
it('should normalize [::] to localhost', async () => {
|
||||
vi.mocked(secureFs.access).mockResolvedValue(undefined);
|
||||
|
||||
const mockProcess = createMockProcess();
|
||||
vi.mocked(spawn).mockReturnValue(mockProcess as any);
|
||||
|
||||
const { getDevServerService } = await import('@/services/dev-server-service.js');
|
||||
const service = getDevServerService();
|
||||
|
||||
await service.startDevServer(testDir, testDir);
|
||||
|
||||
mockProcess.stdout.emit('data', Buffer.from('Local: http://[::]:4000/\n'));
|
||||
|
||||
await new Promise((resolve) => setTimeout(resolve, 50));
|
||||
|
||||
const serverInfo = service.getServerInfo(testDir);
|
||||
expect(serverInfo?.url).toBe('http://localhost:4000/');
|
||||
expect(serverInfo?.urlDetected).toBe(true);
|
||||
});
|
||||
|
||||
it('should update port field when detected URL has different port', async () => {
|
||||
vi.mocked(secureFs.access).mockResolvedValue(undefined);
|
||||
|
||||
const mockProcess = createMockProcess();
|
||||
vi.mocked(spawn).mockReturnValue(mockProcess as any);
|
||||
|
||||
const { getDevServerService } = await import('@/services/dev-server-service.js');
|
||||
const service = getDevServerService();
|
||||
|
||||
const result = await service.startDevServer(testDir, testDir);
|
||||
const allocatedPort = result.result?.port;
|
||||
|
||||
// Server starts on a completely different port (ignoring PORT env var)
|
||||
mockProcess.stdout.emit('data', Buffer.from('Local: http://localhost:9999/\n'));
|
||||
|
||||
await new Promise((resolve) => setTimeout(resolve, 50));
|
||||
|
||||
const serverInfo = service.getServerInfo(testDir);
|
||||
expect(serverInfo?.url).toBe('http://localhost:9999/');
|
||||
expect(serverInfo?.port).toBe(9999);
|
||||
// The port should be different from what was initially allocated
|
||||
if (allocatedPort !== 9999) {
|
||||
expect(serverInfo?.port).not.toBe(allocatedPort);
|
||||
}
|
||||
});
|
||||
|
||||
it('should detect URL from stderr output', async () => {
|
||||
vi.mocked(secureFs.access).mockResolvedValue(undefined);
|
||||
|
||||
const mockProcess = createMockProcess();
|
||||
vi.mocked(spawn).mockReturnValue(mockProcess as any);
|
||||
|
||||
const { getDevServerService } = await import('@/services/dev-server-service.js');
|
||||
const service = getDevServerService();
|
||||
|
||||
await service.startDevServer(testDir, testDir);
|
||||
|
||||
// Some servers output URL info to stderr
|
||||
mockProcess.stderr.emit('data', Buffer.from('Local: http://localhost:3000/\n'));
|
||||
|
||||
await new Promise((resolve) => setTimeout(resolve, 50));
|
||||
|
||||
const serverInfo = service.getServerInfo(testDir);
|
||||
expect(serverInfo?.url).toBe('http://localhost:3000/');
|
||||
expect(serverInfo?.urlDetected).toBe(true);
|
||||
});
|
||||
|
||||
it('should not match URLs without a port (non-dev-server URLs)', async () => {
|
||||
vi.mocked(secureFs.access).mockResolvedValue(undefined);
|
||||
|
||||
const mockProcess = createMockProcess();
|
||||
vi.mocked(spawn).mockReturnValue(mockProcess as any);
|
||||
|
||||
const { getDevServerService } = await import('@/services/dev-server-service.js');
|
||||
const service = getDevServerService();
|
||||
|
||||
const result = await service.startDevServer(testDir, testDir);
|
||||
|
||||
// CDN/external URLs should not be detected
|
||||
mockProcess.stdout.emit(
|
||||
'data',
|
||||
Buffer.from('Downloading from https://cdn.example.com/bundle.js\n')
|
||||
);
|
||||
mockProcess.stdout.emit('data', Buffer.from('Fetching https://registry.npmjs.org/package\n'));
|
||||
|
||||
await new Promise((resolve) => setTimeout(resolve, 50));
|
||||
|
||||
const serverInfo = service.getServerInfo(testDir);
|
||||
// Should keep the initial allocated URL since external URLs don't match
|
||||
expect(serverInfo?.url).toBe(result.result?.url);
|
||||
expect(serverInfo?.urlDetected).toBe(false);
|
||||
});
|
||||
|
||||
it('should handle URLs with trailing punctuation', async () => {
|
||||
vi.mocked(secureFs.access).mockResolvedValue(undefined);
|
||||
|
||||
const mockProcess = createMockProcess();
|
||||
vi.mocked(spawn).mockReturnValue(mockProcess as any);
|
||||
|
||||
const { getDevServerService } = await import('@/services/dev-server-service.js');
|
||||
const service = getDevServerService();
|
||||
|
||||
await service.startDevServer(testDir, testDir);
|
||||
|
||||
// URL followed by punctuation
|
||||
mockProcess.stdout.emit('data', Buffer.from('Server started at http://localhost:3000.\n'));
|
||||
|
||||
await new Promise((resolve) => setTimeout(resolve, 50));
|
||||
|
||||
const serverInfo = service.getServerInfo(testDir);
|
||||
expect(serverInfo?.url).toBe('http://localhost:3000');
|
||||
expect(serverInfo?.urlDetected).toBe(true);
|
||||
});
|
||||
|
||||
it('should detect Express/Fastify format URL', async () => {
|
||||
vi.mocked(secureFs.access).mockResolvedValue(undefined);
|
||||
|
||||
const mockProcess = createMockProcess();
|
||||
vi.mocked(spawn).mockReturnValue(mockProcess as any);
|
||||
|
||||
const { getDevServerService } = await import('@/services/dev-server-service.js');
|
||||
const service = getDevServerService();
|
||||
|
||||
await service.startDevServer(testDir, testDir);
|
||||
|
||||
mockProcess.stdout.emit('data', Buffer.from('Server listening on http://localhost:3000\n'));
|
||||
|
||||
await new Promise((resolve) => setTimeout(resolve, 50));
|
||||
|
||||
const serverInfo = service.getServerInfo(testDir);
|
||||
expect(serverInfo?.url).toBe('http://localhost:3000');
|
||||
expect(serverInfo?.urlDetected).toBe(true);
|
||||
});
|
||||
|
||||
it('should detect Angular CLI format URL', async () => {
|
||||
vi.mocked(secureFs.access).mockResolvedValue(undefined);
|
||||
|
||||
const mockProcess = createMockProcess();
|
||||
vi.mocked(spawn).mockReturnValue(mockProcess as any);
|
||||
|
||||
const { getDevServerService } = await import('@/services/dev-server-service.js');
|
||||
const service = getDevServerService();
|
||||
|
||||
await service.startDevServer(testDir, testDir);
|
||||
|
||||
// Angular CLI output
|
||||
mockProcess.stderr.emit(
|
||||
'data',
|
||||
Buffer.from(
|
||||
'** Angular Live Development Server is listening on localhost:4200, open your browser on http://localhost:4200/ **\n'
|
||||
)
|
||||
);
|
||||
|
||||
await new Promise((resolve) => setTimeout(resolve, 50));
|
||||
|
||||
const serverInfo = service.getServerInfo(testDir);
|
||||
expect(serverInfo?.url).toBe('http://localhost:4200/');
|
||||
expect(serverInfo?.urlDetected).toBe(true);
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
@@ -531,6 +893,7 @@ function createMockProcess() {
|
||||
mockProcess.stderr = new EventEmitter();
|
||||
mockProcess.kill = vi.fn();
|
||||
mockProcess.killed = false;
|
||||
mockProcess.pid = 12345;
|
||||
|
||||
// Don't exit immediately - let the test control the lifecycle
|
||||
return mockProcess;
|
||||
|
||||
835
apps/server/tests/unit/services/event-hook-service.test.ts
Normal file
835
apps/server/tests/unit/services/event-hook-service.test.ts
Normal file
@@ -0,0 +1,835 @@
|
||||
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
|
||||
import { EventHookService } from '../../../src/services/event-hook-service.js';
|
||||
import type { EventEmitter, EventCallback, EventType } from '../../../src/lib/events.js';
|
||||
import type { SettingsService } from '../../../src/services/settings-service.js';
|
||||
import type { EventHistoryService } from '../../../src/services/event-history-service.js';
|
||||
import type { FeatureLoader } from '../../../src/services/feature-loader.js';
|
||||
|
||||
/**
|
||||
* Create a mock EventEmitter for testing
|
||||
*/
|
||||
function createMockEventEmitter(): EventEmitter & {
|
||||
subscribers: Set<EventCallback>;
|
||||
simulateEvent: (type: EventType, payload: unknown) => void;
|
||||
} {
|
||||
const subscribers = new Set<EventCallback>();
|
||||
|
||||
return {
|
||||
subscribers,
|
||||
emit(type: EventType, payload: unknown) {
|
||||
for (const callback of subscribers) {
|
||||
callback(type, payload);
|
||||
}
|
||||
},
|
||||
subscribe(callback: EventCallback) {
|
||||
subscribers.add(callback);
|
||||
return () => {
|
||||
subscribers.delete(callback);
|
||||
};
|
||||
},
|
||||
simulateEvent(type: EventType, payload: unknown) {
|
||||
for (const callback of subscribers) {
|
||||
callback(type, payload);
|
||||
}
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a mock SettingsService
|
||||
*/
|
||||
function createMockSettingsService(hooks: unknown[] = []): SettingsService {
|
||||
return {
|
||||
getGlobalSettings: vi.fn().mockResolvedValue({ eventHooks: hooks }),
|
||||
} as unknown as SettingsService;
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a mock EventHistoryService
|
||||
*/
|
||||
function createMockEventHistoryService() {
|
||||
return {
|
||||
storeEvent: vi.fn().mockResolvedValue({ id: 'test-event-id' }),
|
||||
} as unknown as EventHistoryService;
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a mock FeatureLoader
|
||||
*/
|
||||
function createMockFeatureLoader(features: Record<string, { title: string }> = {}) {
|
||||
return {
|
||||
get: vi.fn().mockImplementation((_projectPath: string, featureId: string) => {
|
||||
return Promise.resolve(features[featureId] || null);
|
||||
}),
|
||||
} as unknown as FeatureLoader;
|
||||
}
|
||||
|
||||
describe('EventHookService', () => {
|
||||
let service: EventHookService;
|
||||
let mockEmitter: ReturnType<typeof createMockEventEmitter>;
|
||||
let mockSettingsService: ReturnType<typeof createMockSettingsService>;
|
||||
let mockEventHistoryService: ReturnType<typeof createMockEventHistoryService>;
|
||||
let mockFeatureLoader: ReturnType<typeof createMockFeatureLoader>;
|
||||
|
||||
beforeEach(() => {
|
||||
service = new EventHookService();
|
||||
mockEmitter = createMockEventEmitter();
|
||||
mockSettingsService = createMockSettingsService();
|
||||
mockEventHistoryService = createMockEventHistoryService();
|
||||
mockFeatureLoader = createMockFeatureLoader();
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
service.destroy();
|
||||
});
|
||||
|
||||
describe('initialize', () => {
|
||||
it('should subscribe to the event emitter', () => {
|
||||
service.initialize(mockEmitter, mockSettingsService, mockEventHistoryService);
|
||||
expect(mockEmitter.subscribers.size).toBe(1);
|
||||
});
|
||||
|
||||
it('should log initialization', () => {
|
||||
service.initialize(mockEmitter, mockSettingsService);
|
||||
expect(mockEmitter.subscribers.size).toBe(1);
|
||||
});
|
||||
});
|
||||
|
||||
describe('destroy', () => {
|
||||
it('should unsubscribe from the event emitter', () => {
|
||||
service.initialize(mockEmitter, mockSettingsService);
|
||||
expect(mockEmitter.subscribers.size).toBe(1);
|
||||
|
||||
service.destroy();
|
||||
expect(mockEmitter.subscribers.size).toBe(0);
|
||||
});
|
||||
});
|
||||
|
||||
describe('event mapping - auto_mode_feature_complete', () => {
|
||||
it('should map to feature_success when passes is true', async () => {
|
||||
service.initialize(
|
||||
mockEmitter,
|
||||
mockSettingsService,
|
||||
mockEventHistoryService,
|
||||
mockFeatureLoader
|
||||
);
|
||||
|
||||
mockEmitter.simulateEvent('auto-mode:event', {
|
||||
type: 'auto_mode_feature_complete',
|
||||
executionMode: 'auto',
|
||||
featureId: 'feat-1',
|
||||
featureName: 'Test Feature',
|
||||
passes: true,
|
||||
message: 'Feature completed in 30s',
|
||||
projectPath: '/test/project',
|
||||
});
|
||||
|
||||
// Allow async processing
|
||||
await vi.waitFor(() => {
|
||||
expect(mockEventHistoryService.storeEvent).toHaveBeenCalled();
|
||||
});
|
||||
|
||||
const storeCall = (mockEventHistoryService.storeEvent as ReturnType<typeof vi.fn>).mock
|
||||
.calls[0][0];
|
||||
expect(storeCall.trigger).toBe('feature_success');
|
||||
expect(storeCall.passes).toBe(true);
|
||||
});
|
||||
|
||||
it('should map to feature_error when passes is false', async () => {
|
||||
service.initialize(
|
||||
mockEmitter,
|
||||
mockSettingsService,
|
||||
mockEventHistoryService,
|
||||
mockFeatureLoader
|
||||
);
|
||||
|
||||
mockEmitter.simulateEvent('auto-mode:event', {
|
||||
type: 'auto_mode_feature_complete',
|
||||
executionMode: 'auto',
|
||||
featureId: 'feat-1',
|
||||
featureName: 'Test Feature',
|
||||
passes: false,
|
||||
message: 'Feature stopped by user',
|
||||
projectPath: '/test/project',
|
||||
});
|
||||
|
||||
await vi.waitFor(() => {
|
||||
expect(mockEventHistoryService.storeEvent).toHaveBeenCalled();
|
||||
});
|
||||
|
||||
const storeCall = (mockEventHistoryService.storeEvent as ReturnType<typeof vi.fn>).mock
|
||||
.calls[0][0];
|
||||
expect(storeCall.trigger).toBe('feature_error');
|
||||
expect(storeCall.passes).toBe(false);
|
||||
});
|
||||
|
||||
it('should NOT populate error field for successful feature completion', async () => {
|
||||
service.initialize(
|
||||
mockEmitter,
|
||||
mockSettingsService,
|
||||
mockEventHistoryService,
|
||||
mockFeatureLoader
|
||||
);
|
||||
|
||||
mockEmitter.simulateEvent('auto-mode:event', {
|
||||
type: 'auto_mode_feature_complete',
|
||||
executionMode: 'auto',
|
||||
featureId: 'feat-1',
|
||||
featureName: 'Test Feature',
|
||||
passes: true,
|
||||
message: 'Feature completed in 30s - auto-verified',
|
||||
projectPath: '/test/project',
|
||||
});
|
||||
|
||||
await vi.waitFor(() => {
|
||||
expect(mockEventHistoryService.storeEvent).toHaveBeenCalled();
|
||||
});
|
||||
|
||||
const storeCall = (mockEventHistoryService.storeEvent as ReturnType<typeof vi.fn>).mock
|
||||
.calls[0][0];
|
||||
expect(storeCall.trigger).toBe('feature_success');
|
||||
// Critical: error should NOT contain the success message
|
||||
expect(storeCall.error).toBeUndefined();
|
||||
expect(storeCall.errorType).toBeUndefined();
|
||||
});
|
||||
|
||||
it('should populate error field for failed feature completion', async () => {
|
||||
service.initialize(
|
||||
mockEmitter,
|
||||
mockSettingsService,
|
||||
mockEventHistoryService,
|
||||
mockFeatureLoader
|
||||
);
|
||||
|
||||
mockEmitter.simulateEvent('auto-mode:event', {
|
||||
type: 'auto_mode_feature_complete',
|
||||
executionMode: 'auto',
|
||||
featureId: 'feat-1',
|
||||
featureName: 'Test Feature',
|
||||
passes: false,
|
||||
message: 'Feature stopped by user',
|
||||
projectPath: '/test/project',
|
||||
});
|
||||
|
||||
await vi.waitFor(() => {
|
||||
expect(mockEventHistoryService.storeEvent).toHaveBeenCalled();
|
||||
});
|
||||
|
||||
const storeCall = (mockEventHistoryService.storeEvent as ReturnType<typeof vi.fn>).mock
|
||||
.calls[0][0];
|
||||
expect(storeCall.trigger).toBe('feature_error');
|
||||
// Error field should be populated for error triggers
|
||||
expect(storeCall.error).toBe('Feature stopped by user');
|
||||
});
|
||||
|
||||
it('should ignore feature complete events without explicit auto execution mode', async () => {
|
||||
service.initialize(
|
||||
mockEmitter,
|
||||
mockSettingsService,
|
||||
mockEventHistoryService,
|
||||
mockFeatureLoader
|
||||
);
|
||||
|
||||
mockEmitter.simulateEvent('auto-mode:event', {
|
||||
type: 'auto_mode_feature_complete',
|
||||
featureId: 'feat-1',
|
||||
featureName: 'Manual Feature',
|
||||
passes: true,
|
||||
message: 'Manually verified',
|
||||
projectPath: '/test/project',
|
||||
});
|
||||
|
||||
await new Promise((resolve) => setTimeout(resolve, 50));
|
||||
expect(mockEventHistoryService.storeEvent).not.toHaveBeenCalled();
|
||||
});
|
||||
});
|
||||
|
||||
describe('event mapping - feature:completed', () => {
|
||||
it('should map manual completion to feature_success', async () => {
|
||||
service.initialize(
|
||||
mockEmitter,
|
||||
mockSettingsService,
|
||||
mockEventHistoryService,
|
||||
mockFeatureLoader
|
||||
);
|
||||
|
||||
mockEmitter.simulateEvent('feature:completed', {
|
||||
featureId: 'feat-1',
|
||||
featureName: 'Manual Feature',
|
||||
projectPath: '/test/project',
|
||||
passes: true,
|
||||
executionMode: 'manual',
|
||||
});
|
||||
|
||||
await vi.waitFor(() => {
|
||||
expect(mockEventHistoryService.storeEvent).toHaveBeenCalled();
|
||||
});
|
||||
|
||||
const storeCall = (mockEventHistoryService.storeEvent as ReturnType<typeof vi.fn>).mock
|
||||
.calls[0][0];
|
||||
expect(storeCall.trigger).toBe('feature_success');
|
||||
expect(storeCall.passes).toBe(true);
|
||||
});
|
||||
});
|
||||
|
||||
describe('event mapping - auto_mode_error', () => {
|
||||
it('should map to feature_error when featureId is present', async () => {
|
||||
service.initialize(
|
||||
mockEmitter,
|
||||
mockSettingsService,
|
||||
mockEventHistoryService,
|
||||
mockFeatureLoader
|
||||
);
|
||||
|
||||
mockEmitter.simulateEvent('auto-mode:event', {
|
||||
type: 'auto_mode_error',
|
||||
featureId: 'feat-1',
|
||||
error: 'Network timeout',
|
||||
errorType: 'network',
|
||||
projectPath: '/test/project',
|
||||
});
|
||||
|
||||
await vi.waitFor(() => {
|
||||
expect(mockEventHistoryService.storeEvent).toHaveBeenCalled();
|
||||
});
|
||||
|
||||
const storeCall = (mockEventHistoryService.storeEvent as ReturnType<typeof vi.fn>).mock
|
||||
.calls[0][0];
|
||||
expect(storeCall.trigger).toBe('feature_error');
|
||||
expect(storeCall.error).toBe('Network timeout');
|
||||
expect(storeCall.errorType).toBe('network');
|
||||
});
|
||||
|
||||
it('should map to auto_mode_error when featureId is not present', async () => {
|
||||
service.initialize(
|
||||
mockEmitter,
|
||||
mockSettingsService,
|
||||
mockEventHistoryService,
|
||||
mockFeatureLoader
|
||||
);
|
||||
|
||||
mockEmitter.simulateEvent('auto-mode:event', {
|
||||
type: 'auto_mode_error',
|
||||
error: 'System error',
|
||||
errorType: 'system',
|
||||
projectPath: '/test/project',
|
||||
});
|
||||
|
||||
await vi.waitFor(() => {
|
||||
expect(mockEventHistoryService.storeEvent).toHaveBeenCalled();
|
||||
});
|
||||
|
||||
const storeCall = (mockEventHistoryService.storeEvent as ReturnType<typeof vi.fn>).mock
|
||||
.calls[0][0];
|
||||
expect(storeCall.trigger).toBe('auto_mode_error');
|
||||
expect(storeCall.error).toBe('System error');
|
||||
expect(storeCall.errorType).toBe('system');
|
||||
});
|
||||
});
|
||||
|
||||
describe('event mapping - auto_mode_idle', () => {
|
||||
it('should map to auto_mode_complete', async () => {
|
||||
service.initialize(
|
||||
mockEmitter,
|
||||
mockSettingsService,
|
||||
mockEventHistoryService,
|
||||
mockFeatureLoader
|
||||
);
|
||||
|
||||
mockEmitter.simulateEvent('auto-mode:event', {
|
||||
type: 'auto_mode_idle',
|
||||
projectPath: '/test/project',
|
||||
});
|
||||
|
||||
await vi.waitFor(() => {
|
||||
expect(mockEventHistoryService.storeEvent).toHaveBeenCalled();
|
||||
});
|
||||
|
||||
const storeCall = (mockEventHistoryService.storeEvent as ReturnType<typeof vi.fn>).mock
|
||||
.calls[0][0];
|
||||
expect(storeCall.trigger).toBe('auto_mode_complete');
|
||||
});
|
||||
});
|
||||
|
||||
describe('event mapping - feature:created', () => {
|
||||
it('should trigger feature_created hook', async () => {
|
||||
service.initialize(
|
||||
mockEmitter,
|
||||
mockSettingsService,
|
||||
mockEventHistoryService,
|
||||
mockFeatureLoader
|
||||
);
|
||||
|
||||
mockEmitter.simulateEvent('feature:created', {
|
||||
featureId: 'feat-1',
|
||||
featureName: 'New Feature',
|
||||
projectPath: '/test/project',
|
||||
});
|
||||
|
||||
await vi.waitFor(() => {
|
||||
expect(mockEventHistoryService.storeEvent).toHaveBeenCalled();
|
||||
});
|
||||
|
||||
const storeCall = (mockEventHistoryService.storeEvent as ReturnType<typeof vi.fn>).mock
|
||||
.calls[0][0];
|
||||
expect(storeCall.trigger).toBe('feature_created');
|
||||
expect(storeCall.featureId).toBe('feat-1');
|
||||
});
|
||||
});
|
||||
|
||||
describe('event mapping - unhandled events', () => {
|
||||
it('should ignore auto-mode events with unrecognized types', async () => {
|
||||
service.initialize(
|
||||
mockEmitter,
|
||||
mockSettingsService,
|
||||
mockEventHistoryService,
|
||||
mockFeatureLoader
|
||||
);
|
||||
|
||||
mockEmitter.simulateEvent('auto-mode:event', {
|
||||
type: 'auto_mode_progress',
|
||||
featureId: 'feat-1',
|
||||
content: 'Working...',
|
||||
projectPath: '/test/project',
|
||||
});
|
||||
|
||||
// Give it time to process
|
||||
await new Promise((resolve) => setTimeout(resolve, 50));
|
||||
|
||||
expect(mockEventHistoryService.storeEvent).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it('should ignore events without a type', async () => {
|
||||
service.initialize(
|
||||
mockEmitter,
|
||||
mockSettingsService,
|
||||
mockEventHistoryService,
|
||||
mockFeatureLoader
|
||||
);
|
||||
|
||||
mockEmitter.simulateEvent('auto-mode:event', {
|
||||
featureId: 'feat-1',
|
||||
projectPath: '/test/project',
|
||||
});
|
||||
|
||||
await new Promise((resolve) => setTimeout(resolve, 50));
|
||||
|
||||
expect(mockEventHistoryService.storeEvent).not.toHaveBeenCalled();
|
||||
});
|
||||
});
|
||||
|
||||
describe('hook execution', () => {
|
||||
it('should execute matching enabled hooks for feature_success', async () => {
|
||||
const hooks = [
|
||||
{
|
||||
id: 'hook-1',
|
||||
enabled: true,
|
||||
trigger: 'feature_success',
|
||||
name: 'Success Hook',
|
||||
action: {
|
||||
type: 'shell',
|
||||
command: 'echo "success"',
|
||||
},
|
||||
},
|
||||
{
|
||||
id: 'hook-2',
|
||||
enabled: true,
|
||||
trigger: 'feature_error',
|
||||
name: 'Error Hook',
|
||||
action: {
|
||||
type: 'shell',
|
||||
command: 'echo "error"',
|
||||
},
|
||||
},
|
||||
];
|
||||
|
||||
mockSettingsService = createMockSettingsService(hooks);
|
||||
service.initialize(
|
||||
mockEmitter,
|
||||
mockSettingsService,
|
||||
mockEventHistoryService,
|
||||
mockFeatureLoader
|
||||
);
|
||||
|
||||
mockEmitter.simulateEvent('auto-mode:event', {
|
||||
type: 'auto_mode_feature_complete',
|
||||
executionMode: 'auto',
|
||||
featureId: 'feat-1',
|
||||
featureName: 'Test Feature',
|
||||
passes: true,
|
||||
message: 'Feature completed in 30s',
|
||||
projectPath: '/test/project',
|
||||
});
|
||||
|
||||
await vi.waitFor(() => {
|
||||
expect(mockSettingsService.getGlobalSettings).toHaveBeenCalled();
|
||||
});
|
||||
|
||||
// The error hook should NOT have been triggered for a success event
|
||||
const storeCall = (mockEventHistoryService.storeEvent as ReturnType<typeof vi.fn>).mock
|
||||
.calls[0][0];
|
||||
expect(storeCall.trigger).toBe('feature_success');
|
||||
});
|
||||
|
||||
it('should NOT execute error hooks when feature completes successfully', async () => {
|
||||
// This is the key regression test for the bug:
|
||||
// "Error event hook fired when a feature completes successfully"
|
||||
      const hooks = [
        {
          id: 'hook-error',
          enabled: true,
          trigger: 'feature_error',
          name: 'Error Notification',
          action: {
            type: 'shell',
            command: 'echo "ERROR FIRED"',
          },
        },
      ];

      mockSettingsService = createMockSettingsService(hooks);
      service.initialize(
        mockEmitter,
        mockSettingsService,
        mockEventHistoryService,
        mockFeatureLoader
      );

      mockEmitter.simulateEvent('auto-mode:event', {
        type: 'auto_mode_feature_complete',
        executionMode: 'auto',
        featureId: 'feat-1',
        featureName: 'Test Feature',
        passes: true,
        message: 'Feature completed in 30s - auto-verified',
        projectPath: '/test/project',
      });

      await vi.waitFor(() => {
        expect(mockEventHistoryService.storeEvent).toHaveBeenCalled();
      });

      // Verify the trigger was feature_success, not feature_error
      const storeCall = (mockEventHistoryService.storeEvent as ReturnType<typeof vi.fn>).mock
        .calls[0][0];
      expect(storeCall.trigger).toBe('feature_success');
      // And no error information should be present
      expect(storeCall.error).toBeUndefined();
      expect(storeCall.errorType).toBeUndefined();
    });
  });

  describe('feature name loading', () => {
    it('should load feature name from feature loader when not in payload', async () => {
      mockFeatureLoader = createMockFeatureLoader({
        'feat-1': { title: 'Loaded Feature Title' },
      });

      service.initialize(
        mockEmitter,
        mockSettingsService,
        mockEventHistoryService,
        mockFeatureLoader
      );

      mockEmitter.simulateEvent('auto-mode:event', {
        type: 'auto_mode_feature_complete',
        executionMode: 'auto',
        featureId: 'feat-1',
        passes: true,
        message: 'Done',
        projectPath: '/test/project',
      });

      await vi.waitFor(() => {
        expect(mockEventHistoryService.storeEvent).toHaveBeenCalled();
      });

      const storeCall = (mockEventHistoryService.storeEvent as ReturnType<typeof vi.fn>).mock
        .calls[0][0];
      expect(storeCall.featureName).toBe('Loaded Feature Title');
    });

    it('should fall back to payload featureName when loader fails', async () => {
      mockFeatureLoader = createMockFeatureLoader({}); // Empty - no features found

      service.initialize(
        mockEmitter,
        mockSettingsService,
        mockEventHistoryService,
        mockFeatureLoader
      );

      mockEmitter.simulateEvent('auto-mode:event', {
        type: 'auto_mode_feature_complete',
        executionMode: 'auto',
        featureId: 'feat-1',
        featureName: 'Fallback Name',
        passes: true,
        message: 'Done',
        projectPath: '/test/project',
      });

      await vi.waitFor(() => {
        expect(mockEventHistoryService.storeEvent).toHaveBeenCalled();
      });

      const storeCall = (mockEventHistoryService.storeEvent as ReturnType<typeof vi.fn>).mock
        .calls[0][0];
      expect(storeCall.featureName).toBe('Fallback Name');
    });
  });

  describe('event mapping - feature_status_changed (non-auto-mode completion)', () => {
    it('should trigger feature_success when status changes to verified', async () => {
      mockFeatureLoader = createMockFeatureLoader({
        'feat-1': { title: 'Manual Feature' },
      });

      service.initialize(
        mockEmitter,
        mockSettingsService,
        mockEventHistoryService,
        mockFeatureLoader
      );

      mockEmitter.simulateEvent('auto-mode:event', {
        type: 'feature_status_changed',
        featureId: 'feat-1',
        projectPath: '/test/project',
        status: 'verified',
      });

      await vi.waitFor(() => {
        expect(mockEventHistoryService.storeEvent).toHaveBeenCalled();
      });

      const storeCall = (mockEventHistoryService.storeEvent as ReturnType<typeof vi.fn>).mock
        .calls[0][0];
      expect(storeCall.trigger).toBe('feature_success');
      expect(storeCall.featureName).toBe('Manual Feature');
      expect(storeCall.passes).toBe(true);
    });

    it('should trigger feature_success when status changes to waiting_approval', async () => {
      mockFeatureLoader = createMockFeatureLoader({
        'feat-1': { title: 'Manual Feature' },
      });

      service.initialize(
        mockEmitter,
        mockSettingsService,
        mockEventHistoryService,
        mockFeatureLoader
      );

      mockEmitter.simulateEvent('auto-mode:event', {
        type: 'feature_status_changed',
        featureId: 'feat-1',
        projectPath: '/test/project',
        status: 'waiting_approval',
      });

      await vi.waitFor(() => {
        expect(mockEventHistoryService.storeEvent).toHaveBeenCalled();
      });

      const storeCall = (mockEventHistoryService.storeEvent as ReturnType<typeof vi.fn>).mock
        .calls[0][0];
      expect(storeCall.trigger).toBe('feature_success');
      expect(storeCall.passes).toBe(true);
      expect(storeCall.featureName).toBe('Manual Feature');
    });

    it('should NOT trigger hooks for non-completion status changes', async () => {
      service.initialize(
        mockEmitter,
        mockSettingsService,
        mockEventHistoryService,
        mockFeatureLoader
      );

      mockEmitter.simulateEvent('auto-mode:event', {
        type: 'feature_status_changed',
        featureId: 'feat-1',
        projectPath: '/test/project',
        status: 'in_progress',
      });

      // Give it time to process
      await new Promise((resolve) => setTimeout(resolve, 50));

      expect(mockEventHistoryService.storeEvent).not.toHaveBeenCalled();
    });

    it('should NOT double-fire hooks when auto_mode_feature_complete already fired', async () => {
      service.initialize(
        mockEmitter,
        mockSettingsService,
        mockEventHistoryService,
        mockFeatureLoader
      );

      // First: auto_mode_feature_complete fires (auto-mode path)
      mockEmitter.simulateEvent('auto-mode:event', {
        type: 'auto_mode_feature_complete',
        executionMode: 'auto',
        featureId: 'feat-1',
        featureName: 'Auto Feature',
        passes: true,
        message: 'Feature completed',
        projectPath: '/test/project',
      });

      await vi.waitFor(() => {
        expect(mockEventHistoryService.storeEvent).toHaveBeenCalledTimes(1);
      });

      // Then: feature_status_changed fires for the same feature
      mockEmitter.simulateEvent('auto-mode:event', {
        type: 'feature_status_changed',
        featureId: 'feat-1',
        projectPath: '/test/project',
        status: 'verified',
      });

      // Give it time to process
      await new Promise((resolve) => setTimeout(resolve, 50));

      // Should still only have been called once (from auto_mode_feature_complete)
      expect(mockEventHistoryService.storeEvent).toHaveBeenCalledTimes(1);
    });

    it('should NOT double-fire hooks when auto_mode_error already fired for feature', async () => {
      service.initialize(
        mockEmitter,
        mockSettingsService,
        mockEventHistoryService,
        mockFeatureLoader
      );

      // First: auto_mode_error fires for a feature
      mockEmitter.simulateEvent('auto-mode:event', {
        type: 'auto_mode_error',
        featureId: 'feat-1',
        error: 'Something failed',
        errorType: 'execution',
        projectPath: '/test/project',
      });

      await vi.waitFor(() => {
        expect(mockEventHistoryService.storeEvent).toHaveBeenCalledTimes(1);
      });

      // Then: feature_status_changed fires for the same feature (e.g., reset to backlog)
      mockEmitter.simulateEvent('auto-mode:event', {
        type: 'feature_status_changed',
        featureId: 'feat-1',
        projectPath: '/test/project',
        status: 'verified', // unlikely after error, but tests the dedup
      });

      // Give it time to process
      await new Promise((resolve) => setTimeout(resolve, 50));

      // Should still only have been called once
      expect(mockEventHistoryService.storeEvent).toHaveBeenCalledTimes(1);
    });

    it('should fire hooks for different features independently', async () => {
      service.initialize(
        mockEmitter,
        mockSettingsService,
        mockEventHistoryService,
        mockFeatureLoader
      );

      // Auto-mode completion for feat-1
      mockEmitter.simulateEvent('auto-mode:event', {
        type: 'auto_mode_feature_complete',
        executionMode: 'auto',
        featureId: 'feat-1',
        passes: true,
        message: 'Done',
        projectPath: '/test/project',
      });

      await vi.waitFor(() => {
        expect(mockEventHistoryService.storeEvent).toHaveBeenCalledTimes(1);
      });

      // Manual completion for feat-2 (different feature)
      mockEmitter.simulateEvent('auto-mode:event', {
        type: 'feature_status_changed',
        featureId: 'feat-2',
        projectPath: '/test/project',
        status: 'verified',
      });

      await vi.waitFor(() => {
        expect(mockEventHistoryService.storeEvent).toHaveBeenCalledTimes(2);
      });

      // feat-2 should have triggered feature_success
      const secondCall = (mockEventHistoryService.storeEvent as ReturnType<typeof vi.fn>).mock
        .calls[1][0];
      expect(secondCall.trigger).toBe('feature_success');
      expect(secondCall.featureId).toBe('feat-2');
    });
  });

  describe('error context for error events', () => {
    it('should use payload.error when available for error triggers', async () => {
      service.initialize(
        mockEmitter,
        mockSettingsService,
        mockEventHistoryService,
        mockFeatureLoader
      );

      mockEmitter.simulateEvent('auto-mode:event', {
        type: 'auto_mode_error',
        featureId: 'feat-1',
        error: 'Authentication failed',
        errorType: 'auth',
        projectPath: '/test/project',
      });

      await vi.waitFor(() => {
        expect(mockEventHistoryService.storeEvent).toHaveBeenCalled();
      });

      const storeCall = (mockEventHistoryService.storeEvent as ReturnType<typeof vi.fn>).mock
        .calls[0][0];
      expect(storeCall.error).toBe('Authentication failed');
      expect(storeCall.errorType).toBe('auth');
    });

    it('should fall back to payload.message for error field in error triggers', async () => {
      service.initialize(
        mockEmitter,
        mockSettingsService,
        mockEventHistoryService,
        mockFeatureLoader
      );

      mockEmitter.simulateEvent('auto-mode:event', {
        type: 'auto_mode_feature_complete',
        executionMode: 'auto',
        featureId: 'feat-1',
        passes: false,
        message: 'Feature stopped by user',
        projectPath: '/test/project',
      });

      await vi.waitFor(() => {
        expect(mockEventHistoryService.storeEvent).toHaveBeenCalled();
      });

      const storeCall = (mockEventHistoryService.storeEvent as ReturnType<typeof vi.fn>).mock
        .calls[0][0];
      expect(storeCall.trigger).toBe('feature_error');
      expect(storeCall.error).toBe('Feature stopped by user');
    });
  });
});
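Taken together, the hook tests above pin down a small event-to-trigger mapping plus a per-feature dedup guard: 'verified' and 'waiting_approval' status changes map to a feature_success trigger, other statuses fire nothing, and a feature that already produced auto_mode_feature_complete or auto_mode_error must not fire again via feature_status_changed. The sketch below is illustrative only and uses names invented here (mapStatusEvent, firedFeatures); it is not the hooks-service implementation, just the behavior the assertions require.

// Illustrative sketch only (names invented here) - not the hooks-service source.
type HookTrigger = 'feature_success' | 'feature_error';

interface StatusChangedPayload {
  type: 'feature_status_changed';
  featureId: string;
  projectPath: string;
  status: string;
}

// featureIds that already fired via auto_mode_feature_complete / auto_mode_error
const firedFeatures = new Set<string>();

function mapStatusEvent(payload: StatusChangedPayload): HookTrigger | null {
  // Only terminal completion statuses count as a success trigger.
  if (payload.status !== 'verified' && payload.status !== 'waiting_approval') {
    return null;
  }
  // Dedup guard: an auto-mode event already covered this feature.
  if (firedFeatures.has(payload.featureId)) {
    return null;
  }
  firedFeatures.add(payload.featureId);
  return 'feature_success';
}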
@@ -34,6 +34,7 @@ import { getFeatureDir } from '@automaker/platform';
import {
  getPromptCustomization,
  getAutoLoadClaudeMdSetting,
  getUseClaudeCodeSystemPromptSetting,
  filterClaudeMdFromContext,
} from '../../../src/lib/settings-helpers.js';
import { extractSummary } from '../../../src/services/spec-parser.js';
@@ -67,6 +68,7 @@ vi.mock('../../../src/lib/settings-helpers.js', () => ({
    },
  }),
  getAutoLoadClaudeMdSetting: vi.fn().mockResolvedValue(true),
  getUseClaudeCodeSystemPromptSetting: vi.fn().mockResolvedValue(true),
  filterClaudeMdFromContext: vi.fn().mockReturnValue('context prompt'),
}));

@@ -175,7 +177,10 @@ describe('execution-service.ts', () => {
    } as unknown as TypedEventBus;

    mockConcurrencyManager = {
      acquire: vi.fn().mockImplementation(({ featureId }) => createRunningFeature(featureId)),
      acquire: vi.fn().mockImplementation(({ featureId, isAutoMode }) => ({
        ...createRunningFeature(featureId),
        isAutoMode: isAutoMode ?? false,
      })),
      release: vi.fn(),
      getRunningFeature: vi.fn(),
      isRunning: vi.fn(),
@@ -206,7 +211,14 @@ describe('execution-service.ts', () => {
    });

    // Default mocks for secureFs
    vi.mocked(secureFs.readFile).mockResolvedValue('Agent output content');
    // Include tool usage markers to simulate meaningful agent output.
    // The execution service checks for '🔧 Tool:' markers and minimum
    // output length to determine if the agent did real work.
    vi.mocked(secureFs.readFile).mockResolvedValue(
      'Starting implementation...\n\n🔧 Tool: Read\nInput: {"file_path": "/src/index.ts"}\n\n' +
        '🔧 Tool: Edit\nInput: {"file_path": "/src/index.ts", "old_string": "foo", "new_string": "bar"}\n\n' +
        'Implementation complete. Updated the code as requested.'
    );
    vi.mocked(secureFs.access).mockResolvedValue(undefined);

    // Re-setup platform mocks
@@ -227,6 +239,7 @@ describe('execution-service.ts', () => {
      },
    } as Awaited<ReturnType<typeof getPromptCustomization>>);
    vi.mocked(getAutoLoadClaudeMdSetting).mockResolvedValue(true);
    vi.mocked(getUseClaudeCodeSystemPromptSetting).mockResolvedValue(true);
    vi.mocked(filterClaudeMdFromContext).mockReturnValue('context prompt');

    // Re-setup spec-parser mock
@@ -550,8 +563,8 @@ describe('execution-service.ts', () => {
      expect(mockRunAgentFn).not.toHaveBeenCalled();
    });

    it('emits feature_complete event on success', async () => {
      await service.executeFeature('/test/project', 'feature-1');
    it('emits feature_complete event on success when isAutoMode is true', async () => {
      await service.executeFeature('/test/project', 'feature-1', false, true);

      expect(mockEventBus.emitAutoModeEvent).toHaveBeenCalledWith(
        'auto_mode_feature_complete',
@@ -561,6 +574,15 @@ describe('execution-service.ts', () => {
        })
      );
    });

    it('does not emit feature_complete event on success when isAutoMode is false', async () => {
      await service.executeFeature('/test/project', 'feature-1', false, false);

      const completeCalls = vi
        .mocked(mockEventBus.emitAutoModeEvent)
        .mock.calls.filter((call) => call[0] === 'auto_mode_feature_complete');
      expect(completeCalls.length).toBe(0);
    });
  });

  describe('executeFeature - approved plan handling', () => {
@@ -1110,7 +1132,7 @@ describe('execution-service.ts', () => {
      );
    });

    it('handles abort signal without error event', async () => {
    it('handles abort signal without error event (emits feature_complete when isAutoMode=true)', async () => {
      const abortError = new Error('abort');
      abortError.name = 'AbortError';
      mockRunAgentFn = vi.fn().mockRejectedValue(abortError);
@@ -1136,7 +1158,7 @@ describe('execution-service.ts', () => {
        mockLoadContextFilesFn
      );

      await svc.executeFeature('/test/project', 'feature-1');
      await svc.executeFeature('/test/project', 'feature-1', false, true);

      // Should emit feature_complete with stopped by user
      expect(mockEventBus.emitAutoModeEvent).toHaveBeenCalledWith(
@@ -1155,6 +1177,47 @@ describe('execution-service.ts', () => {
      expect(errorCalls.length).toBe(0);
    });

    it('handles abort signal without emitting feature_complete when isAutoMode=false', async () => {
      const abortError = new Error('abort');
      abortError.name = 'AbortError';
      mockRunAgentFn = vi.fn().mockRejectedValue(abortError);

      const svc = new ExecutionService(
        mockEventBus,
        mockConcurrencyManager,
        mockWorktreeResolver,
        mockSettingsService,
        mockRunAgentFn,
        mockExecutePipelineFn,
        mockUpdateFeatureStatusFn,
        mockLoadFeatureFn,
        mockGetPlanningPromptPrefixFn,
        mockSaveFeatureSummaryFn,
        mockRecordLearningsFn,
        mockContextExistsFn,
        mockResumeFeatureFn,
        mockTrackFailureFn,
        mockSignalPauseFn,
        mockRecordSuccessFn,
        mockSaveExecutionStateFn,
        mockLoadContextFilesFn
      );

      await svc.executeFeature('/test/project', 'feature-1', false, false);

      // Should NOT emit feature_complete when isAutoMode is false
      const completeCalls = vi
        .mocked(mockEventBus.emitAutoModeEvent)
        .mock.calls.filter((call) => call[0] === 'auto_mode_feature_complete');
      expect(completeCalls.length).toBe(0);

      // Should NOT emit error event (abort is not an error)
      const errorCalls = vi
        .mocked(mockEventBus.emitAutoModeEvent)
        .mock.calls.filter((call) => call[0] === 'auto_mode_error');
      expect(errorCalls.length).toBe(0);
    });

    it('releases running feature even on error', async () => {
      const testError = new Error('Test error');
      mockRunAgentFn = vi.fn().mockRejectedValue(testError);
@@ -1213,6 +1276,34 @@ describe('execution-service.ts', () => {

      expect(mockConcurrencyManager.release).toHaveBeenCalledWith('feature-1', { force: true });
    });

    it('immediately updates feature status to interrupted before subprocess terminates', async () => {
      const runningFeature = createRunningFeature('feature-1');
      vi.mocked(mockConcurrencyManager.getRunningFeature).mockReturnValue(runningFeature);

      await service.stopFeature('feature-1');

      // Should update to 'interrupted' immediately so the UI reflects the stop
      // without waiting for the CLI subprocess to fully terminate
      expect(mockUpdateFeatureStatusFn).toHaveBeenCalledWith(
        '/test/project',
        'feature-1',
        'interrupted'
      );
    });

    it('still aborts and releases even if status update fails', async () => {
      const runningFeature = createRunningFeature('feature-1');
      const abortSpy = vi.spyOn(runningFeature.abortController, 'abort');
      vi.mocked(mockConcurrencyManager.getRunningFeature).mockReturnValue(runningFeature);
      vi.mocked(mockUpdateFeatureStatusFn).mockRejectedValueOnce(new Error('disk error'));

      const result = await service.stopFeature('feature-1');

      expect(result).toBe(true);
      expect(abortSpy).toHaveBeenCalled();
      expect(mockConcurrencyManager.release).toHaveBeenCalledWith('feature-1', { force: true });
    });
  });

  describe('worktree resolution', () => {
@@ -1339,8 +1430,8 @@ describe('execution-service.ts', () => {
    it('handles missing agent output gracefully', async () => {
      vi.mocked(secureFs.readFile).mockRejectedValue(new Error('ENOENT'));

      // Should not throw
      await service.executeFeature('/test/project', 'feature-1');
      // Should not throw (isAutoMode=true so event is emitted)
      await service.executeFeature('/test/project', 'feature-1', false, true);

      // Feature should still complete successfully
      expect(mockEventBus.emitAutoModeEvent).toHaveBeenCalledWith(
@@ -1349,4 +1440,439 @@ describe('execution-service.ts', () => {
      );
    });
  });

  describe('executeFeature - agent output validation', () => {
    // Helper to generate realistic agent output with tool markers
    const makeAgentOutput = (toolCount: number, extraText = ''): string => {
      let output = 'Starting implementation...\n\n';
      for (let i = 0; i < toolCount; i++) {
        output += `🔧 Tool: Edit\nInput: {"file_path": "/src/file${i}.ts", "old_string": "old${i}", "new_string": "new${i}"}\n\n`;
      }
      output += `Implementation complete. ${extraText}`;
      return output;
    };

    const createServiceWithMocks = () => {
      return new ExecutionService(
        mockEventBus,
        mockConcurrencyManager,
        mockWorktreeResolver,
        mockSettingsService,
        mockRunAgentFn,
        mockExecutePipelineFn,
        mockUpdateFeatureStatusFn,
        mockLoadFeatureFn,
        mockGetPlanningPromptPrefixFn,
        mockSaveFeatureSummaryFn,
        mockRecordLearningsFn,
        mockContextExistsFn,
        mockResumeFeatureFn,
        mockTrackFailureFn,
        mockSignalPauseFn,
        mockRecordSuccessFn,
        mockSaveExecutionStateFn,
        mockLoadContextFilesFn
      );
    };

    it('sets verified when agent output has tool usage and sufficient length', async () => {
      const output = makeAgentOutput(3, 'Updated authentication module with new login flow.');
      vi.mocked(secureFs.readFile).mockResolvedValue(output);

      await service.executeFeature('/test/project', 'feature-1');

      expect(mockUpdateFeatureStatusFn).toHaveBeenCalledWith(
        '/test/project',
        'feature-1',
        'verified'
      );
    });

    it('sets waiting_approval when agent output is empty', async () => {
      vi.mocked(secureFs.readFile).mockResolvedValue('');

      const svc = createServiceWithMocks();
      await svc.executeFeature('/test/project', 'feature-1');

      expect(mockUpdateFeatureStatusFn).toHaveBeenCalledWith(
        '/test/project',
        'feature-1',
        'waiting_approval'
      );
    });

    it('sets waiting_approval when agent output has no tool usage markers', async () => {
      // Long output but no tool markers - agent printed text but didn't use tools
      const longOutputNoTools = 'I analyzed the codebase and found several issues. '.repeat(20);
      vi.mocked(secureFs.readFile).mockResolvedValue(longOutputNoTools);

      const svc = createServiceWithMocks();
      await svc.executeFeature('/test/project', 'feature-1');

      expect(mockUpdateFeatureStatusFn).toHaveBeenCalledWith(
        '/test/project',
        'feature-1',
        'waiting_approval'
      );
    });

    it('sets waiting_approval when agent output has tool markers but is too short', async () => {
      // Has a tool marker but total output is under 200 chars
      const shortWithTool = '🔧 Tool: Read\nInput: {"file_path": "/src/index.ts"}\nDone.';
      expect(shortWithTool.trim().length).toBeLessThan(200);

      vi.mocked(secureFs.readFile).mockResolvedValue(shortWithTool);

      const svc = createServiceWithMocks();
      await svc.executeFeature('/test/project', 'feature-1');

      expect(mockUpdateFeatureStatusFn).toHaveBeenCalledWith(
        '/test/project',
        'feature-1',
        'waiting_approval'
      );
    });

    it('sets waiting_approval when agent output file is missing (ENOENT)', async () => {
      vi.mocked(secureFs.readFile).mockRejectedValue(new Error('ENOENT'));

      const svc = createServiceWithMocks();
      await svc.executeFeature('/test/project', 'feature-1');

      expect(mockUpdateFeatureStatusFn).toHaveBeenCalledWith(
        '/test/project',
        'feature-1',
        'waiting_approval'
      );
    });

    it('sets waiting_approval when agent output is only whitespace', async () => {
      vi.mocked(secureFs.readFile).mockResolvedValue(' \n\n\t \n ');

      const svc = createServiceWithMocks();
      await svc.executeFeature('/test/project', 'feature-1');

      expect(mockUpdateFeatureStatusFn).toHaveBeenCalledWith(
        '/test/project',
        'feature-1',
        'waiting_approval'
      );
    });

    it('sets verified when output is exactly at the 200 char threshold with tool usage', async () => {
      // Create output that's exactly 200 chars trimmed with tool markers
      const toolMarker = '🔧 Tool: Edit\nInput: {"file_path": "/src/index.ts"}\n';
      const padding = 'x'.repeat(200 - toolMarker.length);
      const output = toolMarker + padding;
      expect(output.trim().length).toBeGreaterThanOrEqual(200);

      vi.mocked(secureFs.readFile).mockResolvedValue(output);

      const svc = createServiceWithMocks();
      await svc.executeFeature('/test/project', 'feature-1');

      expect(mockUpdateFeatureStatusFn).toHaveBeenCalledWith(
        '/test/project',
        'feature-1',
        'verified'
      );
    });

    it('sets waiting_approval when output is 199 chars with tool usage (below threshold)', async () => {
      const toolMarker = '🔧 Tool: Read\n';
      const padding = 'x'.repeat(199 - toolMarker.length);
      const output = toolMarker + padding;
      expect(output.trim().length).toBe(199);

      vi.mocked(secureFs.readFile).mockResolvedValue(output);

      const svc = createServiceWithMocks();
      await svc.executeFeature('/test/project', 'feature-1');

      expect(mockUpdateFeatureStatusFn).toHaveBeenCalledWith(
        '/test/project',
        'feature-1',
        'waiting_approval'
      );
    });

    it('skipTests always takes priority over output validation', async () => {
      // Meaningful output with tool usage - would normally be 'verified'
      const output = makeAgentOutput(5, 'All changes applied successfully.');
      vi.mocked(secureFs.readFile).mockResolvedValue(output);

      mockLoadFeatureFn = vi.fn().mockResolvedValue({ ...testFeature, skipTests: true });
      const svc = createServiceWithMocks();

      await svc.executeFeature('/test/project', 'feature-1');

      // skipTests=true always means waiting_approval regardless of output quality
      expect(mockUpdateFeatureStatusFn).toHaveBeenCalledWith(
        '/test/project',
        'feature-1',
        'waiting_approval'
      );
    });

    it('skipTests with empty output still results in waiting_approval', async () => {
      vi.mocked(secureFs.readFile).mockResolvedValue('');

      mockLoadFeatureFn = vi.fn().mockResolvedValue({ ...testFeature, skipTests: true });
      const svc = createServiceWithMocks();

      await svc.executeFeature('/test/project', 'feature-1');

      expect(mockUpdateFeatureStatusFn).toHaveBeenCalledWith(
        '/test/project',
        'feature-1',
        'waiting_approval'
      );
    });

    it('still records success even when output validation fails', async () => {
      vi.mocked(secureFs.readFile).mockResolvedValue('');

      const svc = createServiceWithMocks();
      await svc.executeFeature('/test/project', 'feature-1');

      // recordSuccess should still be called - the agent ran without errors
      expect(mockRecordSuccessFn).toHaveBeenCalled();
    });

    it('still extracts summary when output has content but no tool markers', async () => {
      const outputNoTools = 'A '.repeat(150); // > 200 chars but no tool markers
      vi.mocked(secureFs.readFile).mockResolvedValue(outputNoTools);

      const svc = createServiceWithMocks();
      await svc.executeFeature('/test/project', 'feature-1');

      // Summary extraction still runs even though status is waiting_approval
      expect(extractSummary).toHaveBeenCalledWith(outputNoTools);
      expect(mockSaveFeatureSummaryFn).toHaveBeenCalledWith(
        '/test/project',
        'feature-1',
        'Test summary'
      );
    });

    it('emits feature_complete with passes=true even when output validation routes to waiting_approval', async () => {
      vi.mocked(secureFs.readFile).mockResolvedValue('');

      const svc = createServiceWithMocks();
      await svc.executeFeature('/test/project', 'feature-1', false, true);

      // The agent ran without error - it's still a "pass" from the execution perspective
      expect(mockEventBus.emitAutoModeEvent).toHaveBeenCalledWith(
        'auto_mode_feature_complete',
        expect.objectContaining({ passes: true })
      );
    });

    it('handles realistic Cursor CLI output that exits quickly', async () => {
      // Simulates a Cursor CLI that prints a brief message and exits
      const cursorQuickExit = 'Task received. Processing...\nResult: completed successfully.';
      expect(cursorQuickExit.includes('🔧 Tool:')).toBe(false);

      vi.mocked(secureFs.readFile).mockResolvedValue(cursorQuickExit);

      const svc = createServiceWithMocks();
      await svc.executeFeature('/test/project', 'feature-1');

      // No tool usage = waiting_approval
      expect(mockUpdateFeatureStatusFn).toHaveBeenCalledWith(
        '/test/project',
        'feature-1',
        'waiting_approval'
      );
    });

    it('handles realistic Claude SDK output with multiple tool uses', async () => {
      // Simulates a Claude SDK agent that does real work
      const claudeOutput =
        "I'll implement the requested feature.\n\n" +
        '🔧 Tool: Read\nInput: {"file_path": "/src/components/App.tsx"}\n\n' +
        'I can see the existing component structure. Let me modify it.\n\n' +
        '🔧 Tool: Edit\nInput: {"file_path": "/src/components/App.tsx", "old_string": "const App = () => {", "new_string": "const App: React.FC = () => {"}\n\n' +
        '🔧 Tool: Write\nInput: {"file_path": "/src/components/NewFeature.tsx"}\n\n' +
        "I've created the new component and updated the existing one. The feature is now implemented with proper TypeScript types.";

      vi.mocked(secureFs.readFile).mockResolvedValue(claudeOutput);

      const svc = createServiceWithMocks();
      await svc.executeFeature('/test/project', 'feature-1');

      // Real work = verified
      expect(mockUpdateFeatureStatusFn).toHaveBeenCalledWith(
        '/test/project',
        'feature-1',
        'verified'
      );
    });

    it('reads agent output from the correct path with utf-8 encoding', async () => {
      const output = makeAgentOutput(2, 'Done with changes.');
      vi.mocked(secureFs.readFile).mockResolvedValue(output);

      const svc = createServiceWithMocks();
      await svc.executeFeature('/test/project', 'feature-1');

      // Verify readFile was called with the correct path derived from getFeatureDir
      expect(secureFs.readFile).toHaveBeenCalledWith(
        '/test/project/.automaker/features/feature-1/agent-output.md',
        'utf-8'
      );
    });

    it('completion message includes auto-verified when status is verified', async () => {
      const output = makeAgentOutput(3, 'All changes applied.');
      vi.mocked(secureFs.readFile).mockResolvedValue(output);

      const svc = createServiceWithMocks();
      await svc.executeFeature('/test/project', 'feature-1', false, true);

      expect(mockEventBus.emitAutoModeEvent).toHaveBeenCalledWith(
        'auto_mode_feature_complete',
        expect.objectContaining({
          message: expect.stringContaining('auto-verified'),
        })
      );
    });

    it('completion message does NOT include auto-verified when status is waiting_approval', async () => {
      // Empty output → waiting_approval
      vi.mocked(secureFs.readFile).mockResolvedValue('');

      const svc = createServiceWithMocks();
      await svc.executeFeature('/test/project', 'feature-1', false, true);

      const completeCall = vi
        .mocked(mockEventBus.emitAutoModeEvent)
        .mock.calls.find((call) => call[0] === 'auto_mode_feature_complete');
      expect(completeCall).toBeDefined();
      expect((completeCall![1] as { message: string }).message).not.toContain('auto-verified');
    });

    it('uses same agentOutput for both status determination and summary extraction', async () => {
      // Specific output that is long enough with tool markers (verified path)
      // AND has content for summary extraction
      const specificOutput =
        '🔧 Tool: Read\nReading file...\n🔧 Tool: Edit\nEditing file...\n' +
        'The implementation is complete. Here is a detailed description of what was done. '.repeat(
          3
        );
      vi.mocked(secureFs.readFile).mockResolvedValue(specificOutput);

      const svc = createServiceWithMocks();
      await svc.executeFeature('/test/project', 'feature-1');

      // Status should be verified (has tools + long enough)
      expect(mockUpdateFeatureStatusFn).toHaveBeenCalledWith(
        '/test/project',
        'feature-1',
        'verified'
      );
      // extractSummary should receive the exact same output
      expect(extractSummary).toHaveBeenCalledWith(specificOutput);
      // recordLearnings should also receive the same output
      expect(mockRecordLearningsFn).toHaveBeenCalledWith(
        '/test/project',
        testFeature,
        specificOutput
      );
    });

    it('does not call recordMemoryUsage when output is empty and memoryFiles is empty', async () => {
      vi.mocked(secureFs.readFile).mockResolvedValue('');
      const { recordMemoryUsage } = await import('@automaker/utils');

      const svc = createServiceWithMocks();
      await svc.executeFeature('/test/project', 'feature-1');

      // With empty output and empty memoryFiles, recordMemoryUsage should not be called
      expect(recordMemoryUsage).not.toHaveBeenCalled();
    });

    it('handles output with special unicode characters correctly', async () => {
      // Output with various unicode but includes tool markers
      const unicodeOutput =
        '🔧 Tool: Read\n' +
        '🔧 Tool: Edit\n' +
        'Añadiendo función de búsqueda con caracteres especiales: ñ, ü, ö, é, 日本語テスト. ' +
        'Die Änderungen wurden erfolgreich implementiert. '.repeat(3);
      vi.mocked(secureFs.readFile).mockResolvedValue(unicodeOutput);

      const svc = createServiceWithMocks();
      await svc.executeFeature('/test/project', 'feature-1');

      // Should still detect tool markers and sufficient length
      expect(mockUpdateFeatureStatusFn).toHaveBeenCalledWith(
        '/test/project',
        'feature-1',
        'verified'
      );
    });

    it('treats output with only newlines and spaces around tool marker as insufficient', async () => {
      // Has tool marker but surrounded by whitespace, total trimmed < 200
      const sparseOutput = '\n\n 🔧 Tool: Read \n\n';
      expect(sparseOutput.trim().length).toBeLessThan(200);

      vi.mocked(secureFs.readFile).mockResolvedValue(sparseOutput);

      const svc = createServiceWithMocks();
      await svc.executeFeature('/test/project', 'feature-1');

      expect(mockUpdateFeatureStatusFn).toHaveBeenCalledWith(
        '/test/project',
        'feature-1',
        'waiting_approval'
      );
    });

    it('detects tool marker substring correctly (partial match like "🔧 Tools:" does not count)', async () => {
      // Output with a similar but not exact marker - "🔧 Tools:" instead of "🔧 Tool:"
      const wrongMarker = '🔧 Tools: Read\n🔧 Tools: Edit\n' + 'Implementation done. '.repeat(20);
      expect(wrongMarker.includes('🔧 Tool:')).toBe(false);

      vi.mocked(secureFs.readFile).mockResolvedValue(wrongMarker);

      const svc = createServiceWithMocks();
      await svc.executeFeature('/test/project', 'feature-1');

      // "🔧 Tools:" is not the same as "🔧 Tool:" - should be waiting_approval
      expect(mockUpdateFeatureStatusFn).toHaveBeenCalledWith(
        '/test/project',
        'feature-1',
        'waiting_approval'
      );
    });

    it('pipeline merge_conflict status short-circuits before output validation', async () => {
      // Set up pipeline that results in merge_conflict
      vi.mocked(pipelineService.getPipelineConfig).mockResolvedValue({
        version: 1,
        steps: [{ id: 'step-1', name: 'Step 1', order: 1, instructions: 'Do step 1' }] as any,
      });

      // After pipeline, loadFeature returns merge_conflict status
      let loadCallCount = 0;
      mockLoadFeatureFn = vi.fn().mockImplementation(() => {
        loadCallCount++;
        if (loadCallCount === 1) return testFeature; // initial load
        // All subsequent loads (task check + pipeline refresh) return merge_conflict
        return { ...testFeature, status: 'merge_conflict' };
      });

      const svc = createServiceWithMocks();
      await svc.executeFeature('/test/project', 'feature-1');

      // Should NOT have called updateFeatureStatusFn with 'verified' or 'waiting_approval'
      // because pipeline merge_conflict short-circuits the method
      const statusCalls = vi
        .mocked(mockUpdateFeatureStatusFn)
        .mock.calls.filter((call) => call[2] === 'verified' || call[2] === 'waiting_approval');
      // The only non-in_progress status call should be absent since merge_conflict returns early
      expect(statusCalls.length).toBe(0);
    });
  });
});
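For orientation, the validation rule the agent-output tests encode can be stated in a few lines. The sketch below is not the code under test: classifyAgentOutput is an invented name, but the '🔧 Tool:' marker, the 200-character trimmed-length threshold, the skipTests override, and the treat-missing-file-as-empty behavior all come directly from the assertions above.

// Illustrative sketch only - summarizes the heuristic the tests above assert.
type ResultStatus = 'verified' | 'waiting_approval';

function classifyAgentOutput(agentOutput: string, skipTests: boolean): ResultStatus {
  // skipTests always wins: the feature waits for human approval no matter what.
  if (skipTests) return 'waiting_approval';

  // A missing or unreadable agent-output.md is treated the same as empty output.
  const trimmed = agentOutput.trim();
  const usedTools = trimmed.includes('🔧 Tool:'); // exact marker; '🔧 Tools:' does not match
  const longEnough = trimmed.length >= 200;

  // Tool usage plus enough output auto-verifies; everything else waits for approval.
  return usedTools && longEnough ? 'verified' : 'waiting_approval';
}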