Feature: worktree view customization and stability fixes (#805)

* Changes from feature/worktree-view-customization

* Feature: Git sync, set-tracking, and push divergence handling (#796)

* Add quick-add feature with improved workflows (#802)

* Changes from feature/quick-add

* feat: Clarify system prompt and improve error handling across services. Address PR feedback

* feat: Improve PR description parsing and refactor event handling

* feat: Add context options to pipeline orchestrator initialization

* fix: Deduplicate React and handle CJS interop for use-sync-external-store

Resolve "Cannot read properties of null (reading 'useState')" errors by
deduplicating React/react-dom and ensuring use-sync-external-store is
bundled together with React to prevent CJS packages from resolving to
different React instances.

* Changes from feature/worktree-view-customization

* refactor: Remove unused worktree swap and highlight props

* refactor: Consolidate feature completion logic and improve thinking level defaults

* feat: Increase max turn limit to 10000

- Update DEFAULT_MAX_TURNS from 1000 to 10000 in settings-helpers.ts and agent-executor.ts
- Update MAX_ALLOWED_TURNS from 2000 to 10000 in settings-helpers.ts
- Update UI clamping logic from 2000 to 10000 in app-store.ts
- Update fallback values from 1000 to 10000 in use-settings-sync.ts
- Update default value from 1000 to 10000 in DEFAULT_GLOBAL_SETTINGS
- Update documentation to reflect new range: 1-10000

Allows agents to perform up to 10000 turns for complex feature execution.

Co-Authored-By: Claude Haiku 4.5 <noreply@anthropic.com>

* feat: Add model resolution, improve session handling, and enhance UI stability

* refactor: Remove unused sync and tracking branch props from worktree components

* feat: Add PR number update functionality to worktrees. Address PR feedback

* feat: Optimize Gemini CLI startup and add tool result tracking

* refactor: Improve error handling and simplify worktree task cleanup

---------

Co-authored-by: Claude Haiku 4.5 <noreply@anthropic.com>
This commit is contained in:
gsxdsm
2026-02-23 20:31:25 -08:00
committed by GitHub
parent e7504b247f
commit 0330c70261
72 changed files with 3667 additions and 1173 deletions

View File

@@ -3,7 +3,7 @@
*/
import { spawn, type ChildProcess } from 'child_process';
import readline from 'readline';
import { StringDecoder } from 'string_decoder';
export interface SubprocessOptions {
command: string;
@@ -27,7 +27,16 @@ export interface SubprocessResult {
}
/**
* Spawns a subprocess and streams JSONL output line-by-line
* Spawns a subprocess and streams JSONL output line-by-line.
*
* Uses direct 'data' event handling with manual line buffering instead of
* readline's async iterator. The readline async iterator (for await...of on
* readline.Interface) has a known issue where events batch up rather than
* being delivered immediately, because it layers events.on() Promises on top
* of the readline 'line' event emitter. This causes visible delays (20-40s
* between batches) in CLI providers like Gemini that produce frequent small
* events. Direct data event handling delivers parsed events to the consumer
* as soon as they arrive from the pipe.
*/
export async function* spawnJSONLProcess(options: SubprocessOptions): AsyncGenerator<unknown> {
const { command, args, cwd, env, abortController, timeout = 30000, stdinData } = options;
@@ -66,6 +75,19 @@ export async function* spawnJSONLProcess(options: SubprocessOptions): AsyncGener
let stderrOutput = '';
let lastOutputTime = Date.now();
let timeoutHandle: NodeJS.Timeout | null = null;
let processExited = false;
// Stream consumer state - declared in outer scope so the abort handler can
// force the consumer to exit immediately without waiting for stdout to close.
// CLI tools (especially Gemini CLI) may take a long time to respond to SIGTERM,
// leaving the feature stuck in 'in_progress' state on the UI.
let streamEnded = false;
let notifyConsumer: (() => void) | null = null;
// Track process exit early so we don't block on an already-exited process
childProcess.on('exit', () => {
processExited = true;
});
// Collect stderr for error reporting
if (childProcess.stderr) {
@@ -102,6 +124,33 @@ export async function* spawnJSONLProcess(options: SubprocessOptions): AsyncGener
clearTimeout(timeoutHandle);
}
childProcess.kill('SIGTERM');
// Force stream consumer to exit immediately instead of waiting for
// the process to close stdout. CLI tools (especially Gemini CLI) may
// take a long time to respond to SIGTERM while mid-API call.
streamEnded = true;
if (notifyConsumer) {
notifyConsumer();
notifyConsumer = null;
}
// Escalate to SIGKILL after 3 seconds if process hasn't exited.
// SIGKILL cannot be caught or ignored, guaranteeing termination.
const killTimer = setTimeout(() => {
if (!processExited) {
console.log('[SubprocessManager] Escalated to SIGKILL after SIGTERM timeout');
try {
childProcess.kill('SIGKILL');
} catch {
// Process may have already exited between the check and kill
}
}
}, 3000);
// Clean up the kill timer when process exits (don't leak timers)
childProcess.once('exit', () => {
clearTimeout(killTimer);
});
};
// Check if already aborted, if so call handler immediately
if (abortController.signal.aborted) {
@@ -119,39 +168,101 @@ export async function* spawnJSONLProcess(options: SubprocessOptions): AsyncGener
}
};
// Parse stdout as JSONL (one JSON object per line)
// Parse stdout as JSONL using direct 'data' events with manual line buffering.
// This avoids the readline async iterator which batches events due to its
// internal events.on() Promise layering, causing significant delivery delays.
if (childProcess.stdout) {
const rl = readline.createInterface({
input: childProcess.stdout,
crlfDelay: Infinity,
// Queue of parsed events ready to be yielded
const eventQueue: unknown[] = [];
// Partial line buffer for incomplete lines across data chunks
let lineBuffer = '';
// StringDecoder handles multibyte UTF-8 sequences that may be split across chunks
const decoder = new StringDecoder('utf8');
childProcess.stdout.on('data', (chunk: Buffer) => {
resetTimeout();
lineBuffer += decoder.write(chunk);
const lines = lineBuffer.split('\n');
// Last element is either empty (line ended with \n) or a partial line
lineBuffer = lines.pop() || '';
for (const line of lines) {
const trimmed = line.trim();
if (!trimmed) continue;
try {
eventQueue.push(JSON.parse(trimmed));
} catch (parseError) {
console.error(`[SubprocessManager] Failed to parse JSONL line: ${trimmed}`, parseError);
eventQueue.push({
type: 'error',
error: `Failed to parse output: ${trimmed}`,
});
}
}
// Wake up the consumer if it's waiting for events
if (notifyConsumer && eventQueue.length > 0) {
notifyConsumer();
notifyConsumer = null;
}
});
childProcess.stdout.on('end', () => {
// Flush any remaining bytes from the decoder
lineBuffer += decoder.end();
// Process any remaining partial line
if (lineBuffer.trim()) {
try {
eventQueue.push(JSON.parse(lineBuffer.trim()));
} catch (parseError) {
console.error(
`[SubprocessManager] Failed to parse final JSONL line: ${lineBuffer}`,
parseError
);
eventQueue.push({
type: 'error',
error: `Failed to parse output: ${lineBuffer}`,
});
}
lineBuffer = '';
}
streamEnded = true;
// Wake up consumer so it can exit the loop
if (notifyConsumer) {
notifyConsumer();
notifyConsumer = null;
}
});
childProcess.stdout.on('error', (error) => {
console.error('[SubprocessManager] stdout error:', error);
streamEnded = true;
if (notifyConsumer) {
notifyConsumer();
notifyConsumer = null;
}
});
try {
for await (const line of rl) {
resetTimeout();
if (!line.trim()) continue;
try {
const parsed = JSON.parse(line);
yield parsed;
} catch (parseError) {
console.error(`[SubprocessManager] Failed to parse JSONL line: ${line}`, parseError);
// Yield error but continue processing
yield {
type: 'error',
error: `Failed to parse output: ${line}`,
};
// Yield events as they arrive, waiting only when the queue is empty
while (!streamEnded || eventQueue.length > 0) {
if (eventQueue.length > 0) {
yield eventQueue.shift()!;
} else {
// Wait for the next data event to push events into the queue
await new Promise<void>((resolve) => {
notifyConsumer = resolve;
});
}
}
} catch (error) {
console.error('[SubprocessManager] Error reading stdout:', error);
throw error;
} finally {
if (timeoutHandle) {
clearTimeout(timeoutHandle);
}
rl.close();
cleanupAbortListener();
}
} else {
@@ -159,8 +270,15 @@ export async function* spawnJSONLProcess(options: SubprocessOptions): AsyncGener
cleanupAbortListener();
}
// Wait for process to exit
// Wait for process to exit.
// If the process already exited (e.g., abort handler killed it while we were
// draining the stream), resolve immediately to avoid blocking forever.
const exitCode = await new Promise<number | null>((resolve) => {
if (processExited) {
resolve(childProcess.exitCode ?? null);
return;
}
childProcess.on('exit', (code) => {
console.log(`[SubprocessManager] Process exited with code: ${code}`);
resolve(code);
@@ -245,6 +363,17 @@ export async function spawnProcess(options: SubprocessOptions): Promise<Subproce
abortHandler = () => {
cleanupAbortListener();
childProcess.kill('SIGTERM');
// Escalate to SIGKILL after 3 seconds if process hasn't exited
const killTimer = setTimeout(() => {
try {
childProcess.kill('SIGKILL');
} catch {
// Process may have already exited
}
}, 3000);
childProcess.once('exit', () => clearTimeout(killTimer));
reject(new Error('Process aborted'));
};
abortController.signal.addEventListener('abort', abortHandler);