Merge branch 'v0.11.0rc' into feat/dev-server-log-panel

Resolved conflict in worktree-panel.tsx by combining imports:
- DevServerLogsPanel from this branch
- WorktreeMobileDropdown, WorktreeActionsDropdown, BranchSwitchDropdown from v0.11.0rc

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
Shirone
2026-01-13 22:01:44 +01:00
111 changed files with 7248 additions and 990 deletions

View File

@@ -5,6 +5,43 @@
import type { Request, Response } from 'express';
import { execAsync, execEnv, getErrorMessage, logError } from './common.js';
const GIT_REMOTE_ORIGIN_COMMAND = 'git remote get-url origin';
const GH_REPO_VIEW_COMMAND = 'gh repo view --json name,owner';
const GITHUB_REPO_URL_PREFIX = 'https://github.com/';
const GITHUB_HTTPS_REMOTE_REGEX = /https:\/\/github\.com\/([^/]+)\/([^/.]+)/;
const GITHUB_SSH_REMOTE_REGEX = /git@github\.com:([^/]+)\/([^/.]+)/;
interface GhRepoViewResponse {
name?: string;
owner?: {
login?: string;
};
}
async function resolveRepoFromGh(projectPath: string): Promise<{
owner: string;
repo: string;
} | null> {
try {
const { stdout } = await execAsync(GH_REPO_VIEW_COMMAND, {
cwd: projectPath,
env: execEnv,
});
const data = JSON.parse(stdout) as GhRepoViewResponse;
const owner = typeof data.owner?.login === 'string' ? data.owner.login : null;
const repo = typeof data.name === 'string' ? data.name : null;
if (!owner || !repo) {
return null;
}
return { owner, repo };
} catch {
return null;
}
}
export interface GitHubRemoteStatus {
hasGitHubRemote: boolean;
remoteUrl: string | null;
@@ -21,19 +58,38 @@ export async function checkGitHubRemote(projectPath: string): Promise<GitHubRemo
};
try {
// Get the remote URL (origin by default)
const { stdout } = await execAsync('git remote get-url origin', {
cwd: projectPath,
env: execEnv,
});
let remoteUrl = '';
try {
// Get the remote URL (origin by default)
const { stdout } = await execAsync(GIT_REMOTE_ORIGIN_COMMAND, {
cwd: projectPath,
env: execEnv,
});
remoteUrl = stdout.trim();
status.remoteUrl = remoteUrl || null;
} catch {
// Ignore missing origin remote
}
const remoteUrl = stdout.trim();
status.remoteUrl = remoteUrl;
const ghRepo = await resolveRepoFromGh(projectPath);
if (ghRepo) {
status.hasGitHubRemote = true;
status.owner = ghRepo.owner;
status.repo = ghRepo.repo;
if (!status.remoteUrl) {
status.remoteUrl = `${GITHUB_REPO_URL_PREFIX}${ghRepo.owner}/${ghRepo.repo}`;
}
return status;
}
// Check if it's a GitHub URL
// Formats: https://github.com/owner/repo.git, git@github.com:owner/repo.git
const httpsMatch = remoteUrl.match(/https:\/\/github\.com\/([^/]+)\/([^/.]+)/);
const sshMatch = remoteUrl.match(/git@github\.com:([^/]+)\/([^/.]+)/);
if (!remoteUrl) {
return status;
}
const httpsMatch = remoteUrl.match(GITHUB_HTTPS_REMOTE_REGEX);
const sshMatch = remoteUrl.match(GITHUB_SSH_REMOTE_REGEX);
const match = httpsMatch || sshMatch;
if (match) {

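The fallback path above parses owner/repo straight from the remote URL when `gh repo view` is unavailable. A minimal sketch of that matching, using the same two regex constants (the example URLs are illustrative, not taken from this diff):

```ts
// Same patterns as GITHUB_HTTPS_REMOTE_REGEX and GITHUB_SSH_REMOTE_REGEX above.
const HTTPS_RE = /https:\/\/github\.com\/([^/]+)\/([^/.]+)/;
const SSH_RE = /git@github\.com:([^/]+)\/([^/.]+)/;

function parseGitHubRemote(remoteUrl: string): { owner: string; repo: string } | null {
  const match = remoteUrl.match(HTTPS_RE) ?? remoteUrl.match(SSH_RE);
  return match ? { owner: match[1], repo: match[2] } : null;
}

parseGitHubRemote('https://github.com/octocat/hello-world.git'); // { owner: 'octocat', repo: 'hello-world' }
parseGitHubRemote('git@github.com:octocat/hello-world');         // { owner: 'octocat', repo: 'hello-world' }
```

Note that `[^/.]+` stops at the first dot, so a trailing `.git` suffix is dropped without any extra handling.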
View File

@@ -25,19 +25,24 @@ interface GraphQLComment {
updatedAt: string;
}
interface GraphQLCommentConnection {
totalCount: number;
pageInfo: {
hasNextPage: boolean;
endCursor: string | null;
};
nodes: GraphQLComment[];
}
interface GraphQLIssueOrPullRequest {
__typename: 'Issue' | 'PullRequest';
comments: GraphQLCommentConnection;
}
interface GraphQLResponse {
data?: {
repository?: {
issue?: {
comments: {
totalCount: number;
pageInfo: {
hasNextPage: boolean;
endCursor: string | null;
};
nodes: GraphQLComment[];
};
};
issueOrPullRequest?: GraphQLIssueOrPullRequest | null;
};
};
errors?: Array<{ message: string }>;
@@ -45,6 +50,7 @@ interface GraphQLResponse {
/** Timeout for GitHub API requests in milliseconds */
const GITHUB_API_TIMEOUT_MS = 30000;
const COMMENTS_PAGE_SIZE = 50;
/**
* Validate cursor format (GraphQL cursors are typically base64 strings)
@@ -54,7 +60,7 @@ function isValidCursor(cursor: string): boolean {
}
/**
* Fetch comments for a specific issue using GitHub GraphQL API
* Fetch comments for a specific issue or pull request using GitHub GraphQL API
*/
async function fetchIssueComments(
projectPath: string,
@@ -70,24 +76,52 @@ async function fetchIssueComments(
// Use GraphQL variables instead of string interpolation for safety
const query = `
query GetIssueComments($owner: String!, $repo: String!, $issueNumber: Int!, $cursor: String) {
query GetIssueComments(
$owner: String!
$repo: String!
$issueNumber: Int!
$cursor: String
$pageSize: Int!
) {
repository(owner: $owner, name: $repo) {
issue(number: $issueNumber) {
comments(first: 50, after: $cursor) {
totalCount
pageInfo {
hasNextPage
endCursor
}
nodes {
id
author {
login
avatarUrl
issueOrPullRequest(number: $issueNumber) {
__typename
... on Issue {
comments(first: $pageSize, after: $cursor) {
totalCount
pageInfo {
hasNextPage
endCursor
}
nodes {
id
author {
login
avatarUrl
}
body
createdAt
updatedAt
}
}
}
... on PullRequest {
comments(first: $pageSize, after: $cursor) {
totalCount
pageInfo {
hasNextPage
endCursor
}
nodes {
id
author {
login
avatarUrl
}
body
createdAt
updatedAt
}
body
createdAt
updatedAt
}
}
}
@@ -99,6 +133,7 @@ async function fetchIssueComments(
repo,
issueNumber,
cursor: cursor || null,
pageSize: COMMENTS_PAGE_SIZE,
};
const requestBody = JSON.stringify({ query, variables });
@@ -140,10 +175,10 @@ async function fetchIssueComments(
throw new Error(response.errors[0].message);
}
const commentsData = response.data?.repository?.issue?.comments;
const commentsData = response.data?.repository?.issueOrPullRequest?.comments;
if (!commentsData) {
throw new Error('Issue not found or no comments data available');
throw new Error('Issue or pull request not found or no comments data available');
}
const comments: GitHubComment[] = commentsData.nodes.map((node) => ({

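Both the `Issue` and `PullRequest` branches return the same comment connection shape, so pagination works identically for either type. A rough sketch of one way to drive the cursor, assuming a `fetchPage` stand-in for the actual GraphQL call made in `fetchIssueComments`:

```ts
interface CommentsPage {
  totalCount: number;
  pageInfo: { hasNextPage: boolean; endCursor: string | null };
  nodes: Array<{ id: string; body: string; createdAt: string; updatedAt: string }>;
}

// Walks pageInfo until hasNextPage is false, feeding the last endCursor back
// as the $cursor variable (with $pageSize fixed at COMMENTS_PAGE_SIZE).
async function fetchAllComments(
  fetchPage: (cursor: string | null) => Promise<CommentsPage>
): Promise<CommentsPage['nodes']> {
  const all: CommentsPage['nodes'] = [];
  let cursor: string | null = null;
  do {
    const page = await fetchPage(cursor);
    all.push(...page.nodes);
    cursor = page.pageInfo.hasNextPage ? page.pageInfo.endCursor : null;
  } while (cursor);
  return all;
}
```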
View File

@@ -9,6 +9,17 @@ import { checkGitHubRemote } from './check-github-remote.js';
import { createLogger } from '@automaker/utils';
const logger = createLogger('ListIssues');
const OPEN_ISSUES_LIMIT = 100;
const CLOSED_ISSUES_LIMIT = 50;
const ISSUE_LIST_FIELDS = 'number,title,state,author,createdAt,labels,url,body,assignees';
const ISSUE_STATE_OPEN = 'open';
const ISSUE_STATE_CLOSED = 'closed';
const GH_ISSUE_LIST_COMMAND = 'gh issue list';
const GH_STATE_FLAG = '--state';
const GH_JSON_FLAG = '--json';
const GH_LIMIT_FLAG = '--limit';
const LINKED_PRS_BATCH_SIZE = 20;
const LINKED_PRS_TIMELINE_ITEMS = 10;
export interface GitHubLabel {
name: string;
@@ -69,34 +80,68 @@ async function fetchLinkedPRs(
// Build GraphQL query for batch fetching linked PRs
// We fetch up to 20 issues at a time to avoid query limits
const batchSize = 20;
for (let i = 0; i < issueNumbers.length; i += batchSize) {
const batch = issueNumbers.slice(i, i + batchSize);
for (let i = 0; i < issueNumbers.length; i += LINKED_PRS_BATCH_SIZE) {
const batch = issueNumbers.slice(i, i + LINKED_PRS_BATCH_SIZE);
const issueQueries = batch
.map(
(num, idx) => `
issue${idx}: issue(number: ${num}) {
number
timelineItems(first: 10, itemTypes: [CROSS_REFERENCED_EVENT, CONNECTED_EVENT]) {
nodes {
... on CrossReferencedEvent {
source {
... on PullRequest {
number
title
state
url
issue${idx}: issueOrPullRequest(number: ${num}) {
... on Issue {
number
timelineItems(
first: ${LINKED_PRS_TIMELINE_ITEMS}
itemTypes: [CROSS_REFERENCED_EVENT, CONNECTED_EVENT]
) {
nodes {
... on CrossReferencedEvent {
source {
... on PullRequest {
number
title
state
url
}
}
}
... on ConnectedEvent {
subject {
... on PullRequest {
number
title
state
url
}
}
}
}
... on ConnectedEvent {
subject {
... on PullRequest {
number
title
state
url
}
}
... on PullRequest {
number
timelineItems(
first: ${LINKED_PRS_TIMELINE_ITEMS}
itemTypes: [CROSS_REFERENCED_EVENT, CONNECTED_EVENT]
) {
nodes {
... on CrossReferencedEvent {
source {
... on PullRequest {
number
title
state
url
}
}
}
... on ConnectedEvent {
subject {
... on PullRequest {
number
title
state
url
}
}
}
}
@@ -213,16 +258,35 @@ export function createListIssuesHandler() {
}
// Fetch open and closed issues in parallel (now including assignees)
const repoQualifier =
remoteStatus.owner && remoteStatus.repo ? `${remoteStatus.owner}/${remoteStatus.repo}` : '';
const repoFlag = repoQualifier ? `-R ${repoQualifier}` : '';
const [openResult, closedResult] = await Promise.all([
execAsync(
'gh issue list --state open --json number,title,state,author,createdAt,labels,url,body,assignees --limit 100',
[
GH_ISSUE_LIST_COMMAND,
repoFlag,
`${GH_STATE_FLAG} ${ISSUE_STATE_OPEN}`,
`${GH_JSON_FLAG} ${ISSUE_LIST_FIELDS}`,
`${GH_LIMIT_FLAG} ${OPEN_ISSUES_LIMIT}`,
]
.filter(Boolean)
.join(' '),
{
cwd: projectPath,
env: execEnv,
}
),
execAsync(
'gh issue list --state closed --json number,title,state,author,createdAt,labels,url,body,assignees --limit 50',
[
GH_ISSUE_LIST_COMMAND,
repoFlag,
`${GH_STATE_FLAG} ${ISSUE_STATE_CLOSED}`,
`${GH_JSON_FLAG} ${ISSUE_LIST_FIELDS}`,
`${GH_LIMIT_FLAG} ${CLOSED_ISSUES_LIMIT}`,
]
.filter(Boolean)
.join(' '),
{
cwd: projectPath,
env: execEnv,

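The command array plus `.filter(Boolean).join(' ')` keeps the `gh` invocation valid whether or not a repo qualifier was resolved: an empty `repoFlag` is simply dropped. A small sketch with the constants above inlined (the `-R octocat/hello-world` value is illustrative):

```ts
function buildIssueListCommand(repoFlag: string, state: string, limit: number): string {
  return [
    'gh issue list',
    repoFlag, // '' when owner/repo is unknown; filtered out below
    `--state ${state}`,
    '--json number,title,state,author,createdAt,labels,url,body,assignees',
    `--limit ${limit}`,
  ]
    .filter(Boolean)
    .join(' ');
}

buildIssueListCommand('', 'open', 100);
// => 'gh issue list --state open --json … --limit 100' (field list elided)
buildIssueListCommand('-R octocat/hello-world', 'closed', 50);
// => 'gh issue list -R octocat/hello-world --state closed --json … --limit 50'
```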
View File

@@ -6,6 +6,17 @@ import type { Request, Response } from 'express';
import { execAsync, execEnv, getErrorMessage, logError } from './common.js';
import { checkGitHubRemote } from './check-github-remote.js';
const OPEN_PRS_LIMIT = 100;
const MERGED_PRS_LIMIT = 50;
const PR_LIST_FIELDS =
'number,title,state,author,createdAt,labels,url,isDraft,headRefName,reviewDecision,mergeable,body';
const PR_STATE_OPEN = 'open';
const PR_STATE_MERGED = 'merged';
const GH_PR_LIST_COMMAND = 'gh pr list';
const GH_STATE_FLAG = '--state';
const GH_JSON_FLAG = '--json';
const GH_LIMIT_FLAG = '--limit';
export interface GitHubLabel {
name: string;
color: string;
@@ -57,16 +68,36 @@ export function createListPRsHandler() {
return;
}
const repoQualifier =
remoteStatus.owner && remoteStatus.repo ? `${remoteStatus.owner}/${remoteStatus.repo}` : '';
const repoFlag = repoQualifier ? `-R ${repoQualifier}` : '';
const [openResult, mergedResult] = await Promise.all([
execAsync(
'gh pr list --state open --json number,title,state,author,createdAt,labels,url,isDraft,headRefName,reviewDecision,mergeable,body --limit 100',
[
GH_PR_LIST_COMMAND,
repoFlag,
`${GH_STATE_FLAG} ${PR_STATE_OPEN}`,
`${GH_JSON_FLAG} ${PR_LIST_FIELDS}`,
`${GH_LIMIT_FLAG} ${OPEN_PRS_LIMIT}`,
]
.filter(Boolean)
.join(' '),
{
cwd: projectPath,
env: execEnv,
}
),
execAsync(
'gh pr list --state merged --json number,title,state,author,createdAt,labels,url,isDraft,headRefName,reviewDecision,mergeable,body --limit 50',
[
GH_PR_LIST_COMMAND,
repoFlag,
`${GH_STATE_FLAG} ${PR_STATE_MERGED}`,
`${GH_JSON_FLAG} ${PR_LIST_FIELDS}`,
`${GH_LIMIT_FLAG} ${MERGED_PRS_LIMIT}`,
]
.filter(Boolean)
.join(' '),
{
cwd: projectPath,
env: execEnv,

View File

@@ -3,7 +3,7 @@
*
* Scans the codebase to determine if an issue is valid, invalid, or needs clarification.
* Runs asynchronously and emits events for progress and completion.
* Supports both Claude models and Cursor models.
* Supports Claude, Codex, Cursor, and OpenCode models.
*/
import type { Request, Response } from 'express';
@@ -11,13 +11,19 @@ import type { EventEmitter } from '../../../lib/events.js';
import type {
IssueValidationResult,
IssueValidationEvent,
ModelAlias,
CursorModelId,
ModelId,
GitHubComment,
LinkedPRInfo,
ThinkingLevel,
ReasoningEffort,
} from '@automaker/types';
import {
DEFAULT_PHASE_MODELS,
isClaudeModel,
isCodexModel,
isCursorModel,
isOpencodeModel,
} from '@automaker/types';
import { isCursorModel, DEFAULT_PHASE_MODELS } from '@automaker/types';
import { resolvePhaseModel } from '@automaker/model-resolver';
import { extractJson } from '../../../lib/json-extractor.js';
import { writeValidation } from '../../../lib/validation-storage.js';
@@ -39,9 +45,6 @@ import {
import type { SettingsService } from '../../../services/settings-service.js';
import { getAutoLoadClaudeMdSetting } from '../../../lib/settings-helpers.js';
/** Valid Claude model values for validation */
const VALID_CLAUDE_MODELS: readonly ModelAlias[] = ['opus', 'sonnet', 'haiku'] as const;
/**
* Request body for issue validation
*/
@@ -51,10 +54,12 @@ interface ValidateIssueRequestBody {
issueTitle: string;
issueBody: string;
issueLabels?: string[];
/** Model to use for validation (opus, sonnet, haiku, or cursor model IDs) */
model?: ModelAlias | CursorModelId;
/** Thinking level for Claude models (ignored for Cursor models) */
/** Model to use for validation (Claude alias or provider model ID) */
model?: ModelId;
/** Thinking level for Claude models (ignored for non-Claude models) */
thinkingLevel?: ThinkingLevel;
/** Reasoning effort for Codex models (ignored for non-Codex models) */
reasoningEffort?: ReasoningEffort;
/** Comments to include in validation analysis */
comments?: GitHubComment[];
/** Linked pull requests for this issue */
@@ -66,7 +71,7 @@ interface ValidateIssueRequestBody {
*
* Emits events for start, progress, complete, and error.
* Stores result on completion.
* Supports both Claude models (with structured output) and Cursor models (with JSON parsing).
* Supports Claude/Codex models (structured output) and Cursor/OpenCode models (JSON parsing).
*/
async function runValidation(
projectPath: string,
@@ -74,13 +79,14 @@ async function runValidation(
issueTitle: string,
issueBody: string,
issueLabels: string[] | undefined,
model: ModelAlias | CursorModelId,
model: ModelId,
events: EventEmitter,
abortController: AbortController,
settingsService?: SettingsService,
comments?: ValidationComment[],
linkedPRs?: ValidationLinkedPR[],
thinkingLevel?: ThinkingLevel
thinkingLevel?: ThinkingLevel,
reasoningEffort?: ReasoningEffort
): Promise<void> {
// Emit start event
const startEvent: IssueValidationEvent = {
@@ -111,8 +117,8 @@ async function runValidation(
let responseText = '';
// Determine if we should use structured output (Claude supports it, Cursor doesn't)
const useStructuredOutput = !isCursorModel(model);
// Determine if we should use structured output (Claude/Codex support it, Cursor/OpenCode don't)
const useStructuredOutput = isClaudeModel(model) || isCodexModel(model);
// Build the final prompt - for Cursor, include system prompt and JSON schema instructions
let finalPrompt = basePrompt;
@@ -138,14 +144,20 @@ ${basePrompt}`;
'[ValidateIssue]'
);
// Use thinkingLevel from request if provided, otherwise fall back to settings
// Use request overrides if provided, otherwise fall back to settings
let effectiveThinkingLevel: ThinkingLevel | undefined = thinkingLevel;
if (!effectiveThinkingLevel) {
let effectiveReasoningEffort: ReasoningEffort | undefined = reasoningEffort;
if (!effectiveThinkingLevel || !effectiveReasoningEffort) {
const settings = await settingsService?.getGlobalSettings();
const phaseModelEntry =
settings?.phaseModels?.validationModel || DEFAULT_PHASE_MODELS.validationModel;
const resolved = resolvePhaseModel(phaseModelEntry);
effectiveThinkingLevel = resolved.thinkingLevel;
if (!effectiveThinkingLevel) {
effectiveThinkingLevel = resolved.thinkingLevel;
}
if (!effectiveReasoningEffort && typeof phaseModelEntry !== 'string') {
effectiveReasoningEffort = phaseModelEntry.reasoningEffort;
}
}
logger.info(`Using model: ${model}`);
@@ -158,6 +170,7 @@ ${basePrompt}`;
systemPrompt: useStructuredOutput ? ISSUE_VALIDATION_SYSTEM_PROMPT : undefined,
abortController,
thinkingLevel: effectiveThinkingLevel,
reasoningEffort: effectiveReasoningEffort,
readOnly: true, // Issue validation only reads code, doesn't write
settingSources: autoLoadClaudeMd ? ['user', 'project', 'local'] : undefined,
outputFormat: useStructuredOutput
@@ -262,6 +275,7 @@ export function createValidateIssueHandler(
issueLabels,
model = 'opus',
thinkingLevel,
reasoningEffort,
comments: rawComments,
linkedPRs: rawLinkedPRs,
} = req.body as ValidateIssueRequestBody;
@@ -309,14 +323,17 @@ export function createValidateIssueHandler(
return;
}
// Validate model parameter at runtime - accept Claude models or Cursor models
const isValidClaudeModel = VALID_CLAUDE_MODELS.includes(model as ModelAlias);
const isValidCursorModel = isCursorModel(model);
// Validate model parameter at runtime - accept any supported provider model
const isValidModel =
isClaudeModel(model) ||
isCursorModel(model) ||
isCodexModel(model) ||
isOpencodeModel(model);
if (!isValidClaudeModel && !isValidCursorModel) {
if (!isValidModel) {
res.status(400).json({
success: false,
error: `Invalid model. Must be one of: ${VALID_CLAUDE_MODELS.join(', ')}, or a Cursor model ID`,
error: 'Invalid model. Must be a Claude, Cursor, Codex, or OpenCode model ID (or alias).',
});
return;
}
@@ -347,7 +364,8 @@ export function createValidateIssueHandler(
settingsService,
validationComments,
validationLinkedPRs,
thinkingLevel
thinkingLevel,
reasoningEffort
)
.catch(() => {
// Error is already handled inside runValidation (event emitted)

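The handler now keys two decisions off the provider family: whether the model is accepted at all, and whether structured output is used. A condensed sketch of that gate, assuming the type guards imported above accept a `ModelId`:

```ts
import {
  isClaudeModel,
  isCodexModel,
  isCursorModel,
  isOpencodeModel,
} from '@automaker/types';
import type { ModelId } from '@automaker/types';

// Claude/Codex get structured output; Cursor/OpenCode get the JSON-schema prompt
// plus extractJson parsing. Anything else is rejected (the handler responds 400).
function pickValidationStrategy(model: ModelId): 'structured-output' | 'json-parse' {
  if (isClaudeModel(model) || isCodexModel(model)) return 'structured-output';
  if (isCursorModel(model) || isOpencodeModel(model)) return 'json-parse';
  throw new Error(`Invalid model: ${model}`);
}
```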
View File

@@ -5,7 +5,7 @@
* Each provider shows: `{ configured: boolean, masked: string }`
* Masked shows first 4 and last 4 characters for verification.
*
* Response: `{ "success": true, "credentials": { anthropic } }`
* Response: `{ "success": true, "credentials": { anthropic, google, openai } }`
*/
import type { Request, Response } from 'express';

View File

@@ -1,7 +1,7 @@
/**
* PUT /api/settings/credentials - Update API credentials
*
* Updates API keys for Anthropic. Partial updates supported.
* Updates API keys for supported providers. Partial updates supported.
* Returns masked credentials for verification without exposing full keys.
*
* Request body: `Partial<Credentials>` (usually just apiKeys)

View File

@@ -11,6 +11,7 @@ export function createApiKeysHandler() {
res.json({
success: true,
hasAnthropicKey: !!getApiKey('anthropic') || !!process.env.ANTHROPIC_API_KEY,
hasGoogleKey: !!getApiKey('google'),
hasOpenaiKey: !!getApiKey('openai') || !!process.env.OPENAI_API_KEY,
});
} catch (error) {

View File

@@ -21,22 +21,25 @@ export function createStoreApiKeyHandler() {
return;
}
setApiKey(provider, apiKey);
// Also set as environment variable and persist to .env
if (provider === 'anthropic' || provider === 'anthropic_oauth_token') {
// Both API key and OAuth token use ANTHROPIC_API_KEY
process.env.ANTHROPIC_API_KEY = apiKey;
await persistApiKeyToEnv('ANTHROPIC_API_KEY', apiKey);
logger.info('[Setup] Stored API key as ANTHROPIC_API_KEY');
} else {
const providerEnvMap: Record<string, string> = {
anthropic: 'ANTHROPIC_API_KEY',
anthropic_oauth_token: 'ANTHROPIC_API_KEY',
openai: 'OPENAI_API_KEY',
};
const envKey = providerEnvMap[provider];
if (!envKey) {
res.status(400).json({
success: false,
error: `Unsupported provider: ${provider}. Only anthropic is supported.`,
error: `Unsupported provider: ${provider}. Only anthropic and openai are supported.`,
});
return;
}
setApiKey(provider, apiKey);
process.env[envKey] = apiKey;
await persistApiKeyToEnv(envKey, apiKey);
logger.info(`[Setup] Stored API key as ${envKey}`);
res.json({ success: true });
} catch (error) {
logError(error, 'Store API key failed');

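The refactor replaces the anthropic-only branch with a lookup table, so each supported provider is one map entry and everything else falls through to the 400 response. The behavior, in miniature:

```ts
// Mirror of providerEnvMap above: unknown providers get no env key.
const providerEnvMap: Record<string, string> = {
  anthropic: 'ANTHROPIC_API_KEY',
  anthropic_oauth_token: 'ANTHROPIC_API_KEY', // OAuth token reuses the same env var
  openai: 'OPENAI_API_KEY',
};

function envKeyFor(provider: string): string | undefined {
  return providerEnvMap[provider];
}

envKeyFor('openai');                // 'OPENAI_API_KEY'
envKeyFor('anthropic_oauth_token'); // 'ANTHROPIC_API_KEY'
envKeyFor('google');                // undefined, so the handler returns 400 "Unsupported provider"
```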
View File

@@ -17,6 +17,7 @@ import { createDeleteHandler } from './routes/delete.js';
import { createCreatePRHandler } from './routes/create-pr.js';
import { createPRInfoHandler } from './routes/pr-info.js';
import { createCommitHandler } from './routes/commit.js';
import { createGenerateCommitMessageHandler } from './routes/generate-commit-message.js';
import { createPushHandler } from './routes/push.js';
import { createPullHandler } from './routes/pull.js';
import { createCheckoutBranchHandler } from './routes/checkout-branch.js';
@@ -40,8 +41,12 @@ import {
createDeleteInitScriptHandler,
createRunInitScriptHandler,
} from './routes/init-script.js';
import type { SettingsService } from '../../services/settings-service.js';
export function createWorktreeRoutes(events: EventEmitter): Router {
export function createWorktreeRoutes(
events: EventEmitter,
settingsService?: SettingsService
): Router {
const router = Router();
router.post('/info', validatePathParams('projectPath'), createInfoHandler());
@@ -65,6 +70,12 @@ export function createWorktreeRoutes(events: EventEmitter): Router {
requireGitRepoOnly,
createCommitHandler()
);
router.post(
'/generate-commit-message',
validatePathParams('worktreePath'),
requireGitRepoOnly,
createGenerateCommitMessageHandler(settingsService)
);
router.post(
'/push',
validatePathParams('worktreePath'),

View File

@@ -0,0 +1,275 @@
/**
* POST /worktree/generate-commit-message endpoint - Generate an AI commit message from git diff
*
* Uses the configured model (via phaseModels.commitMessageModel) to generate a concise,
* conventional commit message from git changes. Defaults to Claude Haiku for speed.
*/
import type { Request, Response } from 'express';
import { exec } from 'child_process';
import { promisify } from 'util';
import { existsSync } from 'fs';
import { join } from 'path';
import { query } from '@anthropic-ai/claude-agent-sdk';
import { createLogger } from '@automaker/utils';
import { DEFAULT_PHASE_MODELS, isCursorModel, stripProviderPrefix } from '@automaker/types';
import { resolvePhaseModel } from '@automaker/model-resolver';
import { mergeCommitMessagePrompts } from '@automaker/prompts';
import { ProviderFactory } from '../../../providers/provider-factory.js';
import type { SettingsService } from '../../../services/settings-service.js';
import { getErrorMessage, logError } from '../common.js';
const logger = createLogger('GenerateCommitMessage');
const execAsync = promisify(exec);
/** Timeout for AI provider calls in milliseconds (30 seconds) */
const AI_TIMEOUT_MS = 30_000;
/**
* Wraps an async generator with a timeout.
* If the generator takes longer than the timeout, it throws an error.
*/
async function* withTimeout<T>(
generator: AsyncIterable<T>,
timeoutMs: number
): AsyncGenerator<T, void, unknown> {
const timeoutPromise = new Promise<never>((_, reject) => {
setTimeout(() => reject(new Error(`AI provider timed out after ${timeoutMs}ms`)), timeoutMs);
});
const iterator = generator[Symbol.asyncIterator]();
let done = false;
while (!done) {
const result = await Promise.race([iterator.next(), timeoutPromise]);
if (result.done) {
done = true;
} else {
yield result.value;
}
}
}
/**
* Get the effective system prompt for commit message generation.
* Uses custom prompt from settings if enabled, otherwise falls back to default.
*/
async function getSystemPrompt(settingsService?: SettingsService): Promise<string> {
const settings = await settingsService?.getGlobalSettings();
const prompts = mergeCommitMessagePrompts(settings?.promptCustomization?.commitMessage);
return prompts.systemPrompt;
}
interface GenerateCommitMessageRequestBody {
worktreePath: string;
}
interface GenerateCommitMessageSuccessResponse {
success: true;
message: string;
}
interface GenerateCommitMessageErrorResponse {
success: false;
error: string;
}
async function extractTextFromStream(
stream: AsyncIterable<{
type: string;
subtype?: string;
result?: string;
message?: {
content?: Array<{ type: string; text?: string }>;
};
}>
): Promise<string> {
let responseText = '';
for await (const msg of stream) {
if (msg.type === 'assistant' && msg.message?.content) {
for (const block of msg.message.content) {
if (block.type === 'text' && block.text) {
responseText += block.text;
}
}
} else if (msg.type === 'result' && msg.subtype === 'success') {
responseText = msg.result || responseText;
}
}
return responseText;
}
export function createGenerateCommitMessageHandler(
settingsService?: SettingsService
): (req: Request, res: Response) => Promise<void> {
return async (req: Request, res: Response): Promise<void> => {
try {
const { worktreePath } = req.body as GenerateCommitMessageRequestBody;
if (!worktreePath || typeof worktreePath !== 'string') {
const response: GenerateCommitMessageErrorResponse = {
success: false,
error: 'worktreePath is required and must be a string',
};
res.status(400).json(response);
return;
}
// Validate that the directory exists
if (!existsSync(worktreePath)) {
const response: GenerateCommitMessageErrorResponse = {
success: false,
error: 'worktreePath does not exist',
};
res.status(400).json(response);
return;
}
// Validate that it's a git repository (check for .git folder or file for worktrees)
const gitPath = join(worktreePath, '.git');
if (!existsSync(gitPath)) {
const response: GenerateCommitMessageErrorResponse = {
success: false,
error: 'worktreePath is not a git repository',
};
res.status(400).json(response);
return;
}
logger.info(`Generating commit message for worktree: ${worktreePath}`);
// Get git diff of staged and unstaged changes
let diff = '';
try {
// First try to get staged changes
const { stdout: stagedDiff } = await execAsync('git diff --cached', {
cwd: worktreePath,
maxBuffer: 1024 * 1024 * 5, // 5MB buffer
});
// If no staged changes, get unstaged changes
if (!stagedDiff.trim()) {
const { stdout: unstagedDiff } = await execAsync('git diff', {
cwd: worktreePath,
maxBuffer: 1024 * 1024 * 5, // 5MB buffer
});
diff = unstagedDiff;
} else {
diff = stagedDiff;
}
} catch (error) {
logger.error('Failed to get git diff:', error);
const response: GenerateCommitMessageErrorResponse = {
success: false,
error: 'Failed to get git changes',
};
res.status(500).json(response);
return;
}
if (!diff.trim()) {
const response: GenerateCommitMessageErrorResponse = {
success: false,
error: 'No changes to commit',
};
res.status(400).json(response);
return;
}
// Truncate diff if too long (keep first 10000 characters to avoid token limits)
const truncatedDiff =
diff.length > 10000 ? diff.substring(0, 10000) + '\n\n[... diff truncated ...]' : diff;
const userPrompt = `Generate a commit message for these changes:\n\n\`\`\`diff\n${truncatedDiff}\n\`\`\``;
// Get model from phase settings
const settings = await settingsService?.getGlobalSettings();
const phaseModelEntry =
settings?.phaseModels?.commitMessageModel || DEFAULT_PHASE_MODELS.commitMessageModel;
const { model } = resolvePhaseModel(phaseModelEntry);
logger.info(`Using model for commit message: ${model}`);
// Get the effective system prompt (custom or default)
const systemPrompt = await getSystemPrompt(settingsService);
let message: string;
// Route to appropriate provider based on model type
if (isCursorModel(model)) {
// Use Cursor provider for Cursor models
logger.info(`Using Cursor provider for model: ${model}`);
const provider = ProviderFactory.getProviderForModel(model);
const bareModel = stripProviderPrefix(model);
const cursorPrompt = `${systemPrompt}\n\n${userPrompt}`;
let responseText = '';
const cursorStream = provider.executeQuery({
prompt: cursorPrompt,
model: bareModel,
cwd: worktreePath,
maxTurns: 1,
allowedTools: [],
readOnly: true,
});
// Wrap with timeout to prevent indefinite hangs
for await (const msg of withTimeout(cursorStream, AI_TIMEOUT_MS)) {
if (msg.type === 'assistant' && msg.message?.content) {
for (const block of msg.message.content) {
if (block.type === 'text' && block.text) {
responseText += block.text;
}
}
}
}
message = responseText.trim();
} else {
// Use Claude SDK for Claude models
const stream = query({
prompt: userPrompt,
options: {
model,
systemPrompt,
maxTurns: 1,
allowedTools: [],
permissionMode: 'default',
},
});
// Wrap with timeout to prevent indefinite hangs
message = await extractTextFromStream(withTimeout(stream, AI_TIMEOUT_MS));
}
if (!message || message.trim().length === 0) {
logger.warn('Received empty response from model');
const response: GenerateCommitMessageErrorResponse = {
success: false,
error: 'Failed to generate commit message - empty response',
};
res.status(500).json(response);
return;
}
logger.info(`Generated commit message: ${message.trim().substring(0, 100)}...`);
const response: GenerateCommitMessageSuccessResponse = {
success: true,
message: message.trim(),
};
res.json(response);
} catch (error) {
logError(error, 'Generate commit message failed');
const response: GenerateCommitMessageErrorResponse = {
success: false,
error: getErrorMessage(error),
};
res.status(500).json(response);
}
};
}
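
From the client's point of view the new endpoint is a single POST that returns either `{ success: true, message }` or `{ success: false, error }`. A sketch of calling it, assuming the worktree router is mounted under `/api/worktree` (the prefix is not shown in this diff):

```ts
interface GenerateCommitMessageResponse {
  success: boolean;
  message?: string;
  error?: string;
}

async function requestCommitMessage(worktreePath: string): Promise<string> {
  const res = await fetch('/api/worktree/generate-commit-message', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ worktreePath }),
  });
  const data = (await res.json()) as GenerateCommitMessageResponse;
  if (!data.success || !data.message) {
    throw new Error(data.error ?? 'Commit message generation failed');
  }
  return data.message; // e.g. "feat: add dev server log panel" (illustrative)
}
```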