mirror of
https://github.com/AutoMaker-Org/automaker.git
synced 2026-01-31 20:03:37 +00:00
* memory

* feat: add smart memory selection with task context

  - Add taskContext parameter to loadContextFiles for intelligent file selection
  - Memory files are scored based on tag matching with task keywords
  - Category name matching (e.g., "terminals" matches terminals.md) with 4x weight
  - Usage statistics influence scoring (files that helped before rank higher)
  - Limit to top 5 files + always include gotchas.md
  - Auto-mode passes feature title/description as context
  - Chat sessions pass user message as context

  This prevents loading 40+ memory files and blowing past context limits.

  🤖 Generated with [Claude Code](https://claude.com/claude-code)

  Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>

* refactor: enhance auto-mode service and context loader

  - Improved context loading by adding task context for better memory selection.
  - Updated JSON parsing logic to handle various formats and ensure robust error handling.
  - Introduced file locking mechanisms to prevent race conditions during memory file updates.
  - Enhanced metadata handling in memory files, including validation and sanitization.
  - Refactored scoring logic for context files to improve selection accuracy based on task relevance.

  These changes optimize memory file management and improve the performance of the auto-mode service.

* refactor: enhance learning extraction and formatting in auto-mode service

  - Refined the user prompt for learning extraction to focus on meaningful insights and structured JSON output.
  - Updated the LearningEntry interface with additional context fields for better documentation of decisions and patterns.
  - Enhanced the formatLearning function to adopt an Architecture Decision Record (ADR) style, giving recorded learnings richer context.
  - Added detailed logging for better traceability during learning extraction and appending.

  These changes improve the quality and clarity of learnings captured during auto-mode operation.

* feat: integrate stripProviderPrefix utility for model ID handling

  - Added the stripProviderPrefix utility to various routes so providers receive bare model IDs.
  - Updated model references in executeQuery calls across multiple files for consistent model ID handling.
  - Introduced memoryExtractionModel in settings for learning extraction tasks.

  These changes streamline model ID processing across provider interactions.

* feat: enhance error handling and server offline management in board actions

  - handleRunFeature and handleStartImplementation now throw errors so callers can manage them.
  - Integrated connection error detection and server offline handling, redirecting users to the login page when the server is unreachable.
  - Updated follow-up feature logic with rollback mechanisms and clearer user feedback in error scenarios.

  These changes make board actions more robust during server connectivity issues.

---------

Co-authored-by: Claude Opus 4.5 <noreply@anthropic.com>
Co-authored-by: webdevcody <webdevcody@gmail.com>
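To make the scoring described in the first commit concrete, here is a minimal sketch of what such a selector could look like. Only the stated rules come from the message above (tag matching against task keywords, 4x weight for a category-name match, a usage-statistics boost, top 5 files plus gotchas.md); everything else, including the MemoryFile shape and the timesHelpful field, is an assumption, not AutoMaker's actual code.

// Sketch only -- hypothetical types and field names, not the real loadContextFiles implementation.
interface MemoryFile {
  path: string;          // e.g. 'terminals.md'
  tags: string[];        // tags stored in the file's metadata
  timesHelpful: number;  // hypothetical usage statistic
}

function selectMemoryFiles(files: MemoryFile[], taskContext: string): MemoryFile[] {
  const keywords = taskContext.toLowerCase().split(/\W+/).filter(Boolean);
  const scored = files.map((file) => {
    const category = file.path.replace(/\.md$/, '').toLowerCase();
    let score = 0;
    for (const kw of keywords) {
      if (file.tags.some((tag) => tag.toLowerCase() === kw)) score += 1; // tag match
      if (category === kw) score += 4; // category name match carries 4x weight
    }
    score += file.timesHelpful; // files that helped before rank higher
    return { file, score };
  });
  const top = scored
    .filter((entry) => entry.score > 0)
    .sort((a, b) => b.score - a.score)
    .slice(0, 5) // limit to the top 5 matches
    .map((entry) => entry.file);
  const gotchas = files.find((f) => f.path === 'gotchas.md');
  if (gotchas && !top.includes(gotchas)) top.push(gotchas); // gotchas.md is always included
  return top;
}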
223 lines
7.5 KiB
TypeScript
/**
 * Generate backlog plan using Claude AI
 *
 * Model is configurable via phaseModels.backlogPlanningModel in settings
 * (defaults to Sonnet). Can be overridden per-call via model parameter.
 */

import type { EventEmitter } from '../../lib/events.js';
import type { Feature, BacklogPlanResult, BacklogChange, DependencyUpdate } from '@automaker/types';
import {
  DEFAULT_PHASE_MODELS,
  isCursorModel,
  stripProviderPrefix,
  type ThinkingLevel,
} from '@automaker/types';
import { resolvePhaseModel } from '@automaker/model-resolver';
import { FeatureLoader } from '../../services/feature-loader.js';
import { ProviderFactory } from '../../providers/provider-factory.js';
import { extractJsonWithArray } from '../../lib/json-extractor.js';
import { logger, setRunningState, getErrorMessage } from './common.js';
import type { SettingsService } from '../../services/settings-service.js';
import { getAutoLoadClaudeMdSetting, getPromptCustomization } from '../../lib/settings-helpers.js';

const featureLoader = new FeatureLoader();

/**
 * Format features for the AI prompt
 */
function formatFeaturesForPrompt(features: Feature[]): string {
  if (features.length === 0) {
    return 'No features in backlog yet.';
  }

  return features
    .map((f) => {
      const deps = f.dependencies?.length ? `Dependencies: [${f.dependencies.join(', ')}]` : '';
      const priority = f.priority !== undefined ? `Priority: ${f.priority}` : '';
      return `- ID: ${f.id}
  Title: ${f.title || 'Untitled'}
  Description: ${f.description}
  Category: ${f.category}
  Status: ${f.status || 'backlog'}
  ${priority}
  ${deps}`.trim();
    })
    .join('\n\n');
}

/**
 * Parse the AI response into a BacklogPlanResult
 */
function parsePlanResponse(response: string): BacklogPlanResult {
  // Use shared JSON extraction utility for robust parsing
  // extractJsonWithArray validates that 'changes' exists AND is an array
  const parsed = extractJsonWithArray<BacklogPlanResult>(response, 'changes', {
    logger,
  });

  if (parsed) {
    return parsed;
  }

  // If parsing fails, log details and return an empty result
  logger.warn('[BacklogPlan] Failed to parse AI response as JSON');
  logger.warn('[BacklogPlan] Response text length:', response.length);
  logger.warn('[BacklogPlan] Response preview:', response.slice(0, 500));
  if (response.length === 0) {
    logger.error('[BacklogPlan] Response text is EMPTY! No content was extracted from stream.');
  }
  return {
    changes: [],
    summary: 'Failed to parse AI response',
    dependencyUpdates: [],
  };
}

/**
 * Generate a backlog modification plan based on user prompt
 */
export async function generateBacklogPlan(
  projectPath: string,
  prompt: string,
  events: EventEmitter,
  abortController: AbortController,
  settingsService?: SettingsService,
  model?: string
): Promise<BacklogPlanResult> {
  try {
    // Load current features
    const features = await featureLoader.getAll(projectPath);

    events.emit('backlog-plan:event', {
      type: 'backlog_plan_progress',
      content: `Loaded ${features.length} features from backlog`,
    });

    // Load prompts from settings
    const prompts = await getPromptCustomization(settingsService, '[BacklogPlan]');

    // Build the system prompt
    const systemPrompt = prompts.backlogPlan.systemPrompt;

    // Build the user prompt from template
    const currentFeatures = formatFeaturesForPrompt(features);
    const userPrompt = prompts.backlogPlan.userPromptTemplate
      .replace('{{currentFeatures}}', currentFeatures)
      .replace('{{userRequest}}', prompt);

    events.emit('backlog-plan:event', {
      type: 'backlog_plan_progress',
      content: 'Generating plan with AI...',
    });

    // Get the model to use from settings or provided override
    let effectiveModel = model;
    let thinkingLevel: ThinkingLevel | undefined;
    if (!effectiveModel) {
      const settings = await settingsService?.getGlobalSettings();
      const phaseModelEntry =
        settings?.phaseModels?.backlogPlanningModel || DEFAULT_PHASE_MODELS.backlogPlanningModel;
      const resolved = resolvePhaseModel(phaseModelEntry);
      effectiveModel = resolved.model;
      thinkingLevel = resolved.thinkingLevel;
    }
    logger.info('[BacklogPlan] Using model:', effectiveModel);

    const provider = ProviderFactory.getProviderForModel(effectiveModel);
    // Strip provider prefix - providers expect bare model IDs
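    // (illustrative, not from this file: an ID of the form '<provider>/<model>' becomes '<model>')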
    const bareModel = stripProviderPrefix(effectiveModel);

    // Get autoLoadClaudeMd setting
    const autoLoadClaudeMd = await getAutoLoadClaudeMdSetting(
      projectPath,
      settingsService,
      '[BacklogPlan]'
    );

    // For Cursor models, we need to combine prompts with explicit instructions
    // because Cursor doesn't support systemPrompt separation like Claude SDK
    let finalPrompt = userPrompt;
    let finalSystemPrompt: string | undefined = systemPrompt;

    if (isCursorModel(effectiveModel)) {
      logger.info('[BacklogPlan] Using Cursor model - adding explicit no-file-write instructions');
      finalPrompt = `${systemPrompt}

CRITICAL INSTRUCTIONS:
1. DO NOT write any files. Return the JSON in your response only.
2. DO NOT use Write, Edit, or any file modification tools.
3. Respond with ONLY a JSON object - no explanations, no markdown, just raw JSON.
4. Your entire response should be valid JSON starting with { and ending with }.
5. No text before or after the JSON object.

${userPrompt}`;
      finalSystemPrompt = undefined; // System prompt is now embedded in the user prompt
    }

    // Execute the query
    const stream = provider.executeQuery({
      prompt: finalPrompt,
      model: bareModel,
      cwd: projectPath,
      systemPrompt: finalSystemPrompt,
      maxTurns: 1,
      allowedTools: [], // No tools needed for this
      abortController,
      settingSources: autoLoadClaudeMd ? ['user', 'project'] : undefined,
      readOnly: true, // Plan generation only generates text, doesn't write files
      thinkingLevel, // Pass thinking level for extended thinking
    });

    let responseText = '';

    for await (const msg of stream) {
      if (abortController.signal.aborted) {
        throw new Error('Generation aborted');
      }

      if (msg.type === 'assistant') {
        if (msg.message?.content) {
          for (const block of msg.message.content) {
            if (block.type === 'text') {
              responseText += block.text;
            }
          }
        }
      } else if (msg.type === 'result' && msg.subtype === 'success' && msg.result) {
        // Use result if it's a final accumulated message (from Cursor provider)
        logger.info('[BacklogPlan] Received result from Cursor, length:', msg.result.length);
        logger.info('[BacklogPlan] Previous responseText length:', responseText.length);
        if (msg.result.length > responseText.length) {
          logger.info('[BacklogPlan] Using Cursor result (longer than accumulated text)');
          responseText = msg.result;
        } else {
          logger.info('[BacklogPlan] Keeping accumulated text (longer than Cursor result)');
        }
      }
    }

    // Parse the response
    const result = parsePlanResponse(responseText);

    events.emit('backlog-plan:event', {
      type: 'backlog_plan_complete',
      result,
    });

    return result;
  } catch (error) {
    const errorMessage = getErrorMessage(error);
    logger.error('[BacklogPlan] Generation failed:', errorMessage);

    events.emit('backlog-plan:event', {
      type: 'backlog_plan_error',
      error: errorMessage,
    });

    throw error;
  } finally {
    setRunningState(false, null);
  }
}
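For orientation, a call site for generateBacklogPlan might look roughly like the minimal sketch below. The events and settingsService instances, the project path, and the prompt text are assumptions about the host app, not taken from this file.

// Hypothetical usage sketch -- 'events' and 'settingsService' are supplied by the host app.
const abortController = new AbortController();

const plan = await generateBacklogPlan(
  '/path/to/project',
  'Split the auth feature into separate backend and frontend tasks',
  events,
  abortController,
  settingsService, // optional; when omitted, DEFAULT_PHASE_MODELS.backlogPlanningModel applies
  undefined // optional per-call model override
);

console.log(`${plan.summary}: ${plan.changes.length} change(s)`);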