Mirror of https://github.com/AutoMaker-Org/automaker.git (synced 2026-01-30 06:12:03 +00:00)
feat: connect Task Execution prompts to auto-mode-service
Update auto-mode-service.ts to use centralized Task Execution prompts from settings, making all 9 task execution prompts customizable via UI:

- buildFeaturePrompt: uses implementationInstructions and playwrightVerificationInstructions from settings
- buildTaskPrompt: uses taskPromptTemplate with variable substitution
- buildPipelineStepPrompt: updated to pass prompts through
- executeFeatureWithContext: uses resumeFeatureTemplate
- resolvePlanApproval recovery: uses continuationAfterApprovalTemplate
- Multi-agent continuation: uses continuationAfterApprovalTemplate
- recordLearningsFromFeature: uses learningExtractionSystemPrompt and learningExtractionUserPromptTemplate

All 12 prompt categories are now fully customizable from the UI.

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
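Every hunk below follows the same two-step pattern: fetch the customized prompt group with getPromptCustomization(this.settingsService, '[AutoMode]'), then fill `{{placeholder}}` variables with a global regex replace. A minimal sketch of that substitution step, for orientation only; the `applyTemplate` helper and the literal template are illustrative and not part of the repository:

```typescript
// Illustrative sketch of the {{variable}} substitution used throughout the diff.
// `applyTemplate` is a hypothetical helper; auto-mode-service.ts inlines the
// equivalent .replace() calls instead of going through a function like this.
function applyTemplate(template: string, vars: Record<string, string>): string {
  let result = template;
  for (const [name, value] of Object.entries(vars)) {
    // Same approach as the diff: global regex replace of {{name}}.
    // Using a function replacement avoids `$`-pattern expansion in the substituted value.
    result = result.replace(new RegExp(`\\{\\{${name}\\}\\}`, 'g'), () => value);
  }
  return result;
}

// Hypothetical template in the shape the service expects:
const template = 'The plan has been approved.\n\n{{userFeedback}}\n\n{{approvedPlan}}';
console.log(applyTemplate(template, { userFeedback: '', approvedPlan: '1. Add endpoint' }));
```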
@@ -579,6 +579,9 @@ export class AutoModeService {
       '[AutoMode]'
     );
 
+    // Get customized prompts from settings
+    const prompts = await getPromptCustomization(this.settingsService, '[AutoMode]');
+
     // Build the prompt - use continuation prompt if provided (for recovery after plan approval)
     let prompt: string;
     // Load project context files (CLAUDE.md, CODE_QUALITY.md, etc.) and memory files
@@ -604,7 +607,7 @@ export class AutoModeService {
       logger.info(`Using continuation prompt for feature ${featureId}`);
     } else {
       // Normal flow: build prompt with planning phase
-      const featurePrompt = this.buildFeaturePrompt(feature);
+      const featurePrompt = this.buildFeaturePrompt(feature, prompts.taskExecution);
       const planningPrefix = await this.getPlanningPromptPrefix(feature);
       prompt = planningPrefix + featurePrompt;
 
@@ -783,6 +786,9 @@ export class AutoModeService {
   ): Promise<void> {
     logger.info(`Executing ${steps.length} pipeline step(s) for feature ${featureId}`);
 
+    // Get customized prompts from settings
+    const prompts = await getPromptCustomization(this.settingsService, '[AutoMode]');
+
     // Load context files once with feature context for smart memory selection
     const contextResult = await loadContextFiles({
       projectPath,
@@ -827,7 +833,12 @@ export class AutoModeService {
       });
 
       // Build prompt for this pipeline step
-      const prompt = this.buildPipelineStepPrompt(step, feature, previousContext);
+      const prompt = this.buildPipelineStepPrompt(
+        step,
+        feature,
+        previousContext,
+        prompts.taskExecution
+      );
 
       // Get model from feature
       const model = resolveModelString(feature.model, DEFAULT_MODELS.claude);
@@ -882,14 +893,18 @@ export class AutoModeService {
   private buildPipelineStepPrompt(
     step: PipelineStep,
     feature: Feature,
-    previousContext: string
+    previousContext: string,
+    taskExecutionPrompts: {
+      implementationInstructions: string;
+      playwrightVerificationInstructions: string;
+    }
   ): string {
     let prompt = `## Pipeline Step: ${step.name}
 
 This is an automated pipeline step following the initial feature implementation.
 
 ### Feature Context
-${this.buildFeaturePrompt(feature)}
+${this.buildFeaturePrompt(feature, taskExecutionPrompts)}
 
 `;
 
@@ -1279,6 +1294,9 @@ Complete the pipeline step instructions above. Review the previous work and appl
       '[AutoMode]'
     );
 
+    // Get customized prompts from settings
+    const prompts = await getPromptCustomization(this.settingsService, '[AutoMode]');
+
     // Load project context files (CLAUDE.md, CODE_QUALITY.md, etc.) - passed as system prompt
     const contextResult = await loadContextFiles({
       projectPath,
@@ -1296,7 +1314,7 @@ Complete the pipeline step instructions above. Review the previous work and appl
     // Build complete prompt with feature info, previous context, and follow-up instructions
     let fullPrompt = `## Follow-up on Feature Implementation
 
-${feature ? this.buildFeaturePrompt(feature) : `**Feature ID:** ${featureId}`}
+${feature ? this.buildFeaturePrompt(feature, prompts.taskExecution) : `**Feature ID:** ${featureId}`}
 `;
 
     if (previousContext) {
@@ -1888,13 +1906,17 @@ Format your response as a structured markdown document.`;
       content: editedPlan || feature.planSpec.content,
     });
 
-    // Build continuation prompt and re-run the feature
+    // Get customized prompts from settings
+    const prompts = await getPromptCustomization(this.settingsService, '[AutoMode]');
+
+    // Build continuation prompt using centralized template
     const planContent = editedPlan || feature.planSpec.content || '';
-    let continuationPrompt = `The plan/specification has been approved. `;
-    if (feedback) {
-      continuationPrompt += `\n\nUser feedback: ${feedback}\n\n`;
-    }
-    continuationPrompt += `Now proceed with the implementation as specified in the plan:\n\n${planContent}\n\nImplement the feature now.`;
+    let continuationPrompt = prompts.taskExecution.continuationAfterApprovalTemplate;
+    continuationPrompt = continuationPrompt.replace(
+      /\{\{userFeedback\}\}/g,
+      feedback || ''
+    );
+    continuationPrompt = continuationPrompt.replace(/\{\{approvedPlan\}\}/g, planContent);
 
     logger.info(`Starting recovery execution for feature ${featureId}`);
 
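This hunk swaps the inlined recovery prompt for the continuationAfterApprovalTemplate setting. A plausible default for that template, reconstructed from the removed inline prompt (hypothetical wording, not the actual settings value):

```typescript
// Hypothetical default for continuationAfterApprovalTemplate, mirroring the inline
// prompt removed in this hunk; the real default lives in the settings/prompt module.
const continuationAfterApprovalTemplate = `The plan/specification has been approved.

{{userFeedback}}

Now proceed with the implementation as specified in the plan:

{{approvedPlan}}

Implement the feature now.`;
```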
@@ -2225,7 +2247,13 @@ Format your response as a structured markdown document.`;
     return planningPrompt + '\n\n---\n\n## Feature Request\n\n';
   }
 
-  private buildFeaturePrompt(feature: Feature): string {
+  private buildFeaturePrompt(
+    feature: Feature,
+    taskExecutionPrompts: {
+      implementationInstructions: string;
+      playwrightVerificationInstructions: string;
+    }
+  ): string {
     const title = this.extractTitleFromDescription(feature.description);
 
     let prompt = `## Feature Implementation Task
@@ -2267,80 +2295,10 @@ You can use the Read tool to view these images at any time during implementation
     // Add verification instructions based on testing mode
     if (feature.skipTests) {
       // Manual verification - just implement the feature
-      prompt += `
-## Instructions
-
-Implement this feature by:
-1. First, explore the codebase to understand the existing structure
-2. Plan your implementation approach
-3. Write the necessary code changes
-4. Ensure the code follows existing patterns and conventions
-
-When done, wrap your final summary in <summary> tags like this:
-
-<summary>
-## Summary: [Feature Title]
-
-### Changes Implemented
-- [List of changes made]
-
-### Files Modified
-- [List of files]
-
-### Notes for Developer
-- [Any important notes]
-</summary>
-
-This helps parse your summary correctly in the output logs.`;
+      prompt += `\n${taskExecutionPrompts.implementationInstructions}`;
     } else {
       // Automated testing - implement and verify with Playwright
-      prompt += `
-## Instructions
-
-Implement this feature by:
-1. First, explore the codebase to understand the existing structure
-2. Plan your implementation approach
-3. Write the necessary code changes
-4. Ensure the code follows existing patterns and conventions
-
-## Verification with Playwright (REQUIRED)
-
-After implementing the feature, you MUST verify it works correctly using Playwright:
-
-1. **Create a temporary Playwright test** to verify the feature works as expected
-2. **Run the test** to confirm the feature is working
-3. **Delete the test file** after verification - this is a temporary verification test, not a permanent test suite addition
-
-Example verification workflow:
-```bash
-# Create a simple verification test
-npx playwright test my-verification-test.spec.ts
-
-# After successful verification, delete the test
-rm my-verification-test.spec.ts
-```
-
-The test should verify the core functionality of the feature. If the test fails, fix the implementation and re-test.
-
-When done, wrap your final summary in <summary> tags like this:
-
-<summary>
-## Summary: [Feature Title]
-
-### Changes Implemented
-- [List of changes made]
-
-### Files Modified
-- [List of files]
-
-### Verification Status
-- [Describe how the feature was verified with Playwright]
-
-### Notes for Developer
-- [Any important notes]
-</summary>
-
-This helps parse your summary correctly in the output logs.`;
+      prompt += `\n${taskExecutionPrompts.implementationInstructions}\n\n${taskExecutionPrompts.playwrightVerificationInstructions}`;
     }
 
     return prompt;
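The two removed instruction blocks become the settings-backed implementationInstructions and playwrightVerificationInstructions. A compressed sketch of how buildFeaturePrompt consumes them; the field values here are abbreviated stand-ins, while the shipped defaults presumably mirror the inline text removed above:

```typescript
// Abbreviated sketch of the taskExecution prompt group consumed by buildFeaturePrompt.
// The string values are stand-ins; the real defaults presumably carry the full
// instruction text removed in this hunk.
const taskExecution = {
  implementationInstructions:
    '## Instructions\n\nImplement this feature by exploring the codebase, planning, ' +
    'writing the changes, and wrapping the final summary in <summary> tags.',
  playwrightVerificationInstructions:
    '## Verification with Playwright (REQUIRED)\n\nCreate a temporary Playwright test, ' +
    'run it to confirm the feature works, then delete the test file.',
};

const skipTests = false; // stand-in for feature.skipTests
let prompt = '...feature description built earlier in buildFeaturePrompt...';
if (skipTests) {
  // Manual verification: implementation instructions only
  prompt += `\n${taskExecution.implementationInstructions}`;
} else {
  // Automated testing: implementation plus Playwright verification
  prompt += `\n${taskExecution.implementationInstructions}\n\n${taskExecution.playwrightVerificationInstructions}`;
}
```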
@@ -2910,6 +2868,12 @@ After generating the revised spec, output:
       `Starting multi-agent execution: ${parsedTasks.length} tasks for feature ${featureId}`
     );
 
+    // Get customized prompts for task execution
+    const taskPrompts = await getPromptCustomization(
+      this.settingsService,
+      '[AutoMode]'
+    );
+
     // Execute each task with a separate agent
     for (let taskIndex = 0; taskIndex < parsedTasks.length; taskIndex++) {
       const task = parsedTasks[taskIndex];
@@ -2941,6 +2905,7 @@ After generating the revised spec, output:
         parsedTasks,
         taskIndex,
         approvedPlanContent,
+        taskPrompts.taskExecution.taskPromptTemplate,
         userFeedback
       );
 
@@ -3023,15 +2988,21 @@ After generating the revised spec, output:
       `No parsed tasks, using single-agent execution for feature ${featureId}`
     );
 
-    const continuationPrompt = `The plan/specification has been approved. Now implement it.
-${userFeedback ? `\n## User Feedback\n${userFeedback}\n` : ''}
-## Approved Plan
-
-${approvedPlanContent}
-
-## Instructions
-
-Implement all the changes described in the plan above.`;
+    // Get customized prompts for continuation
+    const taskPrompts = await getPromptCustomization(
+      this.settingsService,
+      '[AutoMode]'
+    );
+    let continuationPrompt =
+      taskPrompts.taskExecution.continuationAfterApprovalTemplate;
+    continuationPrompt = continuationPrompt.replace(
+      /\{\{userFeedback\}\}/g,
+      userFeedback || ''
+    );
+    continuationPrompt = continuationPrompt.replace(
+      /\{\{approvedPlan\}\}/g,
+      approvedPlanContent
+    );
 
     const continuationStream = provider.executeQuery({
       prompt: continuationPrompt,
@@ -3151,17 +3122,16 @@ Implement all the changes described in the plan above.`;
       throw new Error(`Feature ${featureId} not found`);
     }
 
-    const prompt = `## Continuing Feature Implementation
-
-${this.buildFeaturePrompt(feature)}
-
-## Previous Context
-The following is the output from a previous implementation attempt. Continue from where you left off:
-
-${context}
-
-## Instructions
-Review the previous work and continue the implementation. If the feature appears complete, verify it works correctly.`;
+    // Get customized prompts from settings
+    const prompts = await getPromptCustomization(this.settingsService, '[AutoMode]');
+
+    // Build the feature prompt
+    const featurePrompt = this.buildFeaturePrompt(feature, prompts.taskExecution);
+
+    // Use the resume feature template with variable substitution
+    let prompt = prompts.taskExecution.resumeFeatureTemplate;
+    prompt = prompt.replace(/\{\{featurePrompt\}\}/g, featurePrompt);
+    prompt = prompt.replace(/\{\{previousContext\}\}/g, context);
 
     return this.executeFeature(projectPath, featureId, useWorktrees, false, undefined, {
       continuationPrompt: prompt,
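executeFeatureWithContext now builds its prompt from resumeFeatureTemplate. A plausible default, reconstructed from the removed inline prompt; the placeholders match the two .replace() calls above (hypothetical wording, not the actual settings value):

```typescript
// Hypothetical default for resumeFeatureTemplate, mirroring the inline prompt
// removed in this hunk.
const resumeFeatureTemplate = `## Continuing Feature Implementation

{{featurePrompt}}

## Previous Context
The following is the output from a previous implementation attempt. Continue from where you left off:

{{previousContext}}

## Instructions
Review the previous work and continue the implementation. If the feature appears complete, verify it works correctly.`;
```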
@@ -3282,68 +3252,42 @@ Review the previous work and continue the implementation. If the feature appears
     allTasks: ParsedTask[],
     taskIndex: number,
     planContent: string,
+    taskPromptTemplate: string,
     userFeedback?: string
   ): string {
     const completedTasks = allTasks.slice(0, taskIndex);
     const remainingTasks = allTasks.slice(taskIndex + 1);
 
-    let prompt = `# Task Execution: ${task.id}
-
-You are executing a specific task as part of a larger feature implementation.
-
-## Your Current Task
-
-**Task ID:** ${task.id}
-**Description:** ${task.description}
-${task.filePath ? `**Primary File:** ${task.filePath}` : ''}
-${task.phase ? `**Phase:** ${task.phase}` : ''}
-
-## Context
-
-`;
-
-    // Show what's already done
-    if (completedTasks.length > 0) {
-      prompt += `### Already Completed (${completedTasks.length} tasks)
-${completedTasks.map((t) => `- [x] ${t.id}: ${t.description}`).join('\n')}
-
-`;
-    }
-
-    // Show remaining tasks
-    if (remainingTasks.length > 0) {
-      prompt += `### Coming Up Next (${remainingTasks.length} tasks remaining)
-${remainingTasks
-  .slice(0, 3)
-  .map((t) => `- [ ] ${t.id}: ${t.description}`)
-  .join('\n')}
-${remainingTasks.length > 3 ? `... and ${remainingTasks.length - 3} more tasks` : ''}
-
-`;
-    }
-
-    // Add user feedback if any
-    if (userFeedback) {
-      prompt += `### User Feedback
-${userFeedback}
-
-`;
-    }
-
-    // Add relevant excerpt from plan (just the task-related part to save context)
-    prompt += `### Reference: Full Plan
-<details>
-${planContent}
-</details>
-
-## Instructions
-
-1. Focus ONLY on completing task ${task.id}: "${task.description}"
-2. Do not work on other tasks
-3. Use the existing codebase patterns
-4. When done, summarize what you implemented
-
-Begin implementing task ${task.id} now.`;
+    // Build completed tasks string
+    const completedTasksStr =
+      completedTasks.length > 0
+        ? `### Already Completed (${completedTasks.length} tasks)\n${completedTasks.map((t) => `- [x] ${t.id}: ${t.description}`).join('\n')}\n`
+        : '';
+
+    // Build remaining tasks string
+    const remainingTasksStr =
+      remainingTasks.length > 0
+        ? `### Coming Up Next (${remainingTasks.length} tasks remaining)\n${remainingTasks
+            .slice(0, 3)
+            .map((t) => `- [ ] ${t.id}: ${t.description}`)
+            .join(
+              '\n'
+            )}${remainingTasks.length > 3 ? `\n... and ${remainingTasks.length - 3} more tasks` : ''}\n`
+        : '';
+
+    // Build user feedback string
+    const userFeedbackStr = userFeedback ? `### User Feedback\n${userFeedback}\n` : '';
+
+    // Use centralized template with variable substitution
+    let prompt = taskPromptTemplate;
+    prompt = prompt.replace(/\{\{taskId\}\}/g, task.id);
+    prompt = prompt.replace(/\{\{taskDescription\}\}/g, task.description);
+    prompt = prompt.replace(/\{\{taskFilePath\}\}/g, task.filePath || '');
+    prompt = prompt.replace(/\{\{taskPhase\}\}/g, task.phase || '');
+    prompt = prompt.replace(/\{\{completedTasks\}\}/g, completedTasksStr);
+    prompt = prompt.replace(/\{\{remainingTasks\}\}/g, remainingTasksStr);
+    prompt = prompt.replace(/\{\{userFeedback\}\}/g, userFeedbackStr);
+    prompt = prompt.replace(/\{\{planContent\}\}/g, planContent);
 
     return prompt;
   }
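buildTaskPrompt now defers its wording to taskPromptTemplate. A plausible default, reconstructed from the removed inline prompt; the eight placeholders correspond to the .replace() calls above (hypothetical wording, not the actual settings value):

```typescript
// Hypothetical default for taskPromptTemplate, mirroring the inline prompt removed
// in this hunk.
const taskPromptTemplate = `# Task Execution: {{taskId}}

You are executing a specific task as part of a larger feature implementation.

## Your Current Task

**Task ID:** {{taskId}}
**Description:** {{taskDescription}}
{{taskFilePath}}
{{taskPhase}}

## Context

{{completedTasks}}
{{remainingTasks}}
{{userFeedback}}

### Reference: Full Plan
{{planContent}}

## Instructions

1. Focus ONLY on completing task {{taskId}}: "{{taskDescription}}"
2. Do not work on other tasks
3. Use the existing codebase patterns
4. When done, summarize what you implemented

Begin implementing task {{taskId}} now.`;
```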
@@ -3553,32 +3497,13 @@ Begin implementing task ${task.id} now.`;
     // Limit output to avoid token limits
     const truncatedOutput = agentOutput.length > 10000 ? agentOutput.slice(-10000) : agentOutput;
 
-    const userPrompt = `You are an Architecture Decision Record (ADR) extractor. Analyze this implementation and return ONLY JSON with learnings. No explanations.
-
-Feature: "${feature.title}"
-
-Implementation log:
-${truncatedOutput}
-
-Extract MEANINGFUL learnings - not obvious things. For each, capture:
-- DECISIONS: Why this approach vs alternatives? What would break if changed?
-- GOTCHAS: What was unexpected? What's the root cause? How to avoid?
-- PATTERNS: Why this pattern? What problem does it solve? Trade-offs?
-
-JSON format ONLY (no markdown, no text):
-{"learnings": [{
-"category": "architecture|api|ui|database|auth|testing|performance|security|gotchas",
-"type": "decision|gotcha|pattern",
-"content": "What was done/learned",
-"context": "Problem being solved or situation faced",
-"why": "Reasoning - why this approach",
-"rejected": "Alternative considered and why rejected",
-"tradeoffs": "What became easier/harder",
-"breaking": "What breaks if this is changed/removed"
-}]}
-
-IMPORTANT: Only include NON-OBVIOUS learnings with real reasoning. Skip trivial patterns.
-If nothing notable: {"learnings": []}`;
+    // Get customized prompts from settings
+    const prompts = await getPromptCustomization(this.settingsService, '[AutoMode]');
+
+    // Build user prompt using centralized template with variable substitution
+    let userPrompt = prompts.taskExecution.learningExtractionUserPromptTemplate;
+    userPrompt = userPrompt.replace(/\{\{featureTitle\}\}/g, feature.title || '');
+    userPrompt = userPrompt.replace(/\{\{implementationLog\}\}/g, truncatedOutput);
 
     try {
       // Get model from phase settings
@@ -3612,8 +3537,7 @@ If nothing notable: {"learnings": []}`;
         cwd: projectPath,
         maxTurns: 1,
         allowedTools: [],
-        systemPrompt:
-          'You are a JSON extraction assistant. You MUST respond with ONLY valid JSON, no explanations, no markdown, no other text. Extract learnings from the provided implementation context and return them as JSON.',
+        systemPrompt: prompts.taskExecution.learningExtractionSystemPrompt,
      });
 
       const responseText = result.text;
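recordLearningsFromFeature now reads both learning-extraction prompts from settings. Plausible defaults, reconstructed from the inline strings removed in the last two hunks; the user template's placeholders match the .replace() calls above (hypothetical wording, not the actual settings values):

```typescript
// Hypothetical defaults for the learning-extraction prompts, mirroring the inline
// strings removed in the two hunks above.
const learningExtractionSystemPrompt =
  'You are a JSON extraction assistant. You MUST respond with ONLY valid JSON, no ' +
  'explanations, no markdown, no other text. Extract learnings from the provided ' +
  'implementation context and return them as JSON.';

const learningExtractionUserPromptTemplate = `You are an Architecture Decision Record (ADR) extractor. Analyze this implementation and return ONLY JSON with learnings. No explanations.

Feature: "{{featureTitle}}"

Implementation log:
{{implementationLog}}

If nothing notable: {"learnings": []}`;
// The middle of the real template (the "Extract MEANINGFUL learnings" guidance and the
// JSON schema) is omitted here for brevity; it matches the text removed above.
```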