Merge pull request #501 from AutoMaker-Org/feature/v0.11.0rc-1768426435282-1ogl

feat: centralize prompts and add customization UI for App Spec, Context, Suggestions, Tasks
Authored by Shirone on 2026-01-15 20:20:56 +00:00; committed by GitHub.
25 changed files with 1925 additions and 885 deletions

View File

@@ -219,7 +219,7 @@ app.get('/api/health/detailed', createDetailedHandler());
app.use('/api/fs', createFsRoutes(events));
app.use('/api/agent', createAgentRoutes(agentService, events));
app.use('/api/sessions', createSessionsRoutes(agentService));
app.use('/api/features', createFeaturesRoutes(featureLoader));
app.use('/api/features', createFeaturesRoutes(featureLoader, settingsService));
app.use('/api/auto-mode', createAutoModeRoutes(autoModeService));
app.use('/api/enhance-prompt', createEnhancePromptRoutes(settingsService));
app.use('/api/worktree', createWorktreeRoutes(events, settingsService));

View File

@@ -11,6 +11,14 @@ import {
mergeAgentPrompts,
mergeBacklogPlanPrompts,
mergeEnhancementPrompts,
mergeCommitMessagePrompts,
mergeTitleGenerationPrompts,
mergeIssueValidationPrompts,
mergeIdeationPrompts,
mergeAppSpecPrompts,
mergeContextDescriptionPrompts,
mergeSuggestionsPrompts,
mergeTaskExecutionPrompts,
} from '@automaker/prompts';
const logger = createLogger('SettingsHelper');
@@ -218,6 +226,14 @@ export async function getPromptCustomization(
agent: ReturnType<typeof mergeAgentPrompts>;
backlogPlan: ReturnType<typeof mergeBacklogPlanPrompts>;
enhancement: ReturnType<typeof mergeEnhancementPrompts>;
commitMessage: ReturnType<typeof mergeCommitMessagePrompts>;
titleGeneration: ReturnType<typeof mergeTitleGenerationPrompts>;
issueValidation: ReturnType<typeof mergeIssueValidationPrompts>;
ideation: ReturnType<typeof mergeIdeationPrompts>;
appSpec: ReturnType<typeof mergeAppSpecPrompts>;
contextDescription: ReturnType<typeof mergeContextDescriptionPrompts>;
suggestions: ReturnType<typeof mergeSuggestionsPrompts>;
taskExecution: ReturnType<typeof mergeTaskExecutionPrompts>;
}> {
let customization: PromptCustomization = {};
@@ -239,6 +255,14 @@ export async function getPromptCustomization(
agent: mergeAgentPrompts(customization.agent),
backlogPlan: mergeBacklogPlanPrompts(customization.backlogPlan),
enhancement: mergeEnhancementPrompts(customization.enhancement),
commitMessage: mergeCommitMessagePrompts(customization.commitMessage),
titleGeneration: mergeTitleGenerationPrompts(customization.titleGeneration),
issueValidation: mergeIssueValidationPrompts(customization.issueValidation),
ideation: mergeIdeationPrompts(customization.ideation),
appSpec: mergeAppSpecPrompts(customization.appSpec),
contextDescription: mergeContextDescriptionPrompts(customization.contextDescription),
suggestions: mergeSuggestionsPrompts(customization.suggestions),
taskExecution: mergeTaskExecutionPrompts(customization.taskExecution),
};
}
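For reference, a minimal sketch of how one of these merge helpers might combine built-in defaults with a user's partial overrides. The actual @automaker/prompts implementation is not shown in this diff, so the field names and default strings below are assumptions inferred from the call sites above.

```ts
// Hypothetical sketch -- field names and defaults are assumptions, not the real @automaker/prompts source.
interface AppSpecPrompts {
  generateSpecSystemPrompt: string;
  generateFeaturesFromSpecPrompt: string;
  structuredSpecInstructions: string;
}

const DEFAULT_APP_SPEC_PROMPTS: AppSpecPrompts = {
  generateSpecSystemPrompt: 'You are helping to define a software project specification. ...',
  generateFeaturesFromSpecPrompt: 'Generate a prioritized list of implementable features. ...',
  structuredSpecInstructions: 'Format the specification as structured output. ...',
};

// Overrides are partial: any field the user leaves unset falls back to the built-in default.
export function mergeAppSpecPrompts(overrides?: Partial<AppSpecPrompts>): AppSpecPrompts {
  return { ...DEFAULT_APP_SPEC_PROMPTS, ...overrides };
}
```

The same spread-over-defaults pattern would apply to each of the other merge helpers imported above.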

View File

@@ -14,7 +14,7 @@ import { streamingQuery } from '../../providers/simple-query-service.js';
import { parseAndCreateFeatures } from './parse-and-create-features.js';
import { getAppSpecPath } from '@automaker/platform';
import type { SettingsService } from '../../services/settings-service.js';
import { getAutoLoadClaudeMdSetting } from '../../lib/settings-helpers.js';
import { getAutoLoadClaudeMdSetting, getPromptCustomization } from '../../lib/settings-helpers.js';
const logger = createLogger('SpecRegeneration');
@@ -53,38 +53,16 @@ export async function generateFeaturesFromSpec(
return;
}
// Get customized prompts from settings
const prompts = await getPromptCustomization(settingsService, '[FeatureGeneration]');
const prompt = `Based on this project specification:
${spec}
Generate a prioritized list of implementable features. For each feature provide:
${prompts.appSpec.generateFeaturesFromSpecPrompt}
1. **id**: A unique lowercase-hyphenated identifier
2. **category**: Functional category (e.g., "Core", "UI", "API", "Authentication", "Database")
3. **title**: Short descriptive title
4. **description**: What this feature does (2-3 sentences)
5. **priority**: 1 (high), 2 (medium), or 3 (low)
6. **complexity**: "simple", "moderate", or "complex"
7. **dependencies**: Array of feature IDs this depends on (can be empty)
Format as JSON:
{
"features": [
{
"id": "feature-id",
"category": "Feature Category",
"title": "Feature Title",
"description": "What it does",
"priority": 1,
"complexity": "moderate",
"dependencies": []
}
]
}
Generate ${featureCount} features that build on each other logically.
IMPORTANT: Do not ask for clarification. The specification is provided above. Generate the JSON immediately.`;
Generate ${featureCount} features that build on each other logically.`;
logger.info('========== PROMPT BEING SENT ==========');
logger.info(`Prompt length: ${prompt.length} chars`);

View File

@@ -7,12 +7,7 @@
import * as secureFs from '../../lib/secure-fs.js';
import type { EventEmitter } from '../../lib/events.js';
import {
specOutputSchema,
specToXml,
getStructuredSpecPromptInstruction,
type SpecOutput,
} from '../../lib/app-spec-format.js';
import { specOutputSchema, specToXml, type SpecOutput } from '../../lib/app-spec-format.js';
import { createLogger } from '@automaker/utils';
import { DEFAULT_PHASE_MODELS, isCursorModel } from '@automaker/types';
import { resolvePhaseModel } from '@automaker/model-resolver';
@@ -21,7 +16,7 @@ import { streamingQuery } from '../../providers/simple-query-service.js';
import { generateFeaturesFromSpec } from './generate-features-from-spec.js';
import { ensureAutomakerDir, getAppSpecPath } from '@automaker/platform';
import type { SettingsService } from '../../services/settings-service.js';
import { getAutoLoadClaudeMdSetting } from '../../lib/settings-helpers.js';
import { getAutoLoadClaudeMdSetting, getPromptCustomization } from '../../lib/settings-helpers.js';
const logger = createLogger('SpecRegeneration');
@@ -43,6 +38,9 @@ export async function generateSpec(
logger.info('analyzeProject:', analyzeProject);
logger.info('maxFeatures:', maxFeatures);
// Get customized prompts from settings
const prompts = await getPromptCustomization(settingsService, '[SpecRegeneration]');
// Build the prompt based on whether we should analyze the project
let analysisInstructions = '';
let techStackDefaults = '';
@@ -66,9 +64,7 @@ export async function generateSpec(
Use these technologies as the foundation for the specification.`;
}
const prompt = `You are helping to define a software project specification.
IMPORTANT: Never ask for clarification or additional information. Use the information provided and make reasonable assumptions to create the best possible specification. If details are missing, infer them based on common patterns and best practices.
const prompt = `${prompts.appSpec.generateSpecSystemPrompt}
Project Overview:
${projectOverview}
@@ -77,7 +73,7 @@ ${techStackDefaults}
${analysisInstructions}
${getStructuredSpecPromptInstruction()}`;
${prompts.appSpec.structuredSpecInstructions}`;
logger.info('========== PROMPT BEING SENT ==========');
logger.info(`Prompt length: ${prompt.length} chars`);
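With the inline text removed, generateSpec assembles its prompt from the merged appSpec group plus the dynamic sections it still computes locally. A compressed sketch of that assembly; the `prompts` argument stands in for the object returned by getPromptCustomization above, and the parameter names are illustrative.

```ts
// prompts stands in for the result of getPromptCustomization(settingsService, '[SpecRegeneration]').
interface AppSpecPromptGroup {
  generateSpecSystemPrompt: string;
  structuredSpecInstructions: string;
}

function assembleSpecPrompt(
  prompts: { appSpec: AppSpecPromptGroup },
  projectOverview: string,
  techStackDefaults: string,
  analysisInstructions: string
): string {
  // The system preamble and the structured-output instructions now come from settings;
  // only the project-specific sections are still built in this file.
  return `${prompts.appSpec.generateSpecSystemPrompt}

Project Overview:
${projectOverview}

${techStackDefaults}
${analysisInstructions}

${prompts.appSpec.structuredSpecInstructions}`;
}
```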

View File

@@ -19,7 +19,10 @@ import { simpleQuery } from '../../../providers/simple-query-service.js';
import * as secureFs from '../../../lib/secure-fs.js';
import * as path from 'path';
import type { SettingsService } from '../../../services/settings-service.js';
import { getAutoLoadClaudeMdSetting } from '../../../lib/settings-helpers.js';
import {
getAutoLoadClaudeMdSetting,
getPromptCustomization,
} from '../../../lib/settings-helpers.js';
const logger = createLogger('DescribeFile');
@@ -130,11 +133,12 @@ export function createDescribeFileHandler(
// Get the filename for context
const fileName = path.basename(resolvedPath);
// Get customized prompts from settings
const prompts = await getPromptCustomization(settingsService, '[DescribeFile]');
// Build prompt with file content passed as structured data
// The file content is included directly, not via tool invocation
const prompt = `Analyze the following file and provide a 1-2 sentence description suitable for use as context in an AI coding assistant. Focus on what the file contains, its purpose, and why an AI agent might want to use this context in the future (e.g., "API documentation for the authentication endpoints", "Configuration file for database connections", "Coding style guidelines for the project").
Respond with ONLY the description text, no additional formatting, preamble, or explanation.
const prompt = `${prompts.contextDescription.describeFilePrompt}
File: ${fileName}${truncated ? ' (truncated)' : ''}

View File

@@ -19,7 +19,10 @@ import { simpleQuery } from '../../../providers/simple-query-service.js';
import * as secureFs from '../../../lib/secure-fs.js';
import * as path from 'path';
import type { SettingsService } from '../../../services/settings-service.js';
import { getAutoLoadClaudeMdSetting } from '../../../lib/settings-helpers.js';
import {
getAutoLoadClaudeMdSetting,
getPromptCustomization,
} from '../../../lib/settings-helpers.js';
const logger = createLogger('DescribeImage');
@@ -278,12 +281,11 @@ export function createDescribeImageHandler(
logger.info(`[${requestId}] Using model: ${model}`);
// Build the instruction text
const instructionText =
`Describe this image in 1-2 sentences suitable for use as context in an AI coding assistant. ` +
`Focus on what the image shows and its purpose (e.g., "UI mockup showing login form with email/password fields", ` +
`"Architecture diagram of microservices", "Screenshot of error message in terminal").\n\n` +
`Respond with ONLY the description text, no additional formatting, preamble, or explanation.`;
// Get customized prompts from settings
const prompts = await getPromptCustomization(settingsService, '[DescribeImage]');
// Build the instruction text from centralized prompts
const instructionText = prompts.contextDescription.describeImagePrompt;
// Build prompt based on provider capability
// Some providers (like Cursor) may not support image content blocks
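Both describe handlers now pull their instruction text from a shared contextDescription group. A minimal sketch of what that group's defaults could look like, reconstructed (and abbreviated) from the inline strings removed above; only describeFilePrompt and describeImagePrompt appear in the diff, so any other fields would be assumptions.

```ts
// Reconstructed from the removed inline strings above; illustrative, not the actual @automaker/prompts source.
export const DEFAULT_CONTEXT_DESCRIPTION_PROMPTS = {
  describeFilePrompt:
    'Analyze the following file and provide a 1-2 sentence description suitable for use as context ' +
    'in an AI coding assistant. Respond with ONLY the description text, no additional formatting.',
  describeImagePrompt:
    'Describe this image in 1-2 sentences suitable for use as context in an AI coding assistant. ' +
    'Respond with ONLY the description text, no additional formatting.',
};

export function mergeContextDescriptionPrompts(
  overrides?: Partial<typeof DEFAULT_CONTEXT_DESCRIPTION_PROMPTS>
) {
  return { ...DEFAULT_CONTEXT_DESCRIPTION_PROMPTS, ...overrides };
}
```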

View File

@@ -4,6 +4,7 @@
import { Router } from 'express';
import { FeatureLoader } from '../../services/feature-loader.js';
import type { SettingsService } from '../../services/settings-service.js';
import { validatePathParams } from '../../middleware/validate-paths.js';
import { createListHandler } from './routes/list.js';
import { createGetHandler } from './routes/get.js';
@@ -15,7 +16,10 @@ import { createDeleteHandler } from './routes/delete.js';
import { createAgentOutputHandler, createRawOutputHandler } from './routes/agent-output.js';
import { createGenerateTitleHandler } from './routes/generate-title.js';
export function createFeaturesRoutes(featureLoader: FeatureLoader): Router {
export function createFeaturesRoutes(
featureLoader: FeatureLoader,
settingsService?: SettingsService
): Router {
const router = Router();
router.post('/list', validatePathParams('projectPath'), createListHandler(featureLoader));
@@ -35,7 +39,7 @@ export function createFeaturesRoutes(featureLoader: FeatureLoader): Router {
router.post('/delete', validatePathParams('projectPath'), createDeleteHandler(featureLoader));
router.post('/agent-output', createAgentOutputHandler(featureLoader));
router.post('/raw-output', createRawOutputHandler(featureLoader));
router.post('/generate-title', createGenerateTitleHandler());
router.post('/generate-title', createGenerateTitleHandler(settingsService));
return router;
}
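createFeaturesRoutes now accepts an optional SettingsService and forwards it only to the handler that needs it. A standalone sketch of the same optional-injection pattern, with the service interface stubbed for illustration (the real SettingsService has a different shape):

```ts
import { Router, type Request, type Response } from 'express';

// Stand-in for the project's SettingsService; only the shape used in this sketch matters.
interface SettingsServiceLike {
  getGlobalSettings(): Promise<Record<string, unknown>>;
}

// The service is optional so existing call sites keep compiling; handlers fall back
// to built-in defaults when it is not provided.
export function createExampleRoutes(settingsService?: SettingsServiceLike): Router {
  const router = Router();
  router.post('/generate-title', async (_req: Request, res: Response) => {
    const settings = settingsService ? await settingsService.getGlobalSettings() : {};
    res.json({ usedCustomSettings: Boolean(settingsService), settings });
  });
  return router;
}
```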

View File

@@ -9,6 +9,8 @@ import type { Request, Response } from 'express';
import { createLogger } from '@automaker/utils';
import { CLAUDE_MODEL_MAP } from '@automaker/model-resolver';
import { simpleQuery } from '../../../providers/simple-query-service.js';
import type { SettingsService } from '../../../services/settings-service.js';
import { getPromptCustomization } from '../../../lib/settings-helpers.js';
const logger = createLogger('GenerateTitle');
@@ -26,16 +28,9 @@ interface GenerateTitleErrorResponse {
error: string;
}
const SYSTEM_PROMPT = `You are a title generator. Your task is to create a concise, descriptive title (5-10 words max) for a software feature based on its description.
Rules:
- Output ONLY the title, nothing else
- Keep it short and action-oriented (e.g., "Add dark mode toggle", "Fix login validation")
- Start with a verb when possible (Add, Fix, Update, Implement, Create, etc.)
- No quotes, periods, or extra formatting
- Capture the essence of the feature in a scannable way`;
export function createGenerateTitleHandler(): (req: Request, res: Response) => Promise<void> {
export function createGenerateTitleHandler(
settingsService?: SettingsService
): (req: Request, res: Response) => Promise<void> {
return async (req: Request, res: Response): Promise<void> => {
try {
const { description } = req.body as GenerateTitleRequestBody;
@@ -61,11 +56,15 @@ export function createGenerateTitleHandler(): (req: Request, res: Response) => P
logger.info(`Generating title for description: ${trimmedDescription.substring(0, 50)}...`);
// Get customized prompts from settings
const prompts = await getPromptCustomization(settingsService, '[GenerateTitle]');
const systemPrompt = prompts.titleGeneration.systemPrompt;
const userPrompt = `Generate a concise title for this feature:\n\n${trimmedDescription}`;
// Use simpleQuery - provider abstraction handles all the streaming/extraction
const result = await simpleQuery({
prompt: `${SYSTEM_PROMPT}\n\n${userPrompt}`,
prompt: `${systemPrompt}\n\n${userPrompt}`,
model: CLAUDE_MODEL_MAP.haiku,
cwd: process.cwd(),
maxTurns: 1,
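Because settingsService is optional here, getPromptCustomization presumably tolerates a missing service by falling back to the built-in defaults. That behaviour is not visible in this diff; the sketch below shows one plausible shape for it, with every name treated as an assumption.

```ts
// Plausible fallback shape only -- the real logic lives in lib/settings-helpers.ts, not shown here.
interface TitleGenerationPrompts {
  systemPrompt: string;
}

const DEFAULT_TITLE_GENERATION: TitleGenerationPrompts = {
  systemPrompt:
    'You are a title generator. Create a concise, action-oriented title (5-10 words max). Output ONLY the title.',
};

interface SettingsServiceLike {
  getPromptCustomization(): Promise<{ titleGeneration?: Partial<TitleGenerationPrompts> } | undefined>;
}

async function resolveTitlePrompts(service?: SettingsServiceLike): Promise<TitleGenerationPrompts> {
  // No service (or no stored customization) means the defaults are used unchanged.
  const stored = service ? await service.getPromptCustomization() : undefined;
  return { ...DEFAULT_TITLE_GENERATION, ...stored?.titleGeneration };
}
```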

View File

@@ -30,11 +30,11 @@ import { writeValidation } from '../../../lib/validation-storage.js';
import { streamingQuery } from '../../../providers/simple-query-service.js';
import {
issueValidationSchema,
ISSUE_VALIDATION_SYSTEM_PROMPT,
buildValidationPrompt,
ValidationComment,
ValidationLinkedPR,
} from './validation-schema.js';
import { getPromptCustomization } from '../../../lib/settings-helpers.js';
import {
trySetValidationRunning,
clearValidationStatus,
@@ -117,13 +117,17 @@ async function runValidation(
let responseText = '';
// Get customized prompts from settings
const prompts = await getPromptCustomization(settingsService, '[ValidateIssue]');
const issueValidationSystemPrompt = prompts.issueValidation.systemPrompt;
// Determine if we should use structured output (Claude/Codex support it, Cursor/OpenCode don't)
const useStructuredOutput = isClaudeModel(model) || isCodexModel(model);
// Build the final prompt - for Cursor, include system prompt and JSON schema instructions
let finalPrompt = basePrompt;
if (!useStructuredOutput) {
finalPrompt = `${ISSUE_VALIDATION_SYSTEM_PROMPT}
finalPrompt = `${issueValidationSystemPrompt}
CRITICAL INSTRUCTIONS:
1. DO NOT write any files. Return the JSON in your response only.
@@ -167,7 +171,7 @@ ${basePrompt}`;
prompt: finalPrompt,
model: model as string,
cwd: projectPath,
systemPrompt: useStructuredOutput ? ISSUE_VALIDATION_SYSTEM_PROMPT : undefined,
systemPrompt: useStructuredOutput ? issueValidationSystemPrompt : undefined,
abortController,
thinkingLevel: effectiveThinkingLevel,
reasoningEffort: effectiveReasoningEffort,
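The validation flow keeps the same provider split as before, only with the prompt now sourced from settings: providers that support structured output receive it as a separate systemPrompt, while the others get it folded into the user prompt. A self-contained sketch of that branching, with the model-detection helpers stubbed purely for illustration (the real ones are imported elsewhere in the repo):

```ts
// Stubbed detection helpers for illustration only; not the project's real implementations.
const isClaudeModel = (model: string) => model.startsWith('claude');
const isCodexModel = (model: string) => model.startsWith('codex');

interface QueryArgs {
  prompt: string;
  systemPrompt?: string;
  model: string;
}

function buildValidationQuery(model: string, systemPrompt: string, basePrompt: string): QueryArgs {
  const useStructuredOutput = isClaudeModel(model) || isCodexModel(model);
  return useStructuredOutput
    ? { prompt: basePrompt, systemPrompt, model }            // system prompt passed separately
    : { prompt: `${systemPrompt}\n\n${basePrompt}`, model }; // folded into the user prompt
}
```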

View File

@@ -1,8 +1,11 @@
/**
* Issue Validation Schema and System Prompt
* Issue Validation Schema and Prompt Building
*
* Defines the JSON schema for Claude's structured output and
* the system prompt that guides the validation process.
* helper functions for building validation prompts.
*
* Note: The system prompt is now centralized in @automaker/prompts
* and accessed via getPromptCustomization() in validate-issue.ts
*/
/**
@@ -82,76 +85,6 @@ export const issueValidationSchema = {
additionalProperties: false,
} as const;
/**
* System prompt that guides Claude in validating GitHub issues.
* Instructs the model to use read-only tools to analyze the codebase.
*/
export const ISSUE_VALIDATION_SYSTEM_PROMPT = `You are an expert code analyst validating GitHub issues against a codebase.
Your task is to analyze a GitHub issue and determine if it's valid by scanning the codebase.
## Validation Process
1. **Read the issue carefully** - Understand what is being reported or requested
2. **Search the codebase** - Use Glob to find relevant files by pattern, Grep to search for keywords
3. **Examine the code** - Use Read to look at the actual implementation in relevant files
4. **Check linked PRs** - If there are linked pull requests, use \`gh pr diff <PR_NUMBER>\` to review the changes
5. **Form your verdict** - Based on your analysis, determine if the issue is valid
## Verdicts
- **valid**: The issue describes a real problem that exists in the codebase, or a clear feature request that can be implemented. The referenced files/components exist and the issue is actionable.
- **invalid**: The issue describes behavior that doesn't exist, references non-existent files or components, is based on a misunderstanding of the code, or the described "bug" is actually expected behavior.
- **needs_clarification**: The issue lacks sufficient detail to verify. Specify what additional information is needed in the missingInfo field.
## For Bug Reports, Check:
- Do the referenced files/components exist?
- Does the code match what the issue describes?
- Is the described behavior actually a bug or expected?
- Can you locate the code that would cause the reported issue?
## For Feature Requests, Check:
- Does the feature already exist?
- Is the implementation location clear?
- Is the request technically feasible given the codebase structure?
## Analyzing Linked Pull Requests
When an issue has linked PRs (especially open ones), you MUST analyze them:
1. **Run \`gh pr diff <PR_NUMBER>\`** to see what changes the PR makes
2. **Run \`gh pr view <PR_NUMBER>\`** to see PR description and status
3. **Evaluate if the PR fixes the issue** - Does the diff address the reported problem?
4. **Provide a recommendation**:
- \`wait_for_merge\`: The PR appears to fix the issue correctly. No additional work needed - just wait for it to be merged.
- \`pr_needs_work\`: The PR attempts to fix the issue but is incomplete or has problems.
- \`no_pr\`: No relevant PR exists for this issue.
5. **Include prAnalysis in your response** with:
- hasOpenPR: true/false
- prFixesIssue: true/false (based on diff analysis)
- prNumber: the PR number you analyzed
- prSummary: brief description of what the PR changes
- recommendation: one of the above values
## Response Guidelines
- **Always include relatedFiles** when you find relevant code
- **Set bugConfirmed to true** only if you can definitively confirm a bug exists in the code
- **Provide a suggestedFix** when you have a clear idea of how to address the issue
- **Use missingInfo** when the verdict is needs_clarification to list what's needed
- **Include prAnalysis** when there are linked PRs - this is critical for avoiding duplicate work
- **Set estimatedComplexity** to help prioritize:
- trivial: Simple text changes, one-line fixes
- simple: Small changes to one file
- moderate: Changes to multiple files or moderate logic changes
- complex: Significant refactoring or new feature implementation
- very_complex: Major architectural changes or cross-cutting concerns
Be thorough in your analysis but focus on files that are directly relevant to the issue.`;
/**
* Comment data structure for validation prompt
*/

View File

@@ -15,7 +15,7 @@ import { FeatureLoader } from '../../services/feature-loader.js';
import { getAppSpecPath } from '@automaker/platform';
import * as secureFs from '../../lib/secure-fs.js';
import type { SettingsService } from '../../services/settings-service.js';
import { getAutoLoadClaudeMdSetting } from '../../lib/settings-helpers.js';
import { getAutoLoadClaudeMdSetting, getPromptCustomization } from '../../lib/settings-helpers.js';
const logger = createLogger('Suggestions');
@@ -137,11 +137,15 @@ export async function generateSuggestions(
modelOverride?: string,
thinkingLevelOverride?: ThinkingLevel
): Promise<void> {
// Get customized prompts from settings
const prompts = await getPromptCustomization(settingsService, '[Suggestions]');
// Map suggestion types to their prompts
const typePrompts: Record<string, string> = {
features: 'Analyze this project and suggest new features that would add value.',
refactoring: 'Analyze this project and identify refactoring opportunities.',
security: 'Analyze this project for security vulnerabilities and suggest fixes.',
performance: 'Analyze this project for performance issues and suggest optimizations.',
features: prompts.suggestions.featuresPrompt,
refactoring: prompts.suggestions.refactoringPrompt,
security: prompts.suggestions.securityPrompt,
performance: prompts.suggestions.performancePrompt,
};
// Load existing context to avoid duplicates
@@ -151,15 +155,7 @@ export async function generateSuggestions(
${existingContext}
${existingContext ? '\nIMPORTANT: Do NOT suggest features that are already implemented or already in the backlog above. Focus on NEW ideas that complement what already exists.\n' : ''}
Look at the codebase and provide 3-5 concrete suggestions.
For each suggestion, provide:
1. A category (e.g., "User Experience", "Security", "Performance")
2. A clear description of what to implement
3. Priority (1=high, 2=medium, 3=low)
4. Brief reasoning for why this would help
The response will be automatically formatted as structured JSON.`;
${prompts.suggestions.baseTemplate}`;
// Don't send initial message - let the agent output speak for itself
// The first agent message will be captured as an info entry
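The per-type suggestion prompts and the shared base template now come from the suggestions group. An illustrative sketch of that group's defaults, reconstructed from the strings removed above (the baseTemplate wording is condensed):

```ts
// Reconstructed from the removed strings above; illustrative defaults, not the actual package source.
export const DEFAULT_SUGGESTIONS_PROMPTS = {
  featuresPrompt: 'Analyze this project and suggest new features that would add value.',
  refactoringPrompt: 'Analyze this project and identify refactoring opportunities.',
  securityPrompt: 'Analyze this project for security vulnerabilities and suggest fixes.',
  performancePrompt: 'Analyze this project for performance issues and suggest optimizations.',
  baseTemplate:
    'Look at the codebase and provide 3-5 concrete suggestions. For each suggestion, provide a category, ' +
    'a clear description, a priority (1=high, 2=medium, 3=low), and brief reasoning. ' +
    'The response will be automatically formatted as structured JSON.',
};

export function mergeSuggestionsPrompts(overrides?: Partial<typeof DEFAULT_SUGGESTIONS_PROMPTS>) {
  return { ...DEFAULT_SUGGESTIONS_PROMPTS, ...overrides };
}
```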

View File

@@ -579,6 +579,9 @@ export class AutoModeService {
'[AutoMode]'
);
// Get customized prompts from settings
const prompts = await getPromptCustomization(this.settingsService, '[AutoMode]');
// Build the prompt - use continuation prompt if provided (for recovery after plan approval)
let prompt: string;
// Load project context files (CLAUDE.md, CODE_QUALITY.md, etc.) and memory files
@@ -604,7 +607,7 @@ export class AutoModeService {
logger.info(`Using continuation prompt for feature ${featureId}`);
} else {
// Normal flow: build prompt with planning phase
const featurePrompt = this.buildFeaturePrompt(feature);
const featurePrompt = this.buildFeaturePrompt(feature, prompts.taskExecution);
const planningPrefix = await this.getPlanningPromptPrefix(feature);
prompt = planningPrefix + featurePrompt;
@@ -783,6 +786,9 @@ export class AutoModeService {
): Promise<void> {
logger.info(`Executing ${steps.length} pipeline step(s) for feature ${featureId}`);
// Get customized prompts from settings
const prompts = await getPromptCustomization(this.settingsService, '[AutoMode]');
// Load context files once with feature context for smart memory selection
const contextResult = await loadContextFiles({
projectPath,
@@ -827,7 +833,12 @@ export class AutoModeService {
});
// Build prompt for this pipeline step
const prompt = this.buildPipelineStepPrompt(step, feature, previousContext);
const prompt = this.buildPipelineStepPrompt(
step,
feature,
previousContext,
prompts.taskExecution
);
// Get model from feature
const model = resolveModelString(feature.model, DEFAULT_MODELS.claude);
@@ -882,14 +893,18 @@ export class AutoModeService {
private buildPipelineStepPrompt(
step: PipelineStep,
feature: Feature,
previousContext: string
previousContext: string,
taskExecutionPrompts: {
implementationInstructions: string;
playwrightVerificationInstructions: string;
}
): string {
let prompt = `## Pipeline Step: ${step.name}
This is an automated pipeline step following the initial feature implementation.
### Feature Context
${this.buildFeaturePrompt(feature)}
${this.buildFeaturePrompt(feature, taskExecutionPrompts)}
`;
@@ -1279,6 +1294,9 @@ Complete the pipeline step instructions above. Review the previous work and appl
'[AutoMode]'
);
// Get customized prompts from settings
const prompts = await getPromptCustomization(this.settingsService, '[AutoMode]');
// Load project context files (CLAUDE.md, CODE_QUALITY.md, etc.) - passed as system prompt
const contextResult = await loadContextFiles({
projectPath,
@@ -1296,7 +1314,7 @@ Complete the pipeline step instructions above. Review the previous work and appl
// Build complete prompt with feature info, previous context, and follow-up instructions
let fullPrompt = `## Follow-up on Feature Implementation
${feature ? this.buildFeaturePrompt(feature) : `**Feature ID:** ${featureId}`}
${feature ? this.buildFeaturePrompt(feature, prompts.taskExecution) : `**Feature ID:** ${featureId}`}
`;
if (previousContext) {
@@ -1888,13 +1906,17 @@ Format your response as a structured markdown document.`;
content: editedPlan || feature.planSpec.content,
});
// Build continuation prompt and re-run the feature
// Get customized prompts from settings
const prompts = await getPromptCustomization(this.settingsService, '[AutoMode]');
// Build continuation prompt using centralized template
const planContent = editedPlan || feature.planSpec.content || '';
let continuationPrompt = `The plan/specification has been approved. `;
if (feedback) {
continuationPrompt += `\n\nUser feedback: ${feedback}\n\n`;
}
continuationPrompt += `Now proceed with the implementation as specified in the plan:\n\n${planContent}\n\nImplement the feature now.`;
let continuationPrompt = prompts.taskExecution.continuationAfterApprovalTemplate;
continuationPrompt = continuationPrompt.replace(
/\{\{userFeedback\}\}/g,
feedback || ''
);
continuationPrompt = continuationPrompt.replace(/\{\{approvedPlan\}\}/g, planContent);
logger.info(`Starting recovery execution for feature ${featureId}`);
@@ -2225,7 +2247,13 @@ Format your response as a structured markdown document.`;
return planningPrompt + '\n\n---\n\n## Feature Request\n\n';
}
private buildFeaturePrompt(feature: Feature): string {
private buildFeaturePrompt(
feature: Feature,
taskExecutionPrompts: {
implementationInstructions: string;
playwrightVerificationInstructions: string;
}
): string {
const title = this.extractTitleFromDescription(feature.description);
let prompt = `## Feature Implementation Task
@@ -2267,80 +2295,10 @@ You can use the Read tool to view these images at any time during implementation
// Add verification instructions based on testing mode
if (feature.skipTests) {
// Manual verification - just implement the feature
prompt += `
## Instructions
Implement this feature by:
1. First, explore the codebase to understand the existing structure
2. Plan your implementation approach
3. Write the necessary code changes
4. Ensure the code follows existing patterns and conventions
When done, wrap your final summary in <summary> tags like this:
<summary>
## Summary: [Feature Title]
### Changes Implemented
- [List of changes made]
### Files Modified
- [List of files]
### Notes for Developer
- [Any important notes]
</summary>
This helps parse your summary correctly in the output logs.`;
prompt += `\n${taskExecutionPrompts.implementationInstructions}`;
} else {
// Automated testing - implement and verify with Playwright
prompt += `
## Instructions
Implement this feature by:
1. First, explore the codebase to understand the existing structure
2. Plan your implementation approach
3. Write the necessary code changes
4. Ensure the code follows existing patterns and conventions
## Verification with Playwright (REQUIRED)
After implementing the feature, you MUST verify it works correctly using Playwright:
1. **Create a temporary Playwright test** to verify the feature works as expected
2. **Run the test** to confirm the feature is working
3. **Delete the test file** after verification - this is a temporary verification test, not a permanent test suite addition
Example verification workflow:
\`\`\`bash
# Create a simple verification test
npx playwright test my-verification-test.spec.ts
# After successful verification, delete the test
rm my-verification-test.spec.ts
\`\`\`
The test should verify the core functionality of the feature. If the test fails, fix the implementation and re-test.
When done, wrap your final summary in <summary> tags like this:
<summary>
## Summary: [Feature Title]
### Changes Implemented
- [List of changes made]
### Files Modified
- [List of files]
### Verification Status
- [Describe how the feature was verified with Playwright]
### Notes for Developer
- [Any important notes]
</summary>
This helps parse your summary correctly in the output logs.`;
prompt += `\n${taskExecutionPrompts.implementationInstructions}\n\n${taskExecutionPrompts.playwrightVerificationInstructions}`;
}
return prompt;
@@ -2910,6 +2868,12 @@ After generating the revised spec, output:
`Starting multi-agent execution: ${parsedTasks.length} tasks for feature ${featureId}`
);
// Get customized prompts for task execution
const taskPrompts = await getPromptCustomization(
this.settingsService,
'[AutoMode]'
);
// Execute each task with a separate agent
for (let taskIndex = 0; taskIndex < parsedTasks.length; taskIndex++) {
const task = parsedTasks[taskIndex];
@@ -2941,6 +2905,7 @@ After generating the revised spec, output:
parsedTasks,
taskIndex,
approvedPlanContent,
taskPrompts.taskExecution.taskPromptTemplate,
userFeedback
);
@@ -3023,15 +2988,21 @@ After generating the revised spec, output:
`No parsed tasks, using single-agent execution for feature ${featureId}`
);
const continuationPrompt = `The plan/specification has been approved. Now implement it.
${userFeedback ? `\n## User Feedback\n${userFeedback}\n` : ''}
## Approved Plan
${approvedPlanContent}
## Instructions
Implement all the changes described in the plan above.`;
// Get customized prompts for continuation
const taskPrompts = await getPromptCustomization(
this.settingsService,
'[AutoMode]'
);
let continuationPrompt =
taskPrompts.taskExecution.continuationAfterApprovalTemplate;
continuationPrompt = continuationPrompt.replace(
/\{\{userFeedback\}\}/g,
userFeedback || ''
);
continuationPrompt = continuationPrompt.replace(
/\{\{approvedPlan\}\}/g,
approvedPlanContent
);
const continuationStream = provider.executeQuery({
prompt: continuationPrompt,
@@ -3151,17 +3122,16 @@ Implement all the changes described in the plan above.`;
throw new Error(`Feature ${featureId} not found`);
}
const prompt = `## Continuing Feature Implementation
// Get customized prompts from settings
const prompts = await getPromptCustomization(this.settingsService, '[AutoMode]');
${this.buildFeaturePrompt(feature)}
// Build the feature prompt
const featurePrompt = this.buildFeaturePrompt(feature, prompts.taskExecution);
## Previous Context
The following is the output from a previous implementation attempt. Continue from where you left off:
${context}
## Instructions
Review the previous work and continue the implementation. If the feature appears complete, verify it works correctly.`;
// Use the resume feature template with variable substitution
let prompt = prompts.taskExecution.resumeFeatureTemplate;
prompt = prompt.replace(/\{\{featurePrompt\}\}/g, featurePrompt);
prompt = prompt.replace(/\{\{previousContext\}\}/g, context);
return this.executeFeature(projectPath, featureId, useWorktrees, false, undefined, {
continuationPrompt: prompt,
@@ -3282,68 +3252,42 @@ Review the previous work and continue the implementation. If the feature appears
allTasks: ParsedTask[],
taskIndex: number,
planContent: string,
taskPromptTemplate: string,
userFeedback?: string
): string {
const completedTasks = allTasks.slice(0, taskIndex);
const remainingTasks = allTasks.slice(taskIndex + 1);
let prompt = `# Task Execution: ${task.id}
// Build completed tasks string
const completedTasksStr =
completedTasks.length > 0
? `### Already Completed (${completedTasks.length} tasks)\n${completedTasks.map((t) => `- [x] ${t.id}: ${t.description}`).join('\n')}\n`
: '';
You are executing a specific task as part of a larger feature implementation.
// Build remaining tasks string
const remainingTasksStr =
remainingTasks.length > 0
? `### Coming Up Next (${remainingTasks.length} tasks remaining)\n${remainingTasks
.slice(0, 3)
.map((t) => `- [ ] ${t.id}: ${t.description}`)
.join(
'\n'
)}${remainingTasks.length > 3 ? `\n... and ${remainingTasks.length - 3} more tasks` : ''}\n`
: '';
## Your Current Task
// Build user feedback string
const userFeedbackStr = userFeedback ? `### User Feedback\n${userFeedback}\n` : '';
**Task ID:** ${task.id}
**Description:** ${task.description}
${task.filePath ? `**Primary File:** ${task.filePath}` : ''}
${task.phase ? `**Phase:** ${task.phase}` : ''}
## Context
`;
// Show what's already done
if (completedTasks.length > 0) {
prompt += `### Already Completed (${completedTasks.length} tasks)
${completedTasks.map((t) => `- [x] ${t.id}: ${t.description}`).join('\n')}
`;
}
// Show remaining tasks
if (remainingTasks.length > 0) {
prompt += `### Coming Up Next (${remainingTasks.length} tasks remaining)
${remainingTasks
.slice(0, 3)
.map((t) => `- [ ] ${t.id}: ${t.description}`)
.join('\n')}
${remainingTasks.length > 3 ? `... and ${remainingTasks.length - 3} more tasks` : ''}
`;
}
// Add user feedback if any
if (userFeedback) {
prompt += `### User Feedback
${userFeedback}
`;
}
// Add relevant excerpt from plan (just the task-related part to save context)
prompt += `### Reference: Full Plan
<details>
${planContent}
</details>
## Instructions
1. Focus ONLY on completing task ${task.id}: "${task.description}"
2. Do not work on other tasks
3. Use the existing codebase patterns
4. When done, summarize what you implemented
Begin implementing task ${task.id} now.`;
// Use centralized template with variable substitution
let prompt = taskPromptTemplate;
prompt = prompt.replace(/\{\{taskId\}\}/g, task.id);
prompt = prompt.replace(/\{\{taskDescription\}\}/g, task.description);
prompt = prompt.replace(/\{\{taskFilePath\}\}/g, task.filePath || '');
prompt = prompt.replace(/\{\{taskPhase\}\}/g, task.phase || '');
prompt = prompt.replace(/\{\{completedTasks\}\}/g, completedTasksStr);
prompt = prompt.replace(/\{\{remainingTasks\}\}/g, remainingTasksStr);
prompt = prompt.replace(/\{\{userFeedback\}\}/g, userFeedbackStr);
prompt = prompt.replace(/\{\{planContent\}\}/g, planContent);
return prompt;
}
@@ -3553,32 +3497,13 @@ Begin implementing task ${task.id} now.`;
// Limit output to avoid token limits
const truncatedOutput = agentOutput.length > 10000 ? agentOutput.slice(-10000) : agentOutput;
const userPrompt = `You are an Architecture Decision Record (ADR) extractor. Analyze this implementation and return ONLY JSON with learnings. No explanations.
// Get customized prompts from settings
const prompts = await getPromptCustomization(this.settingsService, '[AutoMode]');
Feature: "${feature.title}"
Implementation log:
${truncatedOutput}
Extract MEANINGFUL learnings - not obvious things. For each, capture:
- DECISIONS: Why this approach vs alternatives? What would break if changed?
- GOTCHAS: What was unexpected? What's the root cause? How to avoid?
- PATTERNS: Why this pattern? What problem does it solve? Trade-offs?
JSON format ONLY (no markdown, no text):
{"learnings": [{
"category": "architecture|api|ui|database|auth|testing|performance|security|gotchas",
"type": "decision|gotcha|pattern",
"content": "What was done/learned",
"context": "Problem being solved or situation faced",
"why": "Reasoning - why this approach",
"rejected": "Alternative considered and why rejected",
"tradeoffs": "What became easier/harder",
"breaking": "What breaks if this is changed/removed"
}]}
IMPORTANT: Only include NON-OBVIOUS learnings with real reasoning. Skip trivial patterns.
If nothing notable: {"learnings": []}`;
// Build user prompt using centralized template with variable substitution
let userPrompt = prompts.taskExecution.learningExtractionUserPromptTemplate;
userPrompt = userPrompt.replace(/\{\{featureTitle\}\}/g, feature.title || '');
userPrompt = userPrompt.replace(/\{\{implementationLog\}\}/g, truncatedOutput);
try {
// Get model from phase settings
@@ -3612,8 +3537,7 @@ If nothing notable: {"learnings": []}`;
cwd: projectPath,
maxTurns: 1,
allowedTools: [],
systemPrompt:
'You are a JSON extraction assistant. You MUST respond with ONLY valid JSON, no explanations, no markdown, no other text. Extract learnings from the provided implementation context and return them as JSON.',
systemPrompt: prompts.taskExecution.learningExtractionSystemPrompt,
});
const responseText = result.text;
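Several call sites above repeat the same chain of `prompt.replace(/\{\{name\}\}/g, value)` calls. A tiny generic helper shows the substitution semantics in one place; this helper does not exist in the PR and is offered only as a sketch of the pattern.

```ts
// Sketch of the {{placeholder}} substitution used by the templates above (not part of the PR).
function renderTemplate(template: string, vars: Record<string, string>): string {
  return Object.entries(vars).reduce(
    (text, [name, value]) => text.replace(new RegExp(`\\{\\{${name}\\}\\}`, 'g'), value),
    template
  );
}

// Usage mirroring the continuation-after-approval flow: unset variables collapse to empty strings,
// matching the `feedback || ''` fallbacks in the code above.
const continuationPrompt = renderTemplate(
  'The plan has been approved. {{userFeedback}}\n\n{{approvedPlan}}',
  { userFeedback: '', approvedPlan: '## Plan\n1. Do the thing.' }
);
```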

View File

@@ -41,6 +41,7 @@ import type { FeatureLoader } from './feature-loader.js';
import { createChatOptions, validateWorkingDirectory } from '../lib/sdk-options.js';
import { resolveModelString } from '@automaker/model-resolver';
import { stripProviderPrefix } from '@automaker/types';
import { getPromptCustomization } from '../lib/settings-helpers.js';
const logger = createLogger('IdeationService');
@@ -195,8 +196,12 @@ export class IdeationService {
// Gather existing features and ideas to prevent duplicate suggestions
const existingWorkContext = await this.gatherExistingWorkContext(projectPath);
// Get customized prompts from settings
const prompts = await getPromptCustomization(this.settingsService, '[IdeationService]');
// Build system prompt for ideation
const systemPrompt = this.buildIdeationSystemPrompt(
prompts.ideation.ideationSystemPrompt,
contextResult.formattedPrompt,
activeSession.session.promptCategory,
existingWorkContext
@@ -645,8 +650,12 @@ export class IdeationService {
// Gather existing features and ideas to prevent duplicates
const existingWorkContext = await this.gatherExistingWorkContext(projectPath);
// Get customized prompts from settings
const prompts = await getPromptCustomization(this.settingsService, '[IdeationService]');
// Build system prompt for structured suggestions
const systemPrompt = this.buildSuggestionsSystemPrompt(
prompts.ideation.suggestionsSystemPrompt,
contextPrompt,
category,
count,
@@ -721,8 +730,14 @@ export class IdeationService {
/**
* Build system prompt for structured suggestion generation
* @param basePrompt - The base system prompt from settings
* @param contextFilesPrompt - Project context from loaded files
* @param category - The idea category to focus on
* @param count - Number of suggestions to generate
* @param existingWorkContext - Context about existing features/ideas
*/
private buildSuggestionsSystemPrompt(
basePrompt: string,
contextFilesPrompt: string | undefined,
category: IdeaCategory,
count: number = 10,
@@ -734,35 +749,18 @@ export class IdeationService {
const existingWorkSection = existingWorkContext ? `\n\n${existingWorkContext}` : '';
return `You are an AI product strategist helping brainstorm feature ideas for a software project.
// Replace placeholder {{count}} if present, otherwise append count instruction
let prompt = basePrompt;
if (prompt.includes('{{count}}')) {
prompt = prompt.replace(/\{\{count\}\}/g, String(count));
} else {
prompt += `\n\nGenerate exactly ${count} suggestions.`;
}
IMPORTANT: You do NOT have access to any tools. You CANNOT read files, search code, or run commands.
You must generate suggestions based ONLY on the project context provided below.
Do NOT say "I'll analyze" or "Let me explore" - you cannot do those things.
Based on the project context and the user's prompt, generate exactly ${count} creative and actionable feature suggestions.
YOUR RESPONSE MUST BE ONLY A JSON ARRAY - nothing else. No explanation, no preamble, no markdown code fences.
Each suggestion must have this structure:
{
"title": "Short, actionable title (max 60 chars)",
"description": "Clear description of what to build or improve (2-3 sentences)",
"rationale": "Why this is valuable - the problem it solves or opportunity it creates",
"priority": "high" | "medium" | "low"
}
return `${prompt}
Focus area: ${this.getCategoryDescription(category)}
Guidelines:
- Generate exactly ${count} suggestions
- Be specific and actionable - avoid vague ideas
- Mix different priority levels (some high, some medium, some low)
- Each suggestion should be independently implementable
- Think creatively - include both obvious improvements and innovative ideas
- Consider the project's domain and target users
- IMPORTANT: Do NOT suggest features or ideas that already exist in the "Existing Features" or "Existing Ideas" sections below
${contextSection}${existingWorkSection}`;
}
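buildSuggestionsSystemPrompt is defensive about the {{count}} placeholder: it substitutes the value when the customized prompt contains the token and appends an explicit count instruction otherwise. A standalone sketch of that fallback:

```ts
// Mirrors the {{count}} handling above; standalone for illustration.
function applyCount(basePrompt: string, count: number): string {
  if (basePrompt.includes('{{count}}')) {
    return basePrompt.replace(/\{\{count\}\}/g, String(count));
  }
  // Customized prompts that drop the placeholder still get an explicit count constraint.
  return `${basePrompt}\n\nGenerate exactly ${count} suggestions.`;
}

// Example: a prompt without the placeholder still ends with "Generate exactly 10 suggestions."
const rendered = applyCount('Suggest improvements for this project.', 10);
```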
@@ -1269,30 +1267,11 @@ ${contextSection}${existingWorkSection}`;
// ============================================================================
private buildIdeationSystemPrompt(
basePrompt: string,
contextFilesPrompt: string | undefined,
category?: IdeaCategory,
existingWorkContext?: string
): string {
const basePrompt = `You are an AI product strategist and UX expert helping brainstorm ideas for improving a software project.
Your role is to:
- Analyze the codebase structure and patterns
- Identify opportunities for improvement
- Suggest actionable ideas with clear rationale
- Consider user experience, technical feasibility, and business value
- Be specific and reference actual files/components when possible
When suggesting ideas:
1. Provide a clear, concise title
2. Explain the problem or opportunity
3. Describe the proposed solution
4. Highlight the expected benefit
5. Note any dependencies or considerations
IMPORTANT: Do NOT suggest features or ideas that already exist in the project. Check the "Existing Features" and "Existing Ideas" sections below to avoid duplicates.
Focus on practical, implementable suggestions that would genuinely improve the product.`;
const categoryContext = category
? `\n\nFocus area: ${this.getCategoryDescription(category)}`
: '';