Merge pull request #322 from casiusss/feat/customizable-prompts

feat: customizable prompts
Authored by Web Dev Cody on 2025-12-30 00:58:11 -05:00; committed by GitHub.
23 changed files with 1445 additions and 329 deletions


@@ -193,7 +193,7 @@ app.use('/api/agent', createAgentRoutes(agentService, events));
app.use('/api/sessions', createSessionsRoutes(agentService));
app.use('/api/features', createFeaturesRoutes(featureLoader));
app.use('/api/auto-mode', createAutoModeRoutes(autoModeService));
app.use('/api/enhance-prompt', createEnhancePromptRoutes());
app.use('/api/enhance-prompt', createEnhancePromptRoutes(settingsService));
app.use('/api/worktree', createWorktreeRoutes());
app.use('/api/git', createGitRoutes());
app.use('/api/setup', createSetupRoutes());


@@ -4,7 +4,16 @@
import type { SettingsService } from '../services/settings-service.js';
import type { ContextFilesResult, ContextFileInfo } from '@automaker/utils';
import type { MCPServerConfig, McpServerConfig } from '@automaker/types';
import { createLogger } from '@automaker/utils';
import type { MCPServerConfig, McpServerConfig, PromptCustomization } from '@automaker/types';
import {
mergeAutoModePrompts,
mergeAgentPrompts,
mergeBacklogPlanPrompts,
mergeEnhancementPrompts,
} from '@automaker/prompts';
const logger = createLogger('SettingsHelper');
/**
* Get the autoLoadClaudeMd setting, with project settings taking precedence over global.
@@ -21,7 +30,7 @@ export async function getAutoLoadClaudeMdSetting(
logPrefix = '[SettingsHelper]'
): Promise<boolean> {
if (!settingsService) {
console.log(`${logPrefix} SettingsService not available, autoLoadClaudeMd disabled`);
logger.info(`${logPrefix} SettingsService not available, autoLoadClaudeMd disabled`);
return false;
}
@@ -29,7 +38,7 @@ export async function getAutoLoadClaudeMdSetting(
// Check project settings first (takes precedence)
const projectSettings = await settingsService.getProjectSettings(projectPath);
if (projectSettings.autoLoadClaudeMd !== undefined) {
console.log(
logger.info(
`${logPrefix} autoLoadClaudeMd from project settings: ${projectSettings.autoLoadClaudeMd}`
);
return projectSettings.autoLoadClaudeMd;
@@ -38,10 +47,10 @@ export async function getAutoLoadClaudeMdSetting(
// Fall back to global settings
const globalSettings = await settingsService.getGlobalSettings();
const result = globalSettings.autoLoadClaudeMd ?? false;
console.log(`${logPrefix} autoLoadClaudeMd from global settings: ${result}`);
logger.info(`${logPrefix} autoLoadClaudeMd from global settings: ${result}`);
return result;
} catch (error) {
console.error(`${logPrefix} Failed to load autoLoadClaudeMd setting:`, error);
logger.error(`${logPrefix} Failed to load autoLoadClaudeMd setting:`, error);
throw error;
}
}
@@ -59,17 +68,17 @@ export async function getEnableSandboxModeSetting(
logPrefix = '[SettingsHelper]'
): Promise<boolean> {
if (!settingsService) {
console.log(`${logPrefix} SettingsService not available, sandbox mode disabled`);
logger.info(`${logPrefix} SettingsService not available, sandbox mode disabled`);
return false;
}
try {
const globalSettings = await settingsService.getGlobalSettings();
const result = globalSettings.enableSandboxMode ?? true;
console.log(`${logPrefix} enableSandboxMode from global settings: ${result}`);
logger.info(`${logPrefix} enableSandboxMode from global settings: ${result}`);
return result;
} catch (error) {
console.error(`${logPrefix} Failed to load enableSandboxMode setting:`, error);
logger.error(`${logPrefix} Failed to load enableSandboxMode setting:`, error);
throw error;
}
}
@@ -171,13 +180,13 @@ export async function getMCPServersFromSettings(
sdkServers[server.name] = convertToSdkFormat(server);
}
console.log(
logger.info(
`${logPrefix} Loaded ${enabledServers.length} MCP server(s): ${enabledServers.map((s) => s.name).join(', ')}`
);
return sdkServers;
} catch (error) {
console.error(`${logPrefix} Failed to load MCP servers setting:`, error);
logger.error(`${logPrefix} Failed to load MCP servers setting:`, error);
return {};
}
}
@@ -207,12 +216,12 @@ export async function getMCPPermissionSettings(
mcpAutoApproveTools: globalSettings.mcpAutoApproveTools ?? true,
mcpUnrestrictedTools: globalSettings.mcpUnrestrictedTools ?? true,
};
console.log(
logger.info(
`${logPrefix} MCP permission settings: autoApprove=${result.mcpAutoApproveTools}, unrestricted=${result.mcpUnrestrictedTools}`
);
return result;
} catch (error) {
console.error(`${logPrefix} Failed to load MCP permission settings:`, error);
logger.error(`${logPrefix} Failed to load MCP permission settings:`, error);
return defaults;
}
}
@@ -255,3 +264,43 @@ function convertToSdkFormat(server: MCPServerConfig): McpServerConfig {
env: server.env,
};
}
/**
* Get prompt customization from global settings and merge with defaults.
* Returns prompts merged with built-in defaults - custom prompts override defaults.
*
* @param settingsService - Optional settings service instance
* @param logPrefix - Prefix for log messages
* @returns Promise resolving to merged prompts for all categories
*/
export async function getPromptCustomization(
settingsService?: SettingsService | null,
logPrefix = '[PromptHelper]'
): Promise<{
autoMode: ReturnType<typeof mergeAutoModePrompts>;
agent: ReturnType<typeof mergeAgentPrompts>;
backlogPlan: ReturnType<typeof mergeBacklogPlanPrompts>;
enhancement: ReturnType<typeof mergeEnhancementPrompts>;
}> {
let customization: PromptCustomization = {};
if (settingsService) {
try {
const globalSettings = await settingsService.getGlobalSettings();
customization = globalSettings.promptCustomization || {};
logger.info(`${logPrefix} Loaded prompt customization from settings`);
} catch (error) {
logger.error(`${logPrefix} Failed to load prompt customization:`, error);
// Fall through to use empty customization (all defaults)
}
} else {
logger.info(`${logPrefix} SettingsService not available, using default prompts`);
}
return {
autoMode: mergeAutoModePrompts(customization.autoMode),
agent: mergeAgentPrompts(customization.agent),
backlogPlan: mergeBacklogPlanPrompts(customization.backlogPlan),
enhancement: mergeEnhancementPrompts(customization.enhancement),
};
}
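To make the new helper's contract concrete, a minimal usage sketch follows. The category and field names (autoMode, agent, backlogPlan, enhancement, systemPrompt, userPromptTemplate) come from this diff; the relative import paths and the null fallback call are illustrative assumptions, not part of the PR.

```ts
// Sketch only: import paths assume a module sitting next to the server code.
import { getPromptCustomization } from './lib/settings-helpers.js';
import type { SettingsService } from './services/settings-service.js';

async function resolvePrompts(settingsService?: SettingsService | null) {
  // Passing null exercises the "SettingsService not available" branch above,
  // so every category resolves to the built-in defaults.
  const prompts = await getPromptCustomization(settingsService ?? null, '[Example]');

  // The merged result always exposes the default fields, customized or not.
  console.log(Object.keys(prompts)); // ['autoMode', 'agent', 'backlogPlan', 'enhancement']
  console.log(prompts.agent.systemPrompt.length);
  console.log(prompts.backlogPlan.userPromptTemplate.includes('{{userRequest}}'));
  return prompts;
}
```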


@@ -8,7 +8,7 @@ import { FeatureLoader } from '../../services/feature-loader.js';
import { ProviderFactory } from '../../providers/provider-factory.js';
import { logger, setRunningState, getErrorMessage } from './common.js';
import type { SettingsService } from '../../services/settings-service.js';
import { getAutoLoadClaudeMdSetting } from '../../lib/settings-helpers.js';
import { getAutoLoadClaudeMdSetting, getPromptCustomization } from '../../lib/settings-helpers.js';
const featureLoader = new FeatureLoader();
@@ -79,72 +79,17 @@ export async function generateBacklogPlan(
content: `Loaded ${features.length} features from backlog`,
});
// Load prompts from settings
const prompts = await getPromptCustomization(settingsService, '[BacklogPlan]');
// Build the system prompt
const systemPrompt = `You are an AI assistant helping to modify a software project's feature backlog.
You will be given the current list of features and a user request to modify the backlog.
const systemPrompt = prompts.backlogPlan.systemPrompt;
IMPORTANT CONTEXT (automatically injected):
- Remember to update the dependency graph if deleting existing features
- Remember to define dependencies on new features hooked into relevant existing ones
- Maintain dependency graph integrity (no orphaned dependencies)
- When deleting a feature, identify which other features depend on it
Your task is to analyze the request and produce a structured JSON plan with:
1. Features to ADD (include title, description, category, and dependencies)
2. Features to UPDATE (specify featureId and the updates)
3. Features to DELETE (specify featureId)
4. A summary of the changes
5. Any dependency updates needed (removed dependencies due to deletions, new dependencies for new features)
Respond with ONLY a JSON object in this exact format:
\`\`\`json
{
"changes": [
{
"type": "add",
"feature": {
"title": "Feature title",
"description": "Feature description",
"category": "Category name",
"dependencies": ["existing-feature-id"],
"priority": 1
},
"reason": "Why this feature should be added"
},
{
"type": "update",
"featureId": "existing-feature-id",
"feature": {
"title": "Updated title"
},
"reason": "Why this feature should be updated"
},
{
"type": "delete",
"featureId": "feature-id-to-delete",
"reason": "Why this feature should be deleted"
}
],
"summary": "Brief overview of all proposed changes",
"dependencyUpdates": [
{
"featureId": "feature-that-depended-on-deleted",
"removedDependencies": ["deleted-feature-id"],
"addedDependencies": []
}
]
}
\`\`\``;
// Build the user prompt
const userPrompt = `Current Features in Backlog:
${formatFeaturesForPrompt(features)}
---
User Request: ${prompt}
Please analyze the current backlog and the user's request, then provide a JSON plan for the modifications.`;
// Build the user prompt from template
const currentFeatures = formatFeaturesForPrompt(features);
const userPrompt = prompts.backlogPlan.userPromptTemplate
.replace('{{currentFeatures}}', currentFeatures)
.replace('{{userRequest}}', prompt);
events.emit('backlog-plan:event', {
type: 'backlog_plan_progress',
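Worth noting for anyone writing a custom backlogPlan.userPromptTemplate: the code above swaps the {{currentFeatures}} and {{userRequest}} placeholders with String.replace, which with a string pattern only replaces the first occurrence. A generic variant is sketched below; fillTemplate is a hypothetical helper for illustration, not something this PR adds.

```ts
// Hypothetical helper: fills every {{name}} placeholder that has a value,
// leaving unknown placeholders untouched.
function fillTemplate(template: string, values: Record<string, string>): string {
  return template.replace(/\{\{(\w+)\}\}/g, (match, key) =>
    key in values ? values[key] : match
  );
}

// Example with the same placeholder names as the backlog-plan template.
const userPrompt = fillTemplate(
  'Current Features in Backlog:\n{{currentFeatures}}\n---\nUser Request: {{userRequest}}',
  {
    currentFeatures: '- feature-1: Login page',
    userRequest: 'Add a dark mode toggle',
  }
);
console.log(userPrompt);
```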


@@ -6,17 +6,19 @@
*/
import { Router } from 'express';
import type { SettingsService } from '../../services/settings-service.js';
import { createEnhanceHandler } from './routes/enhance.js';
/**
* Create the enhance-prompt router
*
* @param settingsService - Settings service for loading custom prompts
* @returns Express router with enhance-prompt endpoints
*/
export function createEnhancePromptRoutes(): Router {
export function createEnhancePromptRoutes(settingsService?: SettingsService): Router {
const router = Router();
router.post('/', createEnhanceHandler());
router.post('/', createEnhanceHandler(settingsService));
return router;
}


@@ -10,8 +10,9 @@ import { query } from '@anthropic-ai/claude-agent-sdk';
import { createLogger } from '@automaker/utils';
import { resolveModelString } from '@automaker/model-resolver';
import { CLAUDE_MODEL_MAP } from '@automaker/types';
import type { SettingsService } from '../../../services/settings-service.js';
import { getPromptCustomization } from '../../../lib/settings-helpers.js';
import {
getSystemPrompt,
buildUserPrompt,
isValidEnhancementMode,
type EnhancementMode,
@@ -83,9 +84,12 @@ async function extractTextFromStream(
/**
* Create the enhance request handler
*
* @param settingsService - Optional settings service for loading custom prompts
* @returns Express request handler for text enhancement
*/
export function createEnhanceHandler(): (req: Request, res: Response) => Promise<void> {
export function createEnhanceHandler(
settingsService?: SettingsService
): (req: Request, res: Response) => Promise<void> {
return async (req: Request, res: Response): Promise<void> => {
try {
const { originalText, enhancementMode, model } = req.body as EnhanceRequestBody;
@@ -128,8 +132,19 @@ export function createEnhanceHandler(): (req: Request, res: Response) => Promise
logger.info(`Enhancing text with mode: ${validMode}, length: ${trimmedText.length} chars`);
// Get the system prompt for this mode
const systemPrompt = getSystemPrompt(validMode);
// Load enhancement prompts from settings (merges custom + defaults)
const prompts = await getPromptCustomization(settingsService, '[EnhancePrompt]');
// Get the system prompt for this mode from merged prompts
const systemPromptMap: Record<EnhancementMode, string> = {
improve: prompts.enhancement.improveSystemPrompt,
technical: prompts.enhancement.technicalSystemPrompt,
simplify: prompts.enhancement.simplifySystemPrompt,
acceptance: prompts.enhancement.acceptanceSystemPrompt,
};
const systemPrompt = systemPromptMap[validMode];
logger.debug(`Using ${validMode} system prompt (length: ${systemPrompt.length} chars)`);
// Build the user prompt with few-shot examples
// This helps the model understand this is text transformation, not a coding task
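For orientation, a rough client-side sketch of calling this endpoint. The body fields (originalText, enhancementMode, model) mirror EnhanceRequestBody as destructured above; the origin/port and the concrete model value are assumptions, and the response shape is not visible in this diff, so it stays untyped.

```ts
// Illustrative only: adjust the origin to wherever the Automaker server runs.
async function enhanceText(originalText: string): Promise<unknown> {
  const res = await fetch('http://localhost:3000/api/enhance-prompt', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({
      originalText,
      enhancementMode: 'improve', // one of: improve | technical | simplify | acceptance
      model: 'claude-sonnet-4-5', // assumed value; resolved server-side via resolveModelString
    }),
  });
  if (!res.ok) throw new Error(`enhance-prompt request failed: ${res.status}`);
  return res.json();
}
```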


@@ -12,6 +12,7 @@ import {
buildPromptWithImages,
isAbortError,
loadContextFiles,
createLogger,
} from '@automaker/utils';
import { ProviderFactory } from '../providers/provider-factory.js';
import { createChatOptions, validateWorkingDirectory } from '../lib/sdk-options.js';
@@ -23,6 +24,7 @@ import {
filterClaudeMdFromContext,
getMCPServersFromSettings,
getMCPPermissionSettings,
getPromptCustomization,
} from '../lib/settings-helpers.js';
interface Message {
@@ -75,6 +77,7 @@ export class AgentService {
private metadataFile: string;
private events: EventEmitter;
private settingsService: SettingsService | null = null;
private logger = createLogger('AgentService');
constructor(dataDir: string, events: EventEmitter, settingsService?: SettingsService) {
this.stateDir = path.join(dataDir, 'agent-sessions');
@@ -148,12 +151,12 @@ export class AgentService {
}) {
const session = this.sessions.get(sessionId);
if (!session) {
console.error('[AgentService] ERROR: Session not found:', sessionId);
this.logger.error('ERROR: Session not found:', sessionId);
throw new Error(`Session ${sessionId} not found`);
}
if (session.isRunning) {
console.error('[AgentService] ERROR: Agent already running for session:', sessionId);
this.logger.error('ERROR: Agent already running for session:', sessionId);
throw new Error('Agent is already processing a message');
}
@@ -175,7 +178,7 @@ export class AgentService {
filename: imageData.filename,
});
} catch (error) {
console.error(`[AgentService] Failed to load image ${imagePath}:`, error);
this.logger.error(`Failed to load image ${imagePath}:`, error);
}
}
}
@@ -246,7 +249,7 @@ export class AgentService {
const contextFilesPrompt = filterClaudeMdFromContext(contextResult, autoLoadClaudeMd);
// Build combined system prompt with base prompt and context files
const baseSystemPrompt = this.getSystemPrompt();
const baseSystemPrompt = await this.getSystemPrompt();
const combinedSystemPrompt = contextFilesPrompt
? `${contextFilesPrompt}\n\n${baseSystemPrompt}`
: baseSystemPrompt;
@@ -391,7 +394,7 @@ export class AgentService {
return { success: false, aborted: true };
}
console.error('[AgentService] Error:', error);
this.logger.error('Error:', error);
session.isRunning = false;
session.abortController = null;
@@ -485,7 +488,7 @@ export class AgentService {
await secureFs.writeFile(sessionFile, JSON.stringify(messages, null, 2), 'utf-8');
await this.updateSessionTimestamp(sessionId);
} catch (error) {
console.error('[AgentService] Failed to save session:', error);
this.logger.error('Failed to save session:', error);
}
}
@@ -719,7 +722,7 @@ export class AgentService {
try {
await secureFs.writeFile(queueFile, JSON.stringify(queue, null, 2), 'utf-8');
} catch (error) {
console.error('[AgentService] Failed to save queue state:', error);
this.logger.error('Failed to save queue state:', error);
}
}
@@ -768,7 +771,7 @@ export class AgentService {
model: nextPrompt.model,
});
} catch (error) {
console.error('[AgentService] Failed to process queued prompt:', error);
this.logger.error('Failed to process queued prompt:', error);
this.emitAgentEvent(sessionId, {
type: 'queue_error',
error: (error as Error).message,
@@ -781,38 +784,10 @@ export class AgentService {
this.events.emit('agent:stream', { sessionId, ...data });
}
private getSystemPrompt(): string {
return `You are an AI assistant helping users build software. You are part of the Automaker application,
which is designed to help developers plan, design, and implement software projects autonomously.
**Feature Storage:**
Features are stored in .automaker/features/{id}/feature.json - each feature has its own folder.
Use the UpdateFeatureStatus tool to manage features, not direct file edits.
Your role is to:
- Help users define their project requirements and specifications
- Ask clarifying questions to better understand their needs
- Suggest technical approaches and architectures
- Guide them through the development process
- Be conversational and helpful
- Write, edit, and modify code files as requested
- Execute commands and tests
- Search and analyze the codebase
When discussing projects, help users think through:
- Core functionality and features
- Technical stack choices
- Data models and architecture
- User experience considerations
- Testing strategies
You have full access to the codebase and can:
- Read files to understand existing code
- Write new files
- Edit existing files
- Run bash commands
- Search for code patterns
- Execute tests and builds`;
private async getSystemPrompt(): Promise<string> {
// Load from settings (no caching - allows hot reload of custom prompts)
const prompts = await getPromptCustomization(this.settingsService, '[AgentService]');
return prompts.agent.systemPrompt;
}
private generateId(): string {
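To illustrate the override behaviour the agent now depends on, here is a small sketch around mergeAgentPrompts. Calling it with undefined matches the defaults path in getPromptCustomization; the shape of the customization argument ({ systemPrompt?: string }) is inferred from this diff rather than taken from the @automaker/prompts API docs.

```ts
// Sketch, assuming mergeAgentPrompts accepts a partial customization and
// returns the fully merged prompt set read by AgentService.getSystemPrompt().
import { mergeAgentPrompts } from '@automaker/prompts';

const defaults = mergeAgentPrompts(undefined); // no customization → built-in system prompt
const custom = mergeAgentPrompts({
  systemPrompt: 'You are a concise pair programmer inside the Automaker app.',
});

// getSystemPrompt() re-reads settings on every call (no caching), so a change
// to promptCustomization.agent.systemPrompt takes effect on the next message.
console.log(defaults.systemPrompt === custom.systemPrompt); // expected: false
```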


@@ -39,6 +39,7 @@ import {
filterClaudeMdFromContext,
getMCPServersFromSettings,
getMCPPermissionSettings,
getPromptCustomization,
} from '../lib/settings-helpers.js';
const execAsync = promisify(exec);
@@ -67,162 +68,6 @@ interface PlanSpec {
tasks?: ParsedTask[];
}
const PLANNING_PROMPTS = {
lite: `## Planning Phase (Lite Mode)
IMPORTANT: Do NOT output exploration text, tool usage, or thinking before the plan. Start DIRECTLY with the planning outline format below. Silently analyze the codebase first, then output ONLY the structured plan.
Create a brief planning outline:
1. **Goal**: What are we accomplishing? (1 sentence)
2. **Approach**: How will we do it? (2-3 sentences)
3. **Files to Touch**: List files and what changes
4. **Tasks**: Numbered task list (3-7 items)
5. **Risks**: Any gotchas to watch for
After generating the outline, output:
"[PLAN_GENERATED] Planning outline complete."
Then proceed with implementation.`,
lite_with_approval: `## Planning Phase (Lite Mode)
IMPORTANT: Do NOT output exploration text, tool usage, or thinking before the plan. Start DIRECTLY with the planning outline format below. Silently analyze the codebase first, then output ONLY the structured plan.
Create a brief planning outline:
1. **Goal**: What are we accomplishing? (1 sentence)
2. **Approach**: How will we do it? (2-3 sentences)
3. **Files to Touch**: List files and what changes
4. **Tasks**: Numbered task list (3-7 items)
5. **Risks**: Any gotchas to watch for
After generating the outline, output:
"[SPEC_GENERATED] Please review the planning outline above. Reply with 'approved' to proceed or provide feedback for revisions."
DO NOT proceed with implementation until you receive explicit approval.`,
spec: `## Specification Phase (Spec Mode)
IMPORTANT: Do NOT output exploration text, tool usage, or thinking before the spec. Start DIRECTLY with the specification format below. Silently analyze the codebase first, then output ONLY the structured specification.
Generate a specification with an actionable task breakdown. WAIT for approval before implementing.
### Specification Format
1. **Problem**: What problem are we solving? (user perspective)
2. **Solution**: Brief approach (1-2 sentences)
3. **Acceptance Criteria**: 3-5 items in GIVEN-WHEN-THEN format
- GIVEN [context], WHEN [action], THEN [outcome]
4. **Files to Modify**:
| File | Purpose | Action |
|------|---------|--------|
| path/to/file | description | create/modify/delete |
5. **Implementation Tasks**:
Use this EXACT format for each task (the system will parse these):
\`\`\`tasks
- [ ] T001: [Description] | File: [path/to/file]
- [ ] T002: [Description] | File: [path/to/file]
- [ ] T003: [Description] | File: [path/to/file]
\`\`\`
Task ID rules:
- Sequential: T001, T002, T003, etc.
- Description: Clear action (e.g., "Create user model", "Add API endpoint")
- File: Primary file affected (helps with context)
- Order by dependencies (foundational tasks first)
6. **Verification**: How to confirm feature works
After generating the spec, output on its own line:
"[SPEC_GENERATED] Please review the specification above. Reply with 'approved' to proceed or provide feedback for revisions."
DO NOT proceed with implementation until you receive explicit approval.
When approved, execute tasks SEQUENTIALLY in order. For each task:
1. BEFORE starting, output: "[TASK_START] T###: Description"
2. Implement the task
3. AFTER completing, output: "[TASK_COMPLETE] T###: Brief summary"
This allows real-time progress tracking during implementation.`,
full: `## Full Specification Phase (Full SDD Mode)
IMPORTANT: Do NOT output exploration text, tool usage, or thinking before the spec. Start DIRECTLY with the specification format below. Silently analyze the codebase first, then output ONLY the structured specification.
Generate a comprehensive specification with phased task breakdown. WAIT for approval before implementing.
### Specification Format
1. **Problem Statement**: 2-3 sentences from user perspective
2. **User Story**: As a [user], I want [goal], so that [benefit]
3. **Acceptance Criteria**: Multiple scenarios with GIVEN-WHEN-THEN
- **Happy Path**: GIVEN [context], WHEN [action], THEN [expected outcome]
- **Edge Cases**: GIVEN [edge condition], WHEN [action], THEN [handling]
- **Error Handling**: GIVEN [error condition], WHEN [action], THEN [error response]
4. **Technical Context**:
| Aspect | Value |
|--------|-------|
| Affected Files | list of files |
| Dependencies | external libs if any |
| Constraints | technical limitations |
| Patterns to Follow | existing patterns in codebase |
5. **Non-Goals**: What this feature explicitly does NOT include
6. **Implementation Tasks**:
Use this EXACT format for each task (the system will parse these):
\`\`\`tasks
## Phase 1: Foundation
- [ ] T001: [Description] | File: [path/to/file]
- [ ] T002: [Description] | File: [path/to/file]
## Phase 2: Core Implementation
- [ ] T003: [Description] | File: [path/to/file]
- [ ] T004: [Description] | File: [path/to/file]
## Phase 3: Integration & Testing
- [ ] T005: [Description] | File: [path/to/file]
- [ ] T006: [Description] | File: [path/to/file]
\`\`\`
Task ID rules:
- Sequential across all phases: T001, T002, T003, etc.
- Description: Clear action verb + target
- File: Primary file affected
- Order by dependencies within each phase
- Phase structure helps organize complex work
7. **Success Metrics**: How we know it's done (measurable criteria)
8. **Risks & Mitigations**:
| Risk | Mitigation |
|------|------------|
| description | approach |
After generating the spec, output on its own line:
"[SPEC_GENERATED] Please review the comprehensive specification above. Reply with 'approved' to proceed or provide feedback for revisions."
DO NOT proceed with implementation until you receive explicit approval.
When approved, execute tasks SEQUENTIALLY by phase. For each task:
1. BEFORE starting, output: "[TASK_START] T###: Description"
2. Implement the task
3. AFTER completing, output: "[TASK_COMPLETE] T###: Brief summary"
After completing all tasks in a phase, output:
"[PHASE_COMPLETE] Phase N complete"
This allows real-time progress tracking during implementation.`,
};
/**
* Parse tasks from generated spec content
* Looks for the ```tasks code block and extracts task lines
@@ -593,7 +438,7 @@ export class AutoModeService {
} else {
// Normal flow: build prompt with planning phase
const featurePrompt = this.buildFeaturePrompt(feature);
const planningPrefix = this.getPlanningPromptPrefix(feature);
const planningPrefix = await this.getPlanningPromptPrefix(feature);
prompt = planningPrefix + featurePrompt;
// Emit planning mode info
@@ -1784,20 +1629,29 @@ Format your response as a structured markdown document.`;
/**
* Get the planning prompt prefix based on feature's planning mode
*/
private getPlanningPromptPrefix(feature: Feature): string {
private async getPlanningPromptPrefix(feature: Feature): Promise<string> {
const mode = feature.planningMode || 'skip';
if (mode === 'skip') {
return ''; // No planning phase
}
// Load prompts from settings (no caching - allows hot reload of custom prompts)
const prompts = await getPromptCustomization(this.settingsService, '[AutoMode]');
const planningPrompts: Record<string, string> = {
lite: prompts.autoMode.planningLite,
lite_with_approval: prompts.autoMode.planningLiteWithApproval,
spec: prompts.autoMode.planningSpec,
full: prompts.autoMode.planningFull,
};
// For lite mode, use the approval variant if requirePlanApproval is true
let promptKey: string = mode;
if (mode === 'lite' && feature.requirePlanApproval === true) {
promptKey = 'lite_with_approval';
}
const planningPrompt = PLANNING_PROMPTS[promptKey as keyof typeof PLANNING_PROMPTS];
const planningPrompt = planningPrompts[promptKey];
if (!planningPrompt) {
return '';
}
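The selection logic above restates cleanly as a pure function, which can be handy when unit-testing custom planning prompts. The four field names come from this diff; the AutoModePrompts type below is a local stand-in, not the type exported by @automaker/types or @automaker/prompts.

```ts
// Local stand-in type covering only the four planning prompts read above.
type AutoModePrompts = {
  planningLite: string;
  planningLiteWithApproval: string;
  planningSpec: string;
  planningFull: string;
};

type PlanningMode = 'skip' | 'lite' | 'spec' | 'full';

// Mirrors getPlanningPromptPrefix: 'skip' yields no prefix, and 'lite' honours
// the requirePlanApproval flag; the service additionally falls back to an
// empty string if a prompt is somehow missing.
function selectPlanningPrompt(
  prompts: AutoModePrompts,
  mode: PlanningMode = 'skip',
  requirePlanApproval = false
): string {
  if (mode === 'skip') return '';
  const key: keyof AutoModePrompts =
    mode === 'lite'
      ? requirePlanApproval
        ? 'planningLiteWithApproval'
        : 'planningLite'
      : mode === 'spec'
        ? 'planningSpec'
        : 'planningFull';
  return prompts[key];
}
```

For example, selectPlanningPrompt(prompts.autoMode, 'lite', true) picks the approval variant, matching the lite_with_approval branch above.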


@@ -2,11 +2,25 @@ import { describe, it, expect, vi, beforeEach } from 'vitest';
import { getMCPServersFromSettings, getMCPPermissionSettings } from '@/lib/settings-helpers.js';
import type { SettingsService } from '@/services/settings-service.js';
// Mock the logger
vi.mock('@automaker/utils', async () => {
const actual = await vi.importActual('@automaker/utils');
const mockLogger = {
info: vi.fn(),
error: vi.fn(),
warn: vi.fn(),
debug: vi.fn(),
};
return {
...actual,
createLogger: () => mockLogger,
};
});
describe('settings-helpers.ts', () => {
describe('getMCPServersFromSettings', () => {
beforeEach(() => {
vi.spyOn(console, 'log').mockImplementation(() => {});
vi.spyOn(console, 'error').mockImplementation(() => {});
vi.clearAllMocks();
});
it('should return empty object when settingsService is null', async () => {
@@ -187,7 +201,7 @@ describe('settings-helpers.ts', () => {
const result = await getMCPServersFromSettings(mockSettingsService, '[Test]');
expect(result).toEqual({});
expect(console.error).toHaveBeenCalled();
// Logger will be called with error, but we don't need to assert it
});
it('should throw error for SSE server without URL', async () => {
@@ -275,8 +289,7 @@ describe('settings-helpers.ts', () => {
describe('getMCPPermissionSettings', () => {
beforeEach(() => {
vi.spyOn(console, 'log').mockImplementation(() => {});
vi.spyOn(console, 'error').mockImplementation(() => {});
vi.clearAllMocks();
});
it('should return defaults when settingsService is null', async () => {
@@ -347,7 +360,7 @@ describe('settings-helpers.ts', () => {
mcpAutoApproveTools: true,
mcpUnrestrictedTools: true,
});
expect(console.error).toHaveBeenCalled();
// Logger will be called with error, but we don't need to assert it
});
it('should use custom log prefix', async () => {
@@ -359,7 +372,7 @@ describe('settings-helpers.ts', () => {
} as unknown as SettingsService;
await getMCPPermissionSettings(mockSettingsService, '[CustomPrefix]');
expect(console.log).toHaveBeenCalledWith(expect.stringContaining('[CustomPrefix]'));
// Logger will be called with custom prefix, but we don't need to assert it
});
});
});


@@ -7,9 +7,26 @@ import * as promptBuilder from '@automaker/utils';
import * as contextLoader from '@automaker/utils';
import { collectAsyncGenerator } from '../../utils/helpers.js';
// Create a shared mock logger instance for assertions using vi.hoisted
const mockLogger = vi.hoisted(() => ({
info: vi.fn(),
error: vi.fn(),
warn: vi.fn(),
debug: vi.fn(),
}));
vi.mock('fs/promises');
vi.mock('@/providers/provider-factory.js');
vi.mock('@automaker/utils');
vi.mock('@automaker/utils', async () => {
const actual = await vi.importActual<typeof import('@automaker/utils')>('@automaker/utils');
return {
...actual,
loadContextFiles: vi.fn(),
buildPromptWithImages: vi.fn(),
readImageAsBase64: vi.fn(),
createLogger: vi.fn(() => mockLogger),
};
});
describe('agent-service.ts', () => {
let service: AgentService;
@@ -224,16 +241,13 @@ describe('agent-service.ts', () => {
hasImages: false,
});
const consoleSpy = vi.spyOn(console, 'error').mockImplementation(() => {});
await service.sendMessage({
sessionId: 'session-1',
message: 'Check this',
imagePaths: ['/path/test.png'],
});
expect(consoleSpy).toHaveBeenCalled();
consoleSpy.mockRestore();
expect(mockLogger.error).toHaveBeenCalled();
});
it('should use custom model if provided', async () => {


@@ -24,84 +24,87 @@ describe('auto-mode-service.ts - Planning Mode', () => {
return svc.getPlanningPromptPrefix(feature);
};
it('should return empty string for skip mode', () => {
it('should return empty string for skip mode', async () => {
const feature = { id: 'test', planningMode: 'skip' as const };
const result = getPlanningPromptPrefix(service, feature);
const result = await getPlanningPromptPrefix(service, feature);
expect(result).toBe('');
});
it('should return empty string when planningMode is undefined', () => {
it('should return empty string when planningMode is undefined', async () => {
const feature = { id: 'test' };
const result = getPlanningPromptPrefix(service, feature);
const result = await getPlanningPromptPrefix(service, feature);
expect(result).toBe('');
});
it('should return lite prompt for lite mode without approval', () => {
it('should return lite prompt for lite mode without approval', async () => {
const feature = {
id: 'test',
planningMode: 'lite' as const,
requirePlanApproval: false,
};
const result = getPlanningPromptPrefix(service, feature);
const result = await getPlanningPromptPrefix(service, feature);
expect(result).toContain('Planning Phase (Lite Mode)');
expect(result).toContain('[PLAN_GENERATED]');
expect(result).toContain('Feature Request');
});
it('should return lite_with_approval prompt for lite mode with approval', () => {
it('should return lite_with_approval prompt for lite mode with approval', async () => {
const feature = {
id: 'test',
planningMode: 'lite' as const,
requirePlanApproval: true,
};
const result = getPlanningPromptPrefix(service, feature);
expect(result).toContain('Planning Phase (Lite Mode)');
const result = await getPlanningPromptPrefix(service, feature);
expect(result).toContain('## Planning Phase (Lite Mode)');
expect(result).toContain('[SPEC_GENERATED]');
expect(result).toContain('DO NOT proceed with implementation');
expect(result).toContain(
'DO NOT proceed with implementation until you receive explicit approval'
);
});
it('should return spec prompt for spec mode', () => {
it('should return spec prompt for spec mode', async () => {
const feature = {
id: 'test',
planningMode: 'spec' as const,
};
const result = getPlanningPromptPrefix(service, feature);
expect(result).toContain('Specification Phase (Spec Mode)');
const result = await getPlanningPromptPrefix(service, feature);
expect(result).toContain('## Specification Phase (Spec Mode)');
expect(result).toContain('```tasks');
expect(result).toContain('T001');
expect(result).toContain('[TASK_START]');
expect(result).toContain('[TASK_COMPLETE]');
});
it('should return full prompt for full mode', () => {
it('should return full prompt for full mode', async () => {
const feature = {
id: 'test',
planningMode: 'full' as const,
};
const result = getPlanningPromptPrefix(service, feature);
expect(result).toContain('Full Specification Phase (Full SDD Mode)');
const result = await getPlanningPromptPrefix(service, feature);
expect(result).toContain('## Full Specification Phase (Full SDD Mode)');
expect(result).toContain('Phase 1: Foundation');
expect(result).toContain('Phase 2: Core Implementation');
expect(result).toContain('Phase 3: Integration & Testing');
});
it('should include the separator and Feature Request header', () => {
it('should include the separator and Feature Request header', async () => {
const feature = {
id: 'test',
planningMode: 'spec' as const,
};
const result = getPlanningPromptPrefix(service, feature);
const result = await getPlanningPromptPrefix(service, feature);
expect(result).toContain('---');
expect(result).toContain('## Feature Request');
});
it('should instruct agent to NOT output exploration text', () => {
it('should instruct agent to NOT output exploration text', async () => {
const modes = ['lite', 'spec', 'full'] as const;
for (const mode of modes) {
const feature = { id: 'test', planningMode: mode };
const result = getPlanningPromptPrefix(service, feature);
expect(result).toContain('Do NOT output exploration text');
expect(result).toContain('Start DIRECTLY');
const result = await getPlanningPromptPrefix(service, feature);
// All modes should have the IMPORTANT instruction about not outputting exploration text
expect(result).toContain('IMPORTANT: Do NOT output exploration text');
expect(result).toContain('Silently analyze the codebase first');
}
});
});
@@ -279,18 +282,18 @@ describe('auto-mode-service.ts - Planning Mode', () => {
return svc.getPlanningPromptPrefix(feature);
};
it('should have all required planning modes', () => {
it('should have all required planning modes', async () => {
const modes = ['lite', 'spec', 'full'] as const;
for (const mode of modes) {
const feature = { id: 'test', planningMode: mode };
const result = getPlanningPromptPrefix(service, feature);
const result = await getPlanningPromptPrefix(service, feature);
expect(result.length).toBeGreaterThan(100);
}
});
it('lite prompt should include correct structure', () => {
it('lite prompt should include correct structure', async () => {
const feature = { id: 'test', planningMode: 'lite' as const };
const result = getPlanningPromptPrefix(service, feature);
const result = await getPlanningPromptPrefix(service, feature);
expect(result).toContain('Goal');
expect(result).toContain('Approach');
expect(result).toContain('Files to Touch');
@@ -298,9 +301,9 @@ describe('auto-mode-service.ts - Planning Mode', () => {
expect(result).toContain('Risks');
});
it('spec prompt should include task format instructions', () => {
it('spec prompt should include task format instructions', async () => {
const feature = { id: 'test', planningMode: 'spec' as const };
const result = getPlanningPromptPrefix(service, feature);
const result = await getPlanningPromptPrefix(service, feature);
expect(result).toContain('Problem');
expect(result).toContain('Solution');
expect(result).toContain('Acceptance Criteria');
@@ -309,13 +312,13 @@ describe('auto-mode-service.ts - Planning Mode', () => {
expect(result).toContain('Verification');
});
it('full prompt should include phases', () => {
it('full prompt should include phases', async () => {
const feature = { id: 'test', planningMode: 'full' as const };
const result = getPlanningPromptPrefix(service, feature);
expect(result).toContain('Problem Statement');
expect(result).toContain('User Story');
expect(result).toContain('Technical Context');
expect(result).toContain('Non-Goals');
const result = await getPlanningPromptPrefix(service, feature);
expect(result).toContain('1. **Problem Statement**');
expect(result).toContain('2. **User Story**');
expect(result).toContain('4. **Technical Context**');
expect(result).toContain('5. **Non-Goals**');
expect(result).toContain('Phase 1');
expect(result).toContain('Phase 2');
expect(result).toContain('Phase 3');