feat: Centralize AI prompts into JSON templates (#882)

* centralize prompt management

* add changeset

* add variant key to determine prompt version

* update tests and add prompt manager test

* determine internal path, don't use projectRoot

* add promptManager mock

* detailed prompt docs

* add schemas and validator packages

* add validate prompts command

* add schema validation

* update tests

* move schemas to src/prompts/schemas

* use this.promptsDir for better semantics

* add prompt schemas

* version schema files & update links

* remove validate command

* expect dependencies

* update docs

* fix test

* remove suggestmode to ensure clean keys

* remove default variant from research and update schema

* now handled by prompt manager

* add manual test to verify prompts

* remove incorrect batch variant

* consolidate variants

* consolidate analyze-complexity to just default variant

* consolidate parse-prd variants

* add eq handler for handlebars

* consolidate research prompt variants

* use brevity

* consolidate variants for update subtask

* add not handler

* consolidate variants for update-task

* consolidate update-tasks variants

* add conditional content to prompt when research used

* update prompt tests

* show correct research variant

* make variant names link to below

* remove changeset

* restore gitignore

* Merge branch 'next' of https://github.com/eyaltoledano/claude-task-master into joedanz/centralize-prompts

# Conflicts:
#	package-lock.json
#	scripts/modules/task-manager/expand-task.js
#	scripts/modules/task-manager/parse-prd.js

* remove unused

* add else

* update tests

* update biome optional dependencies

* responsive html output for mobile
Author: Joe Danziger
Date: 2025-07-10 03:52:11 -04:00
Committed by: GitHub
Parent: 4bc8029080
Commit: a65ad0a47c
36 changed files with 6180 additions and 9034 deletions
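
The commit messages above outline the shape of the change: prompts move into versioned JSON templates with named variants, rendered through Handlebars (hence the new `eq` and `not` helpers) and validated against schemas under src/prompts/schemas. Only the test side of that change appears in the diffs below, so the following is a hedged sketch of what one such template might look like; every field name here is an assumption, not the PR's actual schema:

{
	"id": "expand-task",
	"version": "1.0.0",
	"prompts": {
		"default": {
			"system": "You are an AI assistant helping with task breakdown for software development.",
			"user": "Break down this task into {{#if (eq subtaskCount 0)}}an appropriate number of{{else}}exactly {{subtaskCount}}{{/if}} specific subtasks.{{#if additionalContext}}\n\n{{additionalContext}}{{/if}}"
		},
		"complexity-report": {
			"system": "Generate exactly {{subtaskCount}} subtasks for complexity report",
			"user": "{{expansionPrompt}}"
		}
	}
}

The `eq` and `not` helpers used in such conditionals are not built into Handlebars; registering them would look roughly like this (helper names come from the commit messages, the implementation is assumed):

import Handlebars from 'handlebars';

// Equality and negation helpers for use in subexpressions such as
// {{#if (not (eq variant "research"))}} ... {{/if}}
Handlebars.registerHelper('eq', (a, b) => a === b);
Handlebars.registerHelper('not', (value) => !value);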

View File

@@ -123,6 +123,18 @@ jest.unstable_mockModule(
 	})
 );
+jest.unstable_mockModule(
+	'../../../../../scripts/modules/prompt-manager.js',
+	() => ({
+		getPromptManager: jest.fn().mockReturnValue({
+			loadPrompt: jest.fn().mockResolvedValue({
+				systemPrompt: 'Mocked system prompt',
+				userPrompt: 'Mocked user prompt'
+			})
+		})
+	})
+);
 // Mock external UI libraries
 jest.unstable_mockModule('chalk', () => ({
 	default: {

View File

@@ -171,6 +171,18 @@ jest.unstable_mockModule('fs', () => ({
 	writeFileSync: mockWriteFileSync
 }));
+jest.unstable_mockModule(
+	'../../../../../scripts/modules/prompt-manager.js',
+	() => ({
+		getPromptManager: jest.fn().mockReturnValue({
+			loadPrompt: jest.fn().mockResolvedValue({
+				systemPrompt: 'Mocked system prompt',
+				userPrompt: 'Mocked user prompt'
+			})
+		})
+	})
+);
 // Import the mocked modules
 const { readJSON, writeJSON, log, CONFIG } = await import(
 	'../../../../../scripts/modules/utils.js'
@@ -262,11 +274,13 @@ describe('analyzeTaskComplexity', () => {
 			file: 'tasks/tasks.json',
 			output: 'scripts/task-complexity-report.json',
 			threshold: '5',
-			research: false
+			research: false,
+			projectRoot: '/mock/project/root'
 		};
 		// Act
 		await analyzeTaskComplexity(options, {
+			projectRoot: '/mock/project/root',
 			mcpLog: {
 				info: jest.fn(),
 				warn: jest.fn(),
@@ -279,7 +293,7 @@ describe('analyzeTaskComplexity', () => {
 		// Assert
 		expect(readJSON).toHaveBeenCalledWith(
 			'tasks/tasks.json',
-			undefined,
+			'/mock/project/root',
 			undefined
 		);
 		expect(generateTextService).toHaveBeenCalledWith(expect.any(Object));
@@ -296,11 +310,13 @@ describe('analyzeTaskComplexity', () => {
 			file: 'tasks/tasks.json',
 			output: 'scripts/task-complexity-report.json',
 			threshold: '5',
-			research: true
+			research: true,
+			projectRoot: '/mock/project/root'
 		};
 		// Act
 		await analyzeTaskComplexity(researchOptions, {
+			projectRoot: '/mock/project/root',
 			mcpLog: {
 				info: jest.fn(),
 				warn: jest.fn(),
@@ -323,10 +339,12 @@ describe('analyzeTaskComplexity', () => {
 		let options = {
 			file: 'tasks/tasks.json',
 			output: 'scripts/task-complexity-report.json',
-			threshold: '7'
+			threshold: '7',
+			projectRoot: '/mock/project/root'
 		};
 		await analyzeTaskComplexity(options, {
+			projectRoot: '/mock/project/root',
 			mcpLog: {
 				info: jest.fn(),
 				warn: jest.fn(),
@@ -349,10 +367,12 @@ describe('analyzeTaskComplexity', () => {
 		options = {
 			file: 'tasks/tasks.json',
 			output: 'scripts/task-complexity-report.json',
-			threshold: 8
+			threshold: 8,
+			projectRoot: '/mock/project/root'
 		};
 		await analyzeTaskComplexity(options, {
+			projectRoot: '/mock/project/root',
 			mcpLog: {
 				info: jest.fn(),
 				warn: jest.fn(),
@@ -374,11 +394,13 @@ describe('analyzeTaskComplexity', () => {
 		const options = {
 			file: 'tasks/tasks.json',
 			output: 'scripts/task-complexity-report.json',
-			threshold: '5'
+			threshold: '5',
+			projectRoot: '/mock/project/root'
 		};
 		// Act
 		await analyzeTaskComplexity(options, {
+			projectRoot: '/mock/project/root',
 			mcpLog: {
 				info: jest.fn(),
 				warn: jest.fn(),
@@ -402,7 +424,8 @@ describe('analyzeTaskComplexity', () => {
 		const options = {
 			file: 'tasks/tasks.json',
 			output: 'scripts/task-complexity-report.json',
-			threshold: '5'
+			threshold: '5',
+			projectRoot: '/mock/project/root'
 		};
 		// Force API error
@@ -419,6 +442,7 @@ describe('analyzeTaskComplexity', () => {
 		// Act & Assert
 		await expect(
 			analyzeTaskComplexity(options, {
+				projectRoot: '/mock/project/root',
 				mcpLog: mockMcpLog
 			})
 		).rejects.toThrow('API Error');

View File

@@ -131,11 +131,7 @@ jest.unstable_mockModule(
 	'../../../../../scripts/modules/utils/contextGatherer.js',
 	() => ({
 		ContextGatherer: jest.fn().mockImplementation(() => ({
-			gather: jest.fn().mockResolvedValue({
-				contextSummary: 'Mock context summary',
-				allRelatedTaskIds: [],
-				graphVisualization: 'Mock graph'
-			})
+			gather: jest.fn().mockResolvedValue('Mock project context from files')
 		}))
 	})
 );
@@ -147,6 +143,18 @@ jest.unstable_mockModule(
 	})
 );
+jest.unstable_mockModule(
+	'../../../../../scripts/modules/prompt-manager.js',
+	() => ({
+		getPromptManager: jest.fn().mockReturnValue({
+			loadPrompt: jest.fn().mockResolvedValue({
+				systemPrompt: 'Mocked system prompt',
+				userPrompt: 'Mocked user prompt'
+			})
+		})
+	})
+);
 // Mock external UI libraries
 jest.unstable_mockModule('chalk', () => ({
 	default: {
@@ -663,6 +671,18 @@ describe('expandTask', () => {
 	describe('Complexity Report Integration (Tag-Specific)', () => {
 		test('should use tag-specific complexity report when available', async () => {
 			// Arrange
+			const { getPromptManager } = await import(
+				'../../../../../scripts/modules/prompt-manager.js'
+			);
+			const mockLoadPrompt = jest.fn().mockResolvedValue({
+				systemPrompt: 'Generate exactly 5 subtasks for complexity report',
+				userPrompt:
+					'Please break this task into 5 parts\n\nUser provided context'
+			});
+			getPromptManager.mockReturnValue({
+				loadPrompt: mockLoadPrompt
+			});
 			const tasksPath = 'tasks/tasks.json';
 			const taskId = '1'; // Task in feature-branch
 			const context = {
@@ -710,6 +730,16 @@ describe('expandTask', () => {
 			const callArg = generateTextService.mock.calls[0][0];
 			expect(callArg.systemPrompt).toContain('Generate exactly 5 subtasks');
+			// Assert - Should use complexity-report variant with expansion prompt
+			expect(mockLoadPrompt).toHaveBeenCalledWith(
+				'expand-task',
+				expect.objectContaining({
+					subtaskCount: 5,
+					expansionPrompt: 'Please break this task into 5 parts'
+				}),
+				'complexity-report'
+			);
 			// Clean up stub
 			existsSpy.mockRestore();
 		});
@@ -903,6 +933,17 @@ describe('expandTask', () => {
 		test('should handle additional context correctly', async () => {
 			// Arrange
+			const { getPromptManager } = await import(
+				'../../../../../scripts/modules/prompt-manager.js'
+			);
+			const mockLoadPrompt = jest.fn().mockResolvedValue({
+				systemPrompt: 'Mocked system prompt',
+				userPrompt: 'Mocked user prompt with context'
+			});
+			getPromptManager.mockReturnValue({
+				loadPrompt: mockLoadPrompt
+			});
 			const tasksPath = 'tasks/tasks.json';
 			const taskId = '2';
 			const additionalContext = 'Use React hooks and TypeScript';
@@ -922,11 +963,28 @@ describe('expandTask', () => {
 				false
 			);
-			// Assert - Should include additional context in prompt
-			expect(generateTextService).toHaveBeenCalledWith(
+			// Assert - Should pass separate context parameters to prompt manager
+			expect(mockLoadPrompt).toHaveBeenCalledWith(
+				'expand-task',
 				expect.objectContaining({
-					prompt: expect.stringContaining('Use React hooks and TypeScript')
-				})
+					additionalContext: expect.stringContaining(
+						'Use React hooks and TypeScript'
+					),
+					gatheredContext: expect.stringContaining(
+						'Mock project context from files'
+					)
+				}),
+				expect.any(String)
 			);
+			// Additional assertion to verify the context parameters are passed separately
+			const call = mockLoadPrompt.mock.calls[0];
+			const parameters = call[1];
+			expect(parameters.additionalContext).toContain(
+				'Use React hooks and TypeScript'
+			);
+			expect(parameters.gatheredContext).toContain(
+				'Mock project context from files'
+			);
 		});
@@ -1003,6 +1061,20 @@ describe('expandTask', () => {
 		});
 		test('should use dynamic prompting when numSubtasks is 0', async () => {
+			// Mock getPromptManager to return realistic prompt with dynamic content
+			const { getPromptManager } = await import(
+				'../../../../../scripts/modules/prompt-manager.js'
+			);
+			const mockLoadPrompt = jest.fn().mockResolvedValue({
+				systemPrompt:
+					'You are an AI assistant helping with task breakdown for software development. You need to break down a high-level task into an appropriate number of specific subtasks that can be implemented one by one.',
+				userPrompt:
+					'Break down this task into an appropriate number of specific subtasks'
+			});
+			getPromptManager.mockReturnValue({
+				loadPrompt: mockLoadPrompt
+			});
 			// Act
 			await expandTask(tasksPath, taskId, 0, false, '', context, false);
@@ -1017,6 +1089,19 @@ describe('expandTask', () => {
 		});
 		test('should use specific count prompting when numSubtasks is positive', async () => {
+			// Mock getPromptManager to return realistic prompt with specific count
+			const { getPromptManager } = await import(
+				'../../../../../scripts/modules/prompt-manager.js'
+			);
+			const mockLoadPrompt = jest.fn().mockResolvedValue({
+				systemPrompt:
+					'You are an AI assistant helping with task breakdown for software development. You need to break down a high-level task into 5 specific subtasks that can be implemented one by one.',
+				userPrompt: 'Break down this task into exactly 5 specific subtasks'
+			});
+			getPromptManager.mockReturnValue({
+				loadPrompt: mockLoadPrompt
+			});
 			// Act
 			await expandTask(tasksPath, taskId, 5, false, '', context, false);
@@ -1032,6 +1117,19 @@ describe('expandTask', () => {
 			// Mock getDefaultSubtasks to return a specific value
 			getDefaultSubtasks.mockReturnValue(4);
+			// Mock getPromptManager to return realistic prompt with default count
+			const { getPromptManager } = await import(
+				'../../../../../scripts/modules/prompt-manager.js'
+			);
+			const mockLoadPrompt = jest.fn().mockResolvedValue({
+				systemPrompt:
+					'You are an AI assistant helping with task breakdown for software development. You need to break down a high-level task into 4 specific subtasks that can be implemented one by one.',
+				userPrompt: 'Break down this task into exactly 4 specific subtasks'
+			});
+			getPromptManager.mockReturnValue({
+				loadPrompt: mockLoadPrompt
+			});
 			// Act
 			await expandTask(tasksPath, taskId, -3, false, '', context, false);
@@ -1045,6 +1143,19 @@ describe('expandTask', () => {
 			// Mock getDefaultSubtasks to return a specific value
 			getDefaultSubtasks.mockReturnValue(6);
+			// Mock getPromptManager to return realistic prompt with default count
+			const { getPromptManager } = await import(
+				'../../../../../scripts/modules/prompt-manager.js'
+			);
+			const mockLoadPrompt = jest.fn().mockResolvedValue({
+				systemPrompt:
+					'You are an AI assistant helping with task breakdown for software development. You need to break down a high-level task into 6 specific subtasks that can be implemented one by one.',
+				userPrompt: 'Break down this task into exactly 6 specific subtasks'
+			});
+			getPromptManager.mockReturnValue({
+				loadPrompt: mockLoadPrompt
+			});
 			// Act - Call without specifying numSubtasks (undefined)
 			await expandTask(tasksPath, taskId, undefined, false, '', context, false);
@@ -1058,6 +1169,19 @@ describe('expandTask', () => {
 			// Mock getDefaultSubtasks to return a specific value
 			getDefaultSubtasks.mockReturnValue(7);
+			// Mock getPromptManager to return realistic prompt with default count
+			const { getPromptManager } = await import(
+				'../../../../../scripts/modules/prompt-manager.js'
+			);
+			const mockLoadPrompt = jest.fn().mockResolvedValue({
+				systemPrompt:
+					'You are an AI assistant helping with task breakdown for software development. You need to break down a high-level task into 7 specific subtasks that can be implemented one by one.',
+				userPrompt: 'Break down this task into exactly 7 specific subtasks'
+			});
+			getPromptManager.mockReturnValue({
+				loadPrompt: mockLoadPrompt
+			});
 			// Act - Call with null numSubtasks
 			await expandTask(tasksPath, taskId, null, false, '', context, false);
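
Taken together, the assertions in this file pin down the surface these tests rely on: getPromptManager() returns an object whose loadPrompt(templateId, parameters, variant) resolves to { systemPrompt, userPrompt }. A minimal usage sketch inferred from the mocks above, not from the implementation itself:

import { getPromptManager } from '../../../../../scripts/modules/prompt-manager.js';

// Call shape inferred from the expect(mockLoadPrompt).toHaveBeenCalledWith(...)
// assertions; the parameter names come straight from the tests.
const promptManager = getPromptManager();
const { systemPrompt, userPrompt } = await promptManager.loadPrompt(
	'expand-task',
	{
		subtaskCount: 5,
		expansionPrompt: 'Please break this task into 5 parts',
		additionalContext: 'Use React hooks and TypeScript',
		gatheredContext: 'Mock project context from files'
	},
	'complexity-report' // variant key; the tests assert only expect.any(String) when unused
);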

View File

@@ -48,7 +48,8 @@ jest.unstable_mockModule(
 	'../../../../../scripts/modules/config-manager.js',
 	() => ({
 		getDebugFlag: jest.fn(() => false),
-		getDefaultNumTasks: jest.fn(() => 10)
+		getDefaultNumTasks: jest.fn(() => 10),
+		getDefaultPriority: jest.fn(() => 'medium')
 	})
 );
@@ -70,6 +71,30 @@ jest.unstable_mockModule(
 	})
 );
+jest.unstable_mockModule(
+	'../../../../../scripts/modules/prompt-manager.js',
+	() => ({
+		getPromptManager: jest.fn().mockReturnValue({
+			loadPrompt: jest.fn().mockImplementation((templateName, params) => {
+				// Create dynamic mock prompts based on the parameters
+				const { numTasks } = params || {};
+				let numTasksText = '';
+				if (numTasks > 0) {
+					numTasksText = `approximately ${numTasks}`;
+				} else {
+					numTasksText = 'an appropriate number of';
+				}
+				return Promise.resolve({
+					systemPrompt: 'Mocked system prompt for parse-prd',
+					userPrompt: `Generate ${numTasksText} top-level development tasks from the PRD content.`
+				});
+			})
+		})
+	})
+);
 // Mock fs module
 jest.unstable_mockModule('fs', () => ({
 	default: {
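
The dynamic parse-prd mock above encodes the same branching a template could express with the helpers this PR adds; a hypothetical user-prompt fragment, assuming numTasks defaults to 0 when unset:

Generate {{#if (not (eq numTasks 0))}}approximately {{numTasks}}{{else}}an appropriate number of{{/if}} top-level development tasks from the PRD content.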
@@ -348,33 +373,23 @@ describe('parsePRD', () => {
 		expect(fs.default.writeFileSync).not.toHaveBeenCalled();
 	});
-	test('should call process.exit when tasks in tag exist without force flag in CLI mode', async () => {
+	test('should throw error when tasks in tag exist without force flag in CLI mode', async () => {
 		// Setup mocks to simulate tasks.json already exists with tasks in the target tag
 		fs.default.existsSync.mockReturnValue(true);
 		fs.default.readFileSync.mockReturnValueOnce(
 			JSON.stringify(existingTasksData)
 		);
-		// Mock process.exit for this specific test
-		const mockProcessExit = jest
-			.spyOn(process, 'exit')
-			.mockImplementation((code) => {
-				throw new Error(`process.exit: ${code}`);
-			});
-		// Call the function without mcpLog (CLI mode) and expect it to throw due to mocked process.exit
+		// Call the function without mcpLog (CLI mode) and expect it to throw an error
+		// In test environment, process.exit is prevented and error is thrown instead
 		await expect(
 			parsePRD('path/to/prd.txt', 'tasks/tasks.json', 3)
-		).rejects.toThrow('process.exit: 1');
-		// Verify process.exit was called with code 1
-		expect(mockProcessExit).toHaveBeenCalledWith(1);
+		).rejects.toThrow(
+			"Tag 'master' already contains 2 tasks. Use --force to overwrite or --append to add to existing tasks."
+		);
 		// Verify the file was NOT written
 		expect(fs.default.writeFileSync).not.toHaveBeenCalled();
-		// Restore the mock
-		mockProcessExit.mockRestore();
 	});
 	test('should append new tasks when append option is true', async () => {

View File

@@ -55,6 +55,18 @@ jest.unstable_mockModule(
 	})
 );
+jest.unstable_mockModule(
+	'../../../../../scripts/modules/prompt-manager.js',
+	() => ({
+		getPromptManager: jest.fn().mockReturnValue({
+			loadPrompt: jest.fn().mockResolvedValue({
+				systemPrompt: 'Mocked system prompt',
+				userPrompt: 'Mocked user prompt'
+			})
+		})
+	})
+);
 jest.unstable_mockModule(
 	'../../../../../scripts/modules/task-manager/models.js',
 	() => ({