Feat: Added automatic determination of task number based on complexity (#884)
- Added 'defaultNumTasks: 10' to default config, now used in 'parse-prd'
- Adjusted 'parse-prd' and 'expand-task' to:
  - Accept a 'numTasks' value of 0
- Updated tool and command descriptions
- Updated prompts to 'an appropriate number of' when value is 0
- Updated 'README-task-master.md' and 'command-reference.md' docs
- Added more tests for: 'parse-prd', 'expand-task' and 'config-manager'

Co-authored-by: Ralph Khreish <35776126+Crunchyman-ralph@users.noreply.github.com>
.changeset/smooth-ants-live.md (new file, +5 lines)
@@ -0,0 +1,5 @@
+---
+"task-master-ai": minor
+---
+
+Added option for the AI to determine the number of tasks required based entirely on complexity
@@ -323,8 +323,11 @@ Here's a comprehensive reference of all available commands:
 # Parse a PRD file and generate tasks
 task-master parse-prd <prd-file.txt>
 
-# Limit the number of tasks generated
-task-master parse-prd <prd-file.txt> --num-tasks=10
+# Limit the number of tasks generated (default is 10)
+task-master parse-prd <prd-file.txt> --num-tasks=5
+
+# Allow task master to determine the number of tasks based on complexity
+task-master parse-prd <prd-file.txt> --num-tasks=0
 ```
 
 ### List Tasks
@@ -397,6 +400,9 @@ When marking a task as "done", all of its subtasks will automatically be marked
 # Expand a specific task with subtasks
 task-master expand --id=<id> --num=<number>
 
+# Expand a task with a dynamic number of subtasks (ignoring complexity report)
+task-master expand --id=<id> --num=0
+
 # Expand with additional context
 task-master expand --id=<id> --prompt="<context>"
@@ -8,8 +8,11 @@ Here's a comprehensive reference of all available commands:
 # Parse a PRD file and generate tasks
 task-master parse-prd <prd-file.txt>
 
-# Limit the number of tasks generated
-task-master parse-prd <prd-file.txt> --num-tasks=10
+# Limit the number of tasks generated (default is 10)
+task-master parse-prd <prd-file.txt> --num-tasks=5
+
+# Allow task master to determine the number of tasks based on complexity
+task-master parse-prd <prd-file.txt> --num-tasks=0
 ```
 
 ## List Tasks
@@ -128,6 +131,9 @@ When marking a task as "done", all of its subtasks will automatically be marked
 # Expand a specific task with subtasks
 task-master expand --id=<id> --num=<number>
 
+# Expand a task with a dynamic number of subtasks (ignoring complexity report)
+task-master expand --id=<id> --num=0
+
 # Expand with additional context
 task-master expand --id=<id> --prompt="<context>"
@@ -36,6 +36,7 @@ Taskmaster uses two primary methods for configuration:
 	"global": {
 		"logLevel": "info",
 		"debug": false,
+		"defaultNumTasks": 10,
 		"defaultSubtasks": 5,
 		"defaultPriority": "medium",
 		"defaultTag": "master",
@@ -109,7 +109,7 @@ export async function parsePRDDirect(args, log, context = {}) {
 		if (numTasksArg) {
 			numTasks =
 				typeof numTasksArg === 'string' ? parseInt(numTasksArg, 10) : numTasksArg;
-			if (Number.isNaN(numTasks) || numTasks <= 0) {
+			if (Number.isNaN(numTasks) || numTasks < 0) {
 				// Ensure positive number
 				numTasks = getDefaultNumTasks(projectRoot); // Fallback to default if parsing fails or invalid
 				logWrapper.warn(
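The relaxed comparison is the crux of the change: 0 now passes validation as a "let the AI decide" sentinel, while NaN and negatives still fall back to the configured default. A condensed sketch of the resulting behavior (surrounding names taken from the hunk; the warn message is paraphrased):

```js
// Condensed sketch; numTasksArg arrives as a string ('0') from the MCP schema.
let numTasks = getDefaultNumTasks(projectRoot);
if (numTasksArg) {
	numTasks =
		typeof numTasksArg === 'string' ? parseInt(numTasksArg, 10) : numTasksArg;
	if (Number.isNaN(numTasks) || numTasks < 0) {
		numTasks = getDefaultNumTasks(projectRoot); // fallback on invalid input
		logWrapper.warn(`Invalid numTasks value; falling back to ${numTasks}.`);
	}
}
```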
@@ -43,7 +43,7 @@ export function registerParsePRDTool(server) {
 				.string()
 				.optional()
 				.describe(
-					'Approximate number of top-level tasks to generate (default: 10). As the agent, if you have enough information, ensure to enter a number of tasks that would logically scale with project complexity. Avoid entering numbers above 50 due to context window limitations.'
+					'Approximate number of top-level tasks to generate (default: 10). As the agent, if you have enough information, ensure to enter a number of tasks that would logically scale with project complexity. Setting to 0 will allow Taskmaster to determine the appropriate number of tasks based on the complexity of the PRD. Avoid entering numbers above 50 due to context window limitations.'
 				),
 			force: z
 				.boolean()
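Worth noting: the tool keeps `numTasks` as `z.string().optional()`, so an MCP client requesting dynamic mode sends `'0'` as a string; the `parseInt` handling in `parsePRDDirect` (earlier hunk) is what turns it back into the numeric sentinel. A quick illustration:

```js
// Illustration only: how a string '0' from the MCP layer survives validation
// under the relaxed check (NaN and negatives fall back, 0 passes through).
const numTasksArg = '0';
const numTasks = parseInt(numTasksArg, 10);
console.log(Number.isNaN(numTasks) || numTasks < 0); // false -> 0 is accepted
```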
@@ -69,7 +69,9 @@ import {
 	ConfigurationError,
 	isConfigFilePresent,
 	getAvailableModels,
-	getBaseUrlForRole
+	getBaseUrlForRole,
+	getDefaultNumTasks,
+	getDefaultSubtasks
 } from './config-manager.js';
 
 import { CUSTOM_PROVIDERS } from '../../src/constants/providers.js';
@@ -803,7 +805,11 @@ function registerCommands(programInstance) {
 			'Path to the PRD file (alternative to positional argument)'
 		)
 		.option('-o, --output <file>', 'Output file path', TASKMASTER_TASKS_FILE)
-		.option('-n, --num-tasks <number>', 'Number of tasks to generate', '10')
+		.option(
+			'-n, --num-tasks <number>',
+			'Number of tasks to generate',
+			getDefaultNumTasks()
+		)
 		.option('-f, --force', 'Skip confirmation when overwriting existing tasks')
 		.option(
 			'--append',
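One subtlety: Commander passes user-supplied option values as strings, while the new default comes from `getDefaultNumTasks()` as a number (previously the string `'10'`). A small sketch of the normalization downstream code presumably performs (`resolveNumTasks` is a hypothetical name):

```js
// Hypothetical normalization of the Commander option value.
function resolveNumTasks(options) {
	const raw = options.numTasks; // '0' or '12' from the CLI, 10 (number) from the default
	return typeof raw === 'string' ? parseInt(raw, 10) : raw;
}

resolveNumTasks({ numTasks: '0' }); // 0 -> let complexity decide
resolveNumTasks({ numTasks: 10 }); // 10 -> fixed default
```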
@@ -61,6 +61,7 @@ const DEFAULTS = {
 	global: {
 		logLevel: 'info',
 		debug: false,
+		defaultNumTasks: 10,
 		defaultSubtasks: 5,
 		defaultPriority: 'medium',
 		projectName: 'Task Master',
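The getter itself is not shown in this excerpt. Below is a hypothetical sketch consistent with the tests added further down (valid values pass through; missing, non-numeric, or absent config falls back to the `DEFAULTS` value of 10); the real implementation may differ:

```js
// Hypothetical sketch of getDefaultNumTasks, following the presumed pattern
// of the other global getters in config-manager.js.
export function getDefaultNumTasks(explicitRoot = null) {
	const config = getConfig(explicitRoot);
	const raw = config?.global?.defaultNumTasks;
	const parsed = typeof raw === 'string' ? parseInt(raw, 10) : raw;
	// Fall back to DEFAULTS.global.defaultNumTasks (10) for missing or invalid values.
	return Number.isInteger(parsed) && parsed >= 0
		? parsed
		: DEFAULTS.global.defaultNumTasks;
}
```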
@@ -67,7 +67,7 @@ const subtaskWrapperSchema = z.object({
  */
 function generateMainSystemPrompt(subtaskCount) {
 	return `You are an AI assistant helping with task breakdown for software development.
-You need to break down a high-level task into ${subtaskCount} specific subtasks that can be implemented one by one.
+You need to break down a high-level task into ${subtaskCount > 0 ? subtaskCount : 'an appropriate number of'} specific subtasks that can be implemented one by one.
 
 Subtasks should:
 1. Be specific and actionable implementation steps
@@ -117,11 +117,11 @@ function generateMainUserPrompt(
       "details": "Implementation guidance",
       "testStrategy": "Optional testing approach"
     },
-    // ... (repeat for a total of ${subtaskCount} subtasks with sequential IDs)
+    // ... (repeat for ${subtaskCount ? 'a total of ' + subtaskCount : 'each of the'} subtasks with sequential IDs)
   ]
 }`;
 
-	return `Break down this task into exactly ${subtaskCount} specific subtasks:
+	return `Break down this task into ${subtaskCount > 0 ? 'exactly ' + subtaskCount : 'an appropriate number of'} specific subtasks:
 
 Task ID: ${task.id}
 Title: ${task.title}
@@ -165,7 +165,7 @@ function generateResearchUserPrompt(
   ]
 }`;
 
-	return `Analyze the following task and break it down into exactly ${subtaskCount} specific subtasks using your research capabilities. Assign sequential IDs starting from ${nextSubtaskId}.
+	return `Analyze the following task and break it down into ${subtaskCount > 0 ? 'exactly ' + subtaskCount : 'an appropriate number of'} specific subtasks using your research capabilities. Assign sequential IDs starting from ${nextSubtaskId}.
 
 Parent Task:
 ID: ${task.id}
@@ -546,7 +546,7 @@ async function expandTask(
 
 	// Determine final subtask count
 	const explicitNumSubtasks = parseInt(numSubtasks, 10);
-	if (!Number.isNaN(explicitNumSubtasks) && explicitNumSubtasks > 0) {
+	if (!Number.isNaN(explicitNumSubtasks) && explicitNumSubtasks >= 0) {
 		finalSubtaskCount = explicitNumSubtasks;
 		logger.info(
 			`Using explicitly provided subtask count: ${finalSubtaskCount}`
@@ -560,7 +560,7 @@ async function expandTask(
 		finalSubtaskCount = getDefaultSubtasks(session);
 		logger.info(`Using default number of subtasks: ${finalSubtaskCount}`);
 	}
-	if (Number.isNaN(finalSubtaskCount) || finalSubtaskCount <= 0) {
+	if (Number.isNaN(finalSubtaskCount) || finalSubtaskCount < 0) {
 		logger.warn(
 			`Invalid subtask count determined (${finalSubtaskCount}), defaulting to 3.`
 		);
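With both relaxed comparisons in place, the count resolution in `expandTask` reads as the ladder sketched below (condensed; the complexity-report branch between the two hunks is not shown in this diff and is only noted in a comment):

```js
// Condensed sketch of the subtask-count resolution after this change.
let finalSubtaskCount;
const explicitNumSubtasks = parseInt(numSubtasks, 10);
if (!Number.isNaN(explicitNumSubtasks) && explicitNumSubtasks >= 0) {
	finalSubtaskCount = explicitNumSubtasks; // 0 now passes: dynamic mode
} else {
	// (a complexity-report recommendation branch, not shown in the diff, sits here)
	finalSubtaskCount = getDefaultSubtasks(session);
}
if (Number.isNaN(finalSubtaskCount) || finalSubtaskCount < 0) {
	finalSubtaskCount = 3; // last-resort guard; only NaN and negatives hit it now
}
```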
@@ -581,7 +581,7 @@ async function expandTask(
 	}
 
 	// --- Use Simplified System Prompt for Report Prompts ---
-	systemPrompt = `You are an AI assistant helping with task breakdown. Generate exactly ${finalSubtaskCount} subtasks based on the provided prompt and context. Respond ONLY with a valid JSON object containing a single key "subtasks" whose value is an array of the generated subtask objects. Each subtask object in the array must have keys: "id", "title", "description", "dependencies", "details", "status". Ensure the 'id' starts from ${nextSubtaskId} and is sequential. Ensure 'dependencies' only reference valid prior subtask IDs generated in this response (starting from ${nextSubtaskId}). Ensure 'details' is a string. Ensure 'status' is 'pending'. Do not include any other text or explanation.`;
+	systemPrompt = `You are an AI assistant helping with task breakdown. Generate ${finalSubtaskCount > 0 ? 'exactly ' + finalSubtaskCount : 'an appropriate number of'} subtasks based on the provided prompt and context. Respond ONLY with a valid JSON object containing a single key "subtasks" whose value is an array of the generated subtask objects. Each subtask object in the array must have keys: "id", "title", "description", "dependencies", "details", "status". Ensure the 'id' starts from ${nextSubtaskId} and is sequential. Ensure 'dependencies' only reference valid prior subtask IDs generated in this response (starting from ${nextSubtaskId}). Ensure 'status' is 'pending'. Do not include any other text or explanation.`;
 	logger.info(
 		`Using expansion prompt from complexity report and simplified system prompt for task ${task.id}.`
 	);
@@ -623,7 +623,7 @@ async function expandTask(
 	let loadingIndicator = null;
 	if (outputFormat === 'text') {
 		loadingIndicator = startLoadingIndicator(
-			`Generating ${finalSubtaskCount} subtasks...\n`
+			`Generating ${finalSubtaskCount || 'an appropriate number of'} subtasks...\n`
 		);
 	}
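Every prompt change in this file applies the same transformation: a positive count keeps the exact wording, while 0 switches to open-ended phrasing. Captured as a tiny hypothetical helper for clarity (the commit inlines the ternaries instead):

```js
// Hypothetical helper equivalent to the inlined ternaries above.
function countPhrase(count, noun) {
	return count > 0
		? `exactly ${count} ${noun}`
		: `an appropriate number of ${noun}`;
}

countPhrase(5, 'subtasks'); // 'exactly 5 subtasks'
countPhrase(0, 'subtasks'); // 'an appropriate number of subtasks'
```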
@@ -188,7 +188,7 @@ Your task breakdown should incorporate this research, resulting in more detailed
 	// Base system prompt for PRD parsing
 	const systemPrompt = `You are an AI assistant specialized in analyzing Product Requirements Documents (PRDs) and generating a structured, logically ordered, dependency-aware and sequenced list of development tasks in JSON format.${researchPromptAddition}
 
-Analyze the provided PRD content and generate approximately ${numTasks} top-level development tasks. If the complexity or the level of detail of the PRD is high, generate more tasks relative to the complexity of the PRD
+Analyze the provided PRD content and generate ${numTasks > 0 ? 'approximately ' + numTasks : 'an appropriate number of'} top-level development tasks. If the complexity or the level of detail of the PRD is high, generate more tasks relative to the complexity of the PRD
 Each task should represent a logical unit of work needed to implement the requirements and focus on the most direct and effective way to implement the requirements without unnecessary complexity or overengineering. Include pseudo-code, implementation details, and test strategy for each task. Find the most up to date information to implement each task.
 Assign sequential IDs starting from ${nextId}. Infer title, description, details, and test strategy for each task based *only* on the PRD content.
 Set status to 'pending', dependencies to an empty array [], and priority to 'medium' initially for all tasks.
@@ -207,7 +207,7 @@ Each task should follow this JSON structure:
 }
 
 Guidelines:
-1. Unless complexity warrants otherwise, create exactly ${numTasks} tasks, numbered sequentially starting from ${nextId}
+1. ${numTasks > 0 ? 'Unless complexity warrants otherwise' : 'Depending on the complexity'}, create ${numTasks > 0 ? 'exactly ' + numTasks : 'an appropriate number of'} tasks, numbered sequentially starting from ${nextId}
 2. Each task should be atomic and focused on a single responsibility following the most up to date best practices and standards
 3. Order tasks logically - consider dependencies and implementation sequence
 4. Early tasks should focus on setup, core functionality first, then advanced features
@@ -220,7 +220,7 @@ Guidelines:
 11. Always aim to provide the most direct path to implementation, avoiding over-engineering or roundabout approaches${research ? '\n12. For each task, include specific, actionable guidance based on current industry standards and best practices discovered through research' : ''}`;
 
 	// Build user prompt with PRD content
-	const userPrompt = `Here's the Product Requirements Document (PRD) to break down into approximately ${numTasks} tasks, starting IDs from ${nextId}:${research ? '\n\nRemember to thoroughly research current best practices and technologies before task breakdown to provide specific, actionable implementation details.' : ''}\n\n${prdContent}\n\n
+	const userPrompt = `Here's the Product Requirements Document (PRD) to break down into ${numTasks > 0 ? 'approximately ' + numTasks : 'an appropriate number of'} tasks, starting IDs from ${nextId}:${research ? '\n\nRemember to thoroughly research current best practices and technologies before task breakdown to provide specific, actionable implementation details.' : ''}\n\n${prdContent}\n\n
 
 Return your response in this format:
 {
@@ -235,7 +235,7 @@ Guidelines:
   ],
   "metadata": {
     "projectName": "PRD Implementation",
-    "totalTasks": ${numTasks},
+    "totalTasks": {number of tasks},
     "sourceFile": "${prdPath}",
     "generatedAt": "YYYY-MM-DD"
   }
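As a quick sanity check of the template logic across both modes (illustration only; these are the strings the new tests assert on):

```js
const phraseFor = (numTasks) =>
	numTasks > 0 ? 'approximately ' + numTasks : 'an appropriate number of';

console.log(`generate ${phraseFor(8)} top-level development tasks`);
// -> generate approximately 8 top-level development tasks
console.log(`generate ${phraseFor(0)} top-level development tasks`);
// -> generate an appropriate number of top-level development tasks
```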
@@ -136,6 +136,7 @@ const DEFAULT_CONFIG = {
 	global: {
 		logLevel: 'info',
 		debug: false,
+		defaultNumTasks: 10,
 		defaultSubtasks: 5,
 		defaultPriority: 'medium',
 		projectName: 'Task Master',
@@ -738,5 +739,116 @@ describe('getAllProviders', () => {
 
 // Add tests for getParametersForRole if needed
 
+// --- defaultNumTasks Tests ---
+describe('Configuration Getters', () => {
+	test('getDefaultNumTasks should return default value when config is valid', () => {
+		// Arrange: Mock fs.readFileSync to return valid config when called with the expected path
+		fsReadFileSyncSpy.mockImplementation((filePath) => {
+			if (filePath === MOCK_CONFIG_PATH) {
+				return JSON.stringify({
+					global: {
+						defaultNumTasks: 15
+					}
+				});
+			}
+			throw new Error(`Unexpected fs.readFileSync call: ${filePath}`);
+		});
+		fsExistsSyncSpy.mockReturnValue(true);
+
+		// Force reload to clear cache
+		configManager.getConfig(MOCK_PROJECT_ROOT, true);
+
+		// Act: Call getDefaultNumTasks with explicit root
+		const result = configManager.getDefaultNumTasks(MOCK_PROJECT_ROOT);
+
+		// Assert
+		expect(result).toBe(15);
+	});
+
+	test('getDefaultNumTasks should return fallback when config value is invalid', () => {
+		// Arrange: Mock fs.readFileSync to return invalid config
+		fsReadFileSyncSpy.mockImplementation((filePath) => {
+			if (filePath === MOCK_CONFIG_PATH) {
+				return JSON.stringify({
+					global: {
+						defaultNumTasks: 'invalid'
+					}
+				});
+			}
+			throw new Error(`Unexpected fs.readFileSync call: ${filePath}`);
+		});
+		fsExistsSyncSpy.mockReturnValue(true);
+
+		// Force reload to clear cache
+		configManager.getConfig(MOCK_PROJECT_ROOT, true);
+
+		// Act: Call getDefaultNumTasks with explicit root
+		const result = configManager.getDefaultNumTasks(MOCK_PROJECT_ROOT);
+
+		// Assert
+		expect(result).toBe(10); // Should fallback to DEFAULTS.global.defaultNumTasks
+	});
+
+	test('getDefaultNumTasks should return fallback when config value is missing', () => {
+		// Arrange: Mock fs.readFileSync to return config without defaultNumTasks
+		fsReadFileSyncSpy.mockImplementation((filePath) => {
+			if (filePath === MOCK_CONFIG_PATH) {
+				return JSON.stringify({
+					global: {}
+				});
+			}
+			throw new Error(`Unexpected fs.readFileSync call: ${filePath}`);
+		});
+		fsExistsSyncSpy.mockReturnValue(true);
+
+		// Force reload to clear cache
+		configManager.getConfig(MOCK_PROJECT_ROOT, true);
+
+		// Act: Call getDefaultNumTasks with explicit root
+		const result = configManager.getDefaultNumTasks(MOCK_PROJECT_ROOT);
+
+		// Assert
+		expect(result).toBe(10); // Should fallback to DEFAULTS.global.defaultNumTasks
+	});
+
+	test('getDefaultNumTasks should handle non-existent config file', () => {
+		// Arrange: Mock file not existing
+		fsExistsSyncSpy.mockReturnValue(false);
+
+		// Force reload to clear cache
+		configManager.getConfig(MOCK_PROJECT_ROOT, true);
+
+		// Act: Call getDefaultNumTasks with explicit root
+		const result = configManager.getDefaultNumTasks(MOCK_PROJECT_ROOT);
+
+		// Assert
+		expect(result).toBe(10); // Should fallback to DEFAULTS.global.defaultNumTasks
+	});
+
+	test('getDefaultNumTasks should accept explicit project root', () => {
+		// Arrange: Mock fs.readFileSync to return valid config
+		fsReadFileSyncSpy.mockImplementation((filePath) => {
+			if (filePath === MOCK_CONFIG_PATH) {
+				return JSON.stringify({
+					global: {
+						defaultNumTasks: 20
+					}
+				});
+			}
+			throw new Error(`Unexpected fs.readFileSync call: ${filePath}`);
+		});
+		fsExistsSyncSpy.mockReturnValue(true);
+
+		// Force reload to clear cache
+		configManager.getConfig(MOCK_PROJECT_ROOT, true);
+
+		// Act: Call getDefaultNumTasks with explicit project root
+		const result = configManager.getDefaultNumTasks(MOCK_PROJECT_ROOT);
+
+		// Assert
+		expect(result).toBe(20);
+	});
+});
 
 // Note: Tests for setMainModel, setResearchModel were removed as the functions were removed in the implementation.
 // If similar setter functions exist, add tests for them following the writeConfig pattern.
@@ -122,7 +122,8 @@ jest.unstable_mockModule(
 	'../../../../../scripts/modules/config-manager.js',
 	() => ({
 		getDefaultSubtasks: jest.fn(() => 3),
-		getDebugFlag: jest.fn(() => false)
+		getDebugFlag: jest.fn(() => false),
+		getDefaultNumTasks: jest.fn(() => 10)
 	})
 );
@@ -199,6 +200,10 @@ const generateTaskFiles = (
 	)
 ).default;
 
+const { getDefaultSubtasks } = await import(
+	'../../../../../scripts/modules/config-manager.js'
+);
+
 // Import the module under test
 const { default: expandTask } = await import(
 	'../../../../../scripts/modules/task-manager/expand-task.js'
@@ -946,4 +951,120 @@ describe('expandTask', () => {
 			);
 		});
 	});
+
+	describe('Dynamic Subtask Generation', () => {
+		const tasksPath = 'tasks/tasks.json';
+		const taskId = 1;
+		const context = { session: null, mcpLog: null };
+
+		beforeEach(() => {
+			// Reset all mocks
+			jest.clearAllMocks();
+
+			// Setup default mocks
+			readJSON.mockReturnValue({
+				tasks: [
+					{
+						id: 1,
+						title: 'Test Task',
+						description: 'A test task',
+						status: 'pending',
+						subtasks: []
+					}
+				]
+			});
+
+			findTaskById.mockReturnValue({
+				id: 1,
+				title: 'Test Task',
+				description: 'A test task',
+				status: 'pending',
+				subtasks: []
+			});
+
+			findProjectRoot.mockReturnValue('/mock/project/root');
+		});
+
+		test('should accept 0 as valid numSubtasks value for dynamic generation', async () => {
+			// Act - Call with numSubtasks=0 (should not throw error)
+			const result = await expandTask(
+				tasksPath,
+				taskId,
+				0,
+				false,
+				'',
+				context,
+				false
+			);
+
+			// Assert - Should complete successfully
+			expect(result).toBeDefined();
+			expect(generateTextService).toHaveBeenCalled();
+		});
+
+		test('should use dynamic prompting when numSubtasks is 0', async () => {
+			// Act
+			await expandTask(tasksPath, taskId, 0, false, '', context, false);
+
+			// Assert - Verify generateTextService was called
+			expect(generateTextService).toHaveBeenCalled();
+
+			// Get the call arguments to verify the system prompt
+			const callArgs = generateTextService.mock.calls[0][0];
+			expect(callArgs.systemPrompt).toContain(
+				'an appropriate number of specific subtasks'
+			);
+		});
+
+		test('should use specific count prompting when numSubtasks is positive', async () => {
+			// Act
+			await expandTask(tasksPath, taskId, 5, false, '', context, false);
+
+			// Assert - Verify generateTextService was called
+			expect(generateTextService).toHaveBeenCalled();
+
+			// Get the call arguments to verify the system prompt
+			const callArgs = generateTextService.mock.calls[0][0];
+			expect(callArgs.systemPrompt).toContain('5 specific subtasks');
+		});
+
+		test('should reject negative numSubtasks values and fallback to default', async () => {
+			// Mock getDefaultSubtasks to return a specific value
+			getDefaultSubtasks.mockReturnValue(4);
+
+			// Act
+			await expandTask(tasksPath, taskId, -3, false, '', context, false);
+
+			// Assert - Should use default value instead of negative
+			expect(generateTextService).toHaveBeenCalled();
+			const callArgs = generateTextService.mock.calls[0][0];
+			expect(callArgs.systemPrompt).toContain('4 specific subtasks');
+		});
+
+		test('should use getDefaultSubtasks when numSubtasks is undefined', async () => {
+			// Mock getDefaultSubtasks to return a specific value
+			getDefaultSubtasks.mockReturnValue(6);
+
+			// Act - Call without specifying numSubtasks (undefined)
+			await expandTask(tasksPath, taskId, undefined, false, '', context, false);
+
+			// Assert - Should use default value
+			expect(generateTextService).toHaveBeenCalled();
+			const callArgs = generateTextService.mock.calls[0][0];
+			expect(callArgs.systemPrompt).toContain('6 specific subtasks');
+		});
+
+		test('should use getDefaultSubtasks when numSubtasks is null', async () => {
+			// Mock getDefaultSubtasks to return a specific value
+			getDefaultSubtasks.mockReturnValue(7);
+
+			// Act - Call with null numSubtasks
+			await expandTask(tasksPath, taskId, null, false, '', context, false);
+
+			// Assert - Should use default value
+			expect(generateTextService).toHaveBeenCalled();
+			const callArgs = generateTextService.mock.calls[0][0];
+			expect(callArgs.systemPrompt).toContain('7 specific subtasks');
+		});
+	});
 });
@@ -47,7 +47,8 @@ jest.unstable_mockModule('../../../../../scripts/modules/ui.js', () => ({
 jest.unstable_mockModule(
 	'../../../../../scripts/modules/config-manager.js',
 	() => ({
-		getDebugFlag: jest.fn(() => false)
+		getDebugFlag: jest.fn(() => false),
+		getDefaultNumTasks: jest.fn(() => 10)
 	})
 );
@@ -94,13 +95,15 @@ jest.unstable_mockModule('path', () => ({
 }));
 
 // Import the mocked modules
-const { readJSON, writeJSON, log, promptYesNo } = await import(
+const { readJSON, promptYesNo } = await import(
 	'../../../../../scripts/modules/utils.js'
 );
 
 const { generateObjectService } = await import(
 	'../../../../../scripts/modules/ai-services-unified.js'
 );
 
+// Note: getDefaultNumTasks validation happens at CLI/MCP level, not in the main parse-prd module
 const generateTaskFiles = (
 	await import(
 		'../../../../../scripts/modules/task-manager/generate-task-files.js'
@@ -433,4 +436,123 @@ describe('parsePRD', () => {
 		// Verify prompt was NOT called with append flag
 		expect(promptYesNo).not.toHaveBeenCalled();
 	});
 
+	describe('Dynamic Task Generation', () => {
+		test('should use dynamic prompting when numTasks is 0', async () => {
+			// Setup mocks to simulate normal conditions (no existing output file)
+			fs.default.existsSync.mockImplementation((p) => {
+				if (p === 'tasks/tasks.json') return false; // Output file doesn't exist
+				if (p === 'tasks') return true; // Directory exists
+				return false;
+			});
+
+			// Call the function with numTasks=0 for dynamic generation
+			await parsePRD('path/to/prd.txt', 'tasks/tasks.json', 0);
+
+			// Verify generateObjectService was called
+			expect(generateObjectService).toHaveBeenCalled();
+
+			// Get the call arguments to verify the prompt
+			const callArgs = generateObjectService.mock.calls[0][0];
+			expect(callArgs.prompt).toContain('an appropriate number of');
+			expect(callArgs.prompt).not.toContain('approximately 0');
+		});
+
+		test('should use specific count prompting when numTasks is positive', async () => {
+			// Setup mocks to simulate normal conditions (no existing output file)
+			fs.default.existsSync.mockImplementation((p) => {
+				if (p === 'tasks/tasks.json') return false; // Output file doesn't exist
+				if (p === 'tasks') return true; // Directory exists
+				return false;
+			});
+
+			// Call the function with specific numTasks
+			await parsePRD('path/to/prd.txt', 'tasks/tasks.json', 5);
+
+			// Verify generateObjectService was called
+			expect(generateObjectService).toHaveBeenCalled();
+
+			// Get the call arguments to verify the prompt
+			const callArgs = generateObjectService.mock.calls[0][0];
+			expect(callArgs.prompt).toContain('approximately 5');
+			expect(callArgs.prompt).not.toContain('an appropriate number of');
+		});
+
+		test('should accept 0 as valid numTasks value', async () => {
+			// Setup mocks to simulate normal conditions (no existing output file)
+			fs.default.existsSync.mockImplementation((p) => {
+				if (p === 'tasks/tasks.json') return false; // Output file doesn't exist
+				if (p === 'tasks') return true; // Directory exists
+				return false;
+			});
+
+			// Call the function with numTasks=0 - should not throw error
+			const result = await parsePRD('path/to/prd.txt', 'tasks/tasks.json', 0);
+
+			// Verify it completed successfully
+			expect(result).toEqual({
+				success: true,
+				tasksPath: 'tasks/tasks.json',
+				telemetryData: {}
+			});
+		});
+
+		test('should use dynamic prompting when numTasks is negative (no validation in main module)', async () => {
+			// Setup mocks to simulate normal conditions (no existing output file)
+			fs.default.existsSync.mockImplementation((p) => {
+				if (p === 'tasks/tasks.json') return false; // Output file doesn't exist
+				if (p === 'tasks') return true; // Directory exists
+				return false;
+			});
+
+			// Call the function with negative numTasks
+			// Note: The main parse-prd.js module doesn't validate numTasks - validation happens at CLI/MCP level
+			await parsePRD('path/to/prd.txt', 'tasks/tasks.json', -5);
+
+			// Verify generateObjectService was called
+			expect(generateObjectService).toHaveBeenCalled();
+			const callArgs = generateObjectService.mock.calls[0][0];
+			// Negative values are treated as <= 0, so should use dynamic prompting
+			expect(callArgs.prompt).toContain('an appropriate number of');
+			expect(callArgs.prompt).not.toContain('approximately -5');
+		});
+	});
+
+	describe('Configuration Integration', () => {
+		test('should use dynamic prompting when numTasks is null', async () => {
+			// Setup mocks to simulate normal conditions (no existing output file)
+			fs.default.existsSync.mockImplementation((p) => {
+				if (p === 'tasks/tasks.json') return false; // Output file doesn't exist
+				if (p === 'tasks') return true; // Directory exists
+				return false;
+			});
+
+			// Call the function with null numTasks
+			await parsePRD('path/to/prd.txt', 'tasks/tasks.json', null);
+
+			// Verify generateObjectService was called with dynamic prompting
+			expect(generateObjectService).toHaveBeenCalled();
+			const callArgs = generateObjectService.mock.calls[0][0];
+			expect(callArgs.prompt).toContain('an appropriate number of');
+		});
+
+		test('should use dynamic prompting when numTasks is invalid string', async () => {
+			// Setup mocks to simulate normal conditions (no existing output file)
+			fs.default.existsSync.mockImplementation((p) => {
+				if (p === 'tasks/tasks.json') return false; // Output file doesn't exist
+				if (p === 'tasks') return true; // Directory exists
+				return false;
+			});
+
+			// Call the function with invalid numTasks (string that's not a number)
+			await parsePRD('path/to/prd.txt', 'tasks/tasks.json', 'invalid');
+
+			// Verify generateObjectService was called with dynamic prompting
+			// Note: The main module doesn't validate - it just uses the value as-is
+			// Since 'invalid' > 0 is false, it uses dynamic prompting
+			expect(generateObjectService).toHaveBeenCalled();
+			const callArgs = generateObjectService.mock.calls[0][0];
+			expect(callArgs.prompt).toContain('an appropriate number of');
+		});
+	});
 });