chore: add more granular e2e
This commit is contained in:
411
tests/e2e/tests/commands/add-task.test.js
Normal file
411
tests/e2e/tests/commands/add-task.test.js
Normal file
@@ -0,0 +1,411 @@
|
||||
/**
|
||||
* Comprehensive E2E tests for add-task command
|
||||
* Tests all aspects of task creation including AI and manual modes
|
||||
*/
|
||||
|
||||
export default async function testAddTask(logger, helpers, context) {
|
||||
const { testDir } = context;
|
||||
const results = {
|
||||
status: 'passed',
|
||||
errors: [],
|
||||
tests: []
|
||||
};
|
||||
|
||||
async function runTest(name, testFn) {
|
||||
try {
|
||||
logger.info(`\nRunning: ${name}`);
|
||||
await testFn();
|
||||
results.tests.push({ name, status: 'passed' });
|
||||
logger.success(`✓ ${name}`);
|
||||
} catch (error) {
|
||||
results.tests.push({ name, status: 'failed', error: error.message });
|
||||
results.errors.push({ test: name, error: error.message });
|
||||
logger.error(`✗ ${name}: ${error.message}`);
|
||||
}
|
||||
}
|
||||
|
||||
try {
|
||||
logger.info('Starting comprehensive add-task tests...');
|
||||
|
||||
// Test 1: Basic AI task creation with --prompt
|
||||
await runTest('AI task creation with prompt', async () => {
|
||||
const result = await helpers.taskMaster(
|
||||
'add-task',
|
||||
['--prompt', 'Create a user authentication system with JWT tokens'],
|
||||
{ cwd: testDir }
|
||||
);
|
||||
if (result.exitCode !== 0) {
|
||||
throw new Error(`Command failed: ${result.stderr}`);
|
||||
}
|
||||
const taskId = helpers.extractTaskId(result.stdout);
|
||||
if (!taskId) {
|
||||
throw new Error('Failed to extract task ID from output');
|
||||
}
|
||||
// Verify task was created with AI-generated content
|
||||
const showResult = await helpers.taskMaster('show', [taskId], { cwd: testDir });
|
||||
if (!showResult.stdout.includes('authentication') && !showResult.stdout.includes('JWT')) {
|
||||
throw new Error('AI did not properly understand the prompt');
|
||||
}
|
||||
});
|
||||
|
||||
// Test 2: Manual task creation with --title and --description
|
||||
await runTest('Manual task creation', async () => {
|
||||
const result = await helpers.taskMaster(
|
||||
'add-task',
|
||||
[
|
||||
'--title', 'Setup database connection',
|
||||
'--description', 'Configure PostgreSQL connection with connection pooling'
|
||||
],
|
||||
{ cwd: testDir }
|
||||
);
|
||||
if (result.exitCode !== 0) {
|
||||
throw new Error(`Command failed: ${result.stderr}`);
|
||||
}
|
||||
const taskId = helpers.extractTaskId(result.stdout);
|
||||
if (!taskId) {
|
||||
throw new Error('Failed to extract task ID');
|
||||
}
|
||||
// Verify exact title and description
|
||||
const showResult = await helpers.taskMaster('show', [taskId], { cwd: testDir });
|
||||
if (!showResult.stdout.includes('Setup database connection')) {
|
||||
throw new Error('Title not set correctly');
|
||||
}
|
||||
if (!showResult.stdout.includes('Configure PostgreSQL connection')) {
|
||||
throw new Error('Description not set correctly');
|
||||
}
|
||||
});
|
||||
|
||||
// Test 3: Task creation with tags
|
||||
await runTest('Task creation with tags', async () => {
|
||||
// First create a tag
|
||||
await helpers.taskMaster(
|
||||
'add-tag',
|
||||
['backend', '--description', 'Backend tasks'],
|
||||
{ cwd: testDir }
|
||||
);
|
||||
|
||||
// Create task with tag
|
||||
const result = await helpers.taskMaster(
|
||||
'add-task',
|
||||
['--prompt', 'Create REST API endpoints', '--tag', 'backend'],
|
||||
{ cwd: testDir }
|
||||
);
|
||||
if (result.exitCode !== 0) {
|
||||
throw new Error(`Command failed: ${result.stderr}`);
|
||||
}
|
||||
const taskId = helpers.extractTaskId(result.stdout);
|
||||
|
||||
// Verify task is in tag
|
||||
const listResult = await helpers.taskMaster('list', ['--tag', 'backend'], { cwd: testDir });
|
||||
if (!listResult.stdout.includes(taskId)) {
|
||||
throw new Error('Task not found in specified tag');
|
||||
}
|
||||
});
|
||||
|
||||
// Test 4: Task creation with priority
|
||||
await runTest('Task creation with priority', async () => {
|
||||
const result = await helpers.taskMaster(
|
||||
'add-task',
|
||||
['--prompt', 'Fix critical security vulnerability', '--priority', 'high'],
|
||||
{ cwd: testDir }
|
||||
);
|
||||
if (result.exitCode !== 0) {
|
||||
throw new Error(`Command failed: ${result.stderr}`);
|
||||
}
|
||||
const taskId = helpers.extractTaskId(result.stdout);
|
||||
|
||||
// Verify priority was set
|
||||
const showResult = await helpers.taskMaster('show', [taskId], { cwd: testDir });
|
||||
if (!showResult.stdout.includes('high') && !showResult.stdout.includes('High')) {
|
||||
throw new Error('Priority not set correctly');
|
||||
}
|
||||
});
|
||||
|
||||
// Test 5: Task creation with dependencies at creation time
|
||||
await runTest('Task creation with dependencies', async () => {
|
||||
// Create dependency task first
|
||||
const depResult = await helpers.taskMaster(
|
||||
'add-task',
|
||||
['--title', 'Setup environment'],
|
||||
{ cwd: testDir }
|
||||
);
|
||||
const depTaskId = helpers.extractTaskId(depResult.stdout);
|
||||
|
||||
// Create task with dependency
|
||||
const result = await helpers.taskMaster(
|
||||
'add-task',
|
||||
['--prompt', 'Deploy application', '--depends-on', depTaskId],
|
||||
{ cwd: testDir }
|
||||
);
|
||||
if (result.exitCode !== 0) {
|
||||
throw new Error(`Command failed: ${result.stderr}`);
|
||||
}
|
||||
const taskId = helpers.extractTaskId(result.stdout);
|
||||
|
||||
// Verify dependency was set
|
||||
const showResult = await helpers.taskMaster('show', [taskId], { cwd: testDir });
|
||||
if (!showResult.stdout.includes(depTaskId)) {
|
||||
throw new Error('Dependency not set correctly');
|
||||
}
|
||||
});
|
||||
|
||||
// Test 6: Task creation with custom metadata
|
||||
await runTest('Task creation with metadata', async () => {
|
||||
const result = await helpers.taskMaster(
|
||||
'add-task',
|
||||
[
|
||||
'--prompt', 'Implement caching layer',
|
||||
'--metadata', 'team=backend',
|
||||
'--metadata', 'sprint=2024-Q1'
|
||||
],
|
||||
{ cwd: testDir }
|
||||
);
|
||||
if (result.exitCode !== 0) {
|
||||
throw new Error(`Command failed: ${result.stderr}`);
|
||||
}
|
||||
const taskId = helpers.extractTaskId(result.stdout);
|
||||
|
||||
// Verify metadata (check in tasks.json)
|
||||
const tasksPath = `${testDir}/.taskmaster/tasks/tasks.json`;
|
||||
const tasks = helpers.readJson(tasksPath);
|
||||
const task = tasks.tasks.find(t => t.id === taskId);
|
||||
if (!task || !task.metadata || task.metadata.team !== 'backend' || task.metadata.sprint !== '2024-Q1') {
|
||||
throw new Error('Metadata not set correctly');
|
||||
}
|
||||
});
|
||||
|
||||
// Test 7: Error handling - empty prompt
|
||||
await runTest('Error handling - empty prompt', async () => {
|
||||
const result = await helpers.taskMaster(
|
||||
'add-task',
|
||||
['--prompt', ''],
|
||||
{ cwd: testDir, allowFailure: true }
|
||||
);
|
||||
if (result.exitCode === 0) {
|
||||
throw new Error('Should have failed with empty prompt');
|
||||
}
|
||||
});
|
||||
|
||||
// Test 8: Error handling - invalid priority
|
||||
await runTest('Error handling - invalid priority', async () => {
|
||||
const result = await helpers.taskMaster(
|
||||
'add-task',
|
||||
['--prompt', 'Test task', '--priority', 'invalid'],
|
||||
{ cwd: testDir, allowFailure: true }
|
||||
);
|
||||
if (result.exitCode === 0) {
|
||||
throw new Error('Should have failed with invalid priority');
|
||||
}
|
||||
});
|
||||
|
||||
// Test 9: Error handling - non-existent dependency
|
||||
await runTest('Error handling - non-existent dependency', async () => {
|
||||
const result = await helpers.taskMaster(
|
||||
'add-task',
|
||||
['--prompt', 'Test task', '--depends-on', '99999'],
|
||||
{ cwd: testDir, allowFailure: true }
|
||||
);
|
||||
if (result.exitCode === 0) {
|
||||
throw new Error('Should have failed with non-existent dependency');
|
||||
}
|
||||
});
|
||||
|
||||
// Test 10: Very long prompt handling
|
||||
await runTest('Very long prompt handling', async () => {
|
||||
const longPrompt = 'Create a comprehensive system that ' + 'handles many features '.repeat(50);
|
||||
const result = await helpers.taskMaster(
|
||||
'add-task',
|
||||
['--prompt', longPrompt],
|
||||
{ cwd: testDir }
|
||||
);
|
||||
if (result.exitCode !== 0) {
|
||||
throw new Error(`Command failed: ${result.stderr}`);
|
||||
}
|
||||
const taskId = helpers.extractTaskId(result.stdout);
|
||||
if (!taskId) {
|
||||
throw new Error('Failed to create task with long prompt');
|
||||
}
|
||||
});
|
||||
|
||||
// Test 11: Special characters in prompt
|
||||
await runTest('Special characters in prompt', async () => {
|
||||
const specialPrompt = 'Implement feature: "User\'s data & settings" <with> special|chars!';
|
||||
const result = await helpers.taskMaster(
|
||||
'add-task',
|
||||
['--prompt', specialPrompt],
|
||||
{ cwd: testDir }
|
||||
);
|
||||
if (result.exitCode !== 0) {
|
||||
throw new Error(`Command failed: ${result.stderr}`);
|
||||
}
|
||||
const taskId = helpers.extractTaskId(result.stdout);
|
||||
if (!taskId) {
|
||||
throw new Error('Failed to create task with special characters');
|
||||
}
|
||||
});
|
||||
|
||||
// Test 12: Multiple tasks in parallel
|
||||
await runTest('Multiple tasks in parallel', async () => {
|
||||
const promises = [];
|
||||
for (let i = 0; i < 3; i++) {
|
||||
promises.push(
|
||||
helpers.taskMaster(
|
||||
'add-task',
|
||||
['--prompt', `Parallel task ${i + 1}`],
|
||||
{ cwd: testDir }
|
||||
)
|
||||
);
|
||||
}
|
||||
const results = await Promise.all(promises);
|
||||
|
||||
for (let i = 0; i < results.length; i++) {
|
||||
if (results[i].exitCode !== 0) {
|
||||
throw new Error(`Parallel task ${i + 1} failed`);
|
||||
}
|
||||
const taskId = helpers.extractTaskId(results[i].stdout);
|
||||
if (!taskId) {
|
||||
throw new Error(`Failed to extract task ID for parallel task ${i + 1}`);
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
// Test 13: AI fallback behavior (simulate by using invalid model)
|
||||
await runTest('AI fallback behavior', async () => {
|
||||
// Set an invalid model to trigger fallback
|
||||
await helpers.taskMaster(
|
||||
'models',
|
||||
['--set-main', 'invalid-model-xyz'],
|
||||
{ cwd: testDir }
|
||||
);
|
||||
|
||||
const result = await helpers.taskMaster(
|
||||
'add-task',
|
||||
['--prompt', 'Test fallback behavior'],
|
||||
{ cwd: testDir, allowFailure: true }
|
||||
);
|
||||
|
||||
// Should either use fallback model or create task without AI
|
||||
// The exact behavior depends on implementation
|
||||
if (result.exitCode === 0) {
|
||||
const taskId = helpers.extractTaskId(result.stdout);
|
||||
if (!taskId) {
|
||||
throw new Error('Fallback did not create a task');
|
||||
}
|
||||
}
|
||||
|
||||
// Reset to valid model
|
||||
await helpers.taskMaster(
|
||||
'models',
|
||||
['--set-main', 'gpt-3.5-turbo'],
|
||||
{ cwd: testDir }
|
||||
);
|
||||
});
|
||||
|
||||
// Test 14: AI quality check - verify reasonable output
|
||||
await runTest('AI quality - reasonable title and description', async () => {
|
||||
const result = await helpers.taskMaster(
|
||||
'add-task',
|
||||
['--prompt', 'Build a responsive navigation menu with dropdown support'],
|
||||
{ cwd: testDir }
|
||||
);
|
||||
if (result.exitCode !== 0) {
|
||||
throw new Error(`Command failed: ${result.stderr}`);
|
||||
}
|
||||
const taskId = helpers.extractTaskId(result.stdout);
|
||||
|
||||
const showResult = await helpers.taskMaster('show', [taskId], { cwd: testDir });
|
||||
const output = showResult.stdout.toLowerCase();
|
||||
|
||||
// Check for relevant keywords that indicate AI understood the prompt
|
||||
const relevantKeywords = ['navigation', 'menu', 'dropdown', 'responsive'];
|
||||
const foundKeywords = relevantKeywords.filter(keyword => output.includes(keyword));
|
||||
|
||||
if (foundKeywords.length < 2) {
|
||||
throw new Error('AI output does not seem to understand the prompt properly');
|
||||
}
|
||||
});
|
||||
|
||||
// Test 15: Task creation with all options combined
|
||||
await runTest('Task creation with all options', async () => {
|
||||
// Create dependency
|
||||
const depResult = await helpers.taskMaster(
|
||||
'add-task',
|
||||
['--title', 'Prerequisite task'],
|
||||
{ cwd: testDir }
|
||||
);
|
||||
const depTaskId = helpers.extractTaskId(depResult.stdout);
|
||||
|
||||
// Create tag
|
||||
await helpers.taskMaster(
|
||||
'add-tag',
|
||||
['feature-complete', '--description', 'Complete feature test'],
|
||||
{ cwd: testDir }
|
||||
);
|
||||
|
||||
// Create task with all options
|
||||
const result = await helpers.taskMaster(
|
||||
'add-task',
|
||||
[
|
||||
'--prompt', 'Comprehensive task with all features',
|
||||
'--priority', 'medium',
|
||||
'--tag', 'feature-complete',
|
||||
'--depends-on', depTaskId,
|
||||
'--metadata', 'complexity=high',
|
||||
'--metadata', 'estimated_hours=8'
|
||||
],
|
||||
{ cwd: testDir }
|
||||
);
|
||||
if (result.exitCode !== 0) {
|
||||
throw new Error(`Command failed: ${result.stderr}`);
|
||||
}
|
||||
const taskId = helpers.extractTaskId(result.stdout);
|
||||
|
||||
// Verify all options were applied
|
||||
const showResult = await helpers.taskMaster('show', [taskId], { cwd: testDir });
|
||||
const listResult = await helpers.taskMaster('list', ['--tag', 'feature-complete'], { cwd: testDir });
|
||||
const tasksData = helpers.readJson(`${testDir}/.taskmaster/tasks/tasks.json`);
|
||||
const task = tasksData.tasks.find(t => t.id === taskId);
|
||||
|
||||
if (!showResult.stdout.includes('medium') && !showResult.stdout.includes('Medium')) {
|
||||
throw new Error('Priority not set');
|
||||
}
|
||||
if (!listResult.stdout.includes(taskId)) {
|
||||
throw new Error('Task not in tag');
|
||||
}
|
||||
if (!showResult.stdout.includes(depTaskId)) {
|
||||
throw new Error('Dependency not set');
|
||||
}
|
||||
if (!task || !task.metadata || task.metadata.complexity !== 'high') {
|
||||
throw new Error('Metadata not set correctly');
|
||||
}
|
||||
});
|
||||
|
||||
// Calculate summary
|
||||
const totalTests = results.tests.length;
|
||||
const passedTests = results.tests.filter(t => t.status === 'passed').length;
|
||||
const failedTests = results.tests.filter(t => t.status === 'failed').length;
|
||||
|
||||
logger.info('\n=== Add-Task Test Summary ===');
|
||||
logger.info(`Total tests: ${totalTests}`);
|
||||
logger.info(`Passed: ${passedTests}`);
|
||||
logger.info(`Failed: ${failedTests}`);
|
||||
|
||||
if (failedTests > 0) {
|
||||
results.status = 'failed';
|
||||
logger.error(`\n${failedTests} tests failed`);
|
||||
} else {
|
||||
logger.success('\n✅ All add-task tests passed!');
|
||||
}
|
||||
|
||||
} catch (error) {
|
||||
results.status = 'failed';
|
||||
results.errors.push({
|
||||
test: 'add-task test suite',
|
||||
error: error.message,
|
||||
stack: error.stack
|
||||
});
|
||||
logger.error(`Add-task test suite failed: ${error.message}`);
|
||||
}
|
||||
|
||||
return results;
|
||||
}
|
||||
390
tests/e2e/tests/commands/analyze-complexity.test.js
Normal file
390
tests/e2e/tests/commands/analyze-complexity.test.js
Normal file
@@ -0,0 +1,390 @@
|
||||
/**
|
||||
* Comprehensive E2E tests for analyze-complexity command
|
||||
* Tests all aspects of complexity analysis including research mode and output formats
|
||||
*/
|
||||
|
||||
export default async function testAnalyzeComplexity(logger, helpers, context) {
|
||||
const { testDir } = context;
|
||||
const results = {
|
||||
status: 'passed',
|
||||
errors: [],
|
||||
tests: []
|
||||
};
|
||||
|
||||
async function runTest(name, testFn) {
|
||||
try {
|
||||
logger.info(`\nRunning: ${name}`);
|
||||
await testFn();
|
||||
results.tests.push({ name, status: 'passed' });
|
||||
logger.success(`✓ ${name}`);
|
||||
} catch (error) {
|
||||
results.tests.push({ name, status: 'failed', error: error.message });
|
||||
results.errors.push({ test: name, error: error.message });
|
||||
logger.error(`✗ ${name}: ${error.message}`);
|
||||
}
|
||||
}
|
||||
|
||||
try {
|
||||
logger.info('Starting comprehensive analyze-complexity tests...');
|
||||
|
||||
// Setup: Create some tasks for analysis
|
||||
logger.info('Setting up test tasks...');
|
||||
const taskIds = [];
|
||||
|
||||
// Create simple task
|
||||
const simple = await helpers.taskMaster(
|
||||
'add-task',
|
||||
['--title', 'Simple task', '--description', 'A very simple task'],
|
||||
{ cwd: testDir }
|
||||
);
|
||||
taskIds.push(helpers.extractTaskId(simple.stdout));
|
||||
|
||||
// Create complex task with subtasks
|
||||
const complex = await helpers.taskMaster(
|
||||
'add-task',
|
||||
['--prompt', 'Build a complete e-commerce platform with payment processing'],
|
||||
{ cwd: testDir }
|
||||
);
|
||||
const complexId = helpers.extractTaskId(complex.stdout);
|
||||
taskIds.push(complexId);
|
||||
|
||||
// Expand complex task to add subtasks
|
||||
await helpers.taskMaster('expand', [complexId], { cwd: testDir });
|
||||
|
||||
// Create task with dependencies
|
||||
const withDeps = await helpers.taskMaster(
|
||||
'add-task',
|
||||
['--title', 'Deployment task', '--depends-on', taskIds[0]],
|
||||
{ cwd: testDir }
|
||||
);
|
||||
taskIds.push(helpers.extractTaskId(withDeps.stdout));
|
||||
|
||||
// Test 1: Basic complexity analysis
|
||||
await runTest('Basic complexity analysis', async () => {
|
||||
const result = await helpers.taskMaster(
|
||||
'analyze-complexity',
|
||||
[],
|
||||
{ cwd: testDir }
|
||||
);
|
||||
if (result.exitCode !== 0) {
|
||||
throw new Error(`Command failed: ${result.stderr}`);
|
||||
}
|
||||
// Check for basic output
|
||||
if (!result.stdout.includes('Complexity') && !result.stdout.includes('complexity')) {
|
||||
throw new Error('Output does not contain complexity information');
|
||||
}
|
||||
});
|
||||
|
||||
// Test 2: Complexity analysis with research flag
|
||||
await runTest('Complexity analysis with --research', async () => {
|
||||
const result = await helpers.taskMaster(
|
||||
'analyze-complexity',
|
||||
['--research'],
|
||||
{ cwd: testDir, timeout: 120000 }
|
||||
);
|
||||
if (result.exitCode !== 0) {
|
||||
throw new Error(`Command failed: ${result.stderr}`);
|
||||
}
|
||||
// Research mode should provide more detailed analysis
|
||||
if (!result.stdout.includes('Complexity') && !result.stdout.includes('complexity')) {
|
||||
throw new Error('Research mode did not provide complexity analysis');
|
||||
}
|
||||
});
|
||||
|
||||
// Test 3: Complexity analysis with custom output file
|
||||
await runTest('Complexity analysis with custom output', async () => {
|
||||
const outputPath = '.taskmaster/reports/custom-complexity.json';
|
||||
const result = await helpers.taskMaster(
|
||||
'analyze-complexity',
|
||||
['--output', outputPath],
|
||||
{ cwd: testDir }
|
||||
);
|
||||
if (result.exitCode !== 0) {
|
||||
throw new Error(`Command failed: ${result.stderr}`);
|
||||
}
|
||||
// Verify file was created
|
||||
const fullPath = `${testDir}/${outputPath}`;
|
||||
if (!helpers.fileExists(fullPath)) {
|
||||
throw new Error('Custom output file was not created');
|
||||
}
|
||||
// Verify it's valid JSON
|
||||
const report = helpers.readJson(fullPath);
|
||||
if (!report || typeof report !== 'object') {
|
||||
throw new Error('Output file is not valid JSON');
|
||||
}
|
||||
});
|
||||
|
||||
// Test 4: Complexity analysis for specific tasks
|
||||
await runTest('Complexity analysis for specific tasks', async () => {
|
||||
const result = await helpers.taskMaster(
|
||||
'analyze-complexity',
|
||||
['--tasks', taskIds.join(',')],
|
||||
{ cwd: testDir }
|
||||
);
|
||||
if (result.exitCode !== 0) {
|
||||
throw new Error(`Command failed: ${result.stderr}`);
|
||||
}
|
||||
// Should analyze only specified tasks
|
||||
for (const taskId of taskIds) {
|
||||
if (!result.stdout.includes(taskId)) {
|
||||
throw new Error(`Task ${taskId} not included in analysis`);
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
// Test 5: Complexity analysis with custom thresholds
|
||||
await runTest('Complexity analysis with custom thresholds', async () => {
|
||||
const result = await helpers.taskMaster(
|
||||
'analyze-complexity',
|
||||
['--low-threshold', '3', '--medium-threshold', '7', '--high-threshold', '10'],
|
||||
{ cwd: testDir }
|
||||
);
|
||||
if (result.exitCode !== 0) {
|
||||
throw new Error(`Command failed: ${result.stderr}`);
|
||||
}
|
||||
// Output should reflect custom thresholds
|
||||
if (!result.stdout.includes('low') || !result.stdout.includes('medium') || !result.stdout.includes('high')) {
|
||||
throw new Error('Custom thresholds not reflected in output');
|
||||
}
|
||||
});
|
||||
|
||||
// Test 6: Complexity analysis with JSON output format
|
||||
await runTest('Complexity analysis with JSON format', async () => {
|
||||
const result = await helpers.taskMaster(
|
||||
'analyze-complexity',
|
||||
['--format', 'json'],
|
||||
{ cwd: testDir }
|
||||
);
|
||||
if (result.exitCode !== 0) {
|
||||
throw new Error(`Command failed: ${result.stderr}`);
|
||||
}
|
||||
// Output should be valid JSON
|
||||
try {
|
||||
const parsed = JSON.parse(result.stdout);
|
||||
if (!parsed || typeof parsed !== 'object') {
|
||||
throw new Error('Output is not valid JSON object');
|
||||
}
|
||||
} catch (e) {
|
||||
throw new Error('Output is not valid JSON format');
|
||||
}
|
||||
});
|
||||
|
||||
// Test 7: Complexity analysis with detailed breakdown
|
||||
await runTest('Complexity analysis with --detailed flag', async () => {
|
||||
const result = await helpers.taskMaster(
|
||||
'analyze-complexity',
|
||||
['--detailed'],
|
||||
{ cwd: testDir }
|
||||
);
|
||||
if (result.exitCode !== 0) {
|
||||
throw new Error(`Command failed: ${result.stderr}`);
|
||||
}
|
||||
// Should include detailed breakdown
|
||||
const expectedDetails = ['subtasks', 'dependencies', 'description', 'metadata'];
|
||||
const foundDetails = expectedDetails.filter(detail =>
|
||||
result.stdout.toLowerCase().includes(detail)
|
||||
);
|
||||
if (foundDetails.length < 2) {
|
||||
throw new Error('Detailed breakdown not comprehensive enough');
|
||||
}
|
||||
});
|
||||
|
||||
// Test 8: Complexity analysis for empty project
|
||||
await runTest('Complexity analysis with no tasks', async () => {
|
||||
// Create a new temp directory
|
||||
const emptyDir = `${testDir}_empty`;
|
||||
await helpers.executeCommand('mkdir', ['-p', emptyDir]);
|
||||
await helpers.taskMaster('init', ['-y'], { cwd: emptyDir });
|
||||
|
||||
const result = await helpers.taskMaster(
|
||||
'analyze-complexity',
|
||||
[],
|
||||
{ cwd: emptyDir }
|
||||
);
|
||||
if (result.exitCode !== 0) {
|
||||
throw new Error(`Command failed: ${result.stderr}`);
|
||||
}
|
||||
// Should handle empty project gracefully
|
||||
if (!result.stdout.includes('No tasks') && !result.stdout.includes('0')) {
|
||||
throw new Error('Empty project not handled gracefully');
|
||||
}
|
||||
});
|
||||
|
||||
// Test 9: Complexity analysis with tag filter
|
||||
await runTest('Complexity analysis filtered by tag', async () => {
|
||||
// Create tag and tagged task
|
||||
await helpers.taskMaster('add-tag', ['complex-tag'], { cwd: testDir });
|
||||
const taggedResult = await helpers.taskMaster(
|
||||
'add-task',
|
||||
['--title', 'Tagged complex task', '--tag', 'complex-tag'],
|
||||
{ cwd: testDir }
|
||||
);
|
||||
const taggedId = helpers.extractTaskId(taggedResult.stdout);
|
||||
|
||||
const result = await helpers.taskMaster(
|
||||
'analyze-complexity',
|
||||
['--tag', 'complex-tag'],
|
||||
{ cwd: testDir }
|
||||
);
|
||||
if (result.exitCode !== 0) {
|
||||
throw new Error(`Command failed: ${result.stderr}`);
|
||||
}
|
||||
// Should only analyze tagged tasks
|
||||
if (!result.stdout.includes(taggedId)) {
|
||||
throw new Error('Tagged task not included in filtered analysis');
|
||||
}
|
||||
});
|
||||
|
||||
// Test 10: Complexity analysis with status filter
|
||||
await runTest('Complexity analysis filtered by status', async () => {
|
||||
// Set one task to completed
|
||||
await helpers.taskMaster('set-status', [taskIds[0], 'completed'], { cwd: testDir });
|
||||
|
||||
const result = await helpers.taskMaster(
|
||||
'analyze-complexity',
|
||||
['--status', 'pending'],
|
||||
{ cwd: testDir }
|
||||
);
|
||||
if (result.exitCode !== 0) {
|
||||
throw new Error(`Command failed: ${result.stderr}`);
|
||||
}
|
||||
// Should not include completed task
|
||||
if (result.stdout.includes(taskIds[0])) {
|
||||
throw new Error('Completed task included in pending-only analysis');
|
||||
}
|
||||
});
|
||||
|
||||
// Test 11: Generate complexity report command
|
||||
await runTest('Generate complexity report', async () => {
|
||||
// First run analyze-complexity to generate data
|
||||
await helpers.taskMaster(
|
||||
'analyze-complexity',
|
||||
['--output', '.taskmaster/reports/task-complexity-report.json'],
|
||||
{ cwd: testDir }
|
||||
);
|
||||
|
||||
const result = await helpers.taskMaster(
|
||||
'complexity-report',
|
||||
[],
|
||||
{ cwd: testDir }
|
||||
);
|
||||
if (result.exitCode !== 0) {
|
||||
throw new Error(`Command failed: ${result.stderr}`);
|
||||
}
|
||||
// Should display report
|
||||
if (!result.stdout.includes('Complexity Report') && !result.stdout.includes('complexity')) {
|
||||
throw new Error('Complexity report not displayed');
|
||||
}
|
||||
});
|
||||
|
||||
// Test 12: Error handling - invalid threshold values
|
||||
await runTest('Error handling - invalid thresholds', async () => {
|
||||
const result = await helpers.taskMaster(
|
||||
'analyze-complexity',
|
||||
['--low-threshold', '-1'],
|
||||
{ cwd: testDir, allowFailure: true }
|
||||
);
|
||||
if (result.exitCode === 0) {
|
||||
throw new Error('Should have failed with negative threshold');
|
||||
}
|
||||
});
|
||||
|
||||
// Test 13: Error handling - invalid output path
|
||||
await runTest('Error handling - invalid output path', async () => {
|
||||
const result = await helpers.taskMaster(
|
||||
'analyze-complexity',
|
||||
['--output', '/invalid/path/report.json'],
|
||||
{ cwd: testDir, allowFailure: true }
|
||||
);
|
||||
if (result.exitCode === 0) {
|
||||
throw new Error('Should have failed with invalid output path');
|
||||
}
|
||||
});
|
||||
|
||||
// Test 14: Performance test - large number of tasks
|
||||
await runTest('Performance - analyze many tasks', async () => {
|
||||
// Create 20 more tasks
|
||||
const promises = [];
|
||||
for (let i = 0; i < 20; i++) {
|
||||
promises.push(
|
||||
helpers.taskMaster(
|
||||
'add-task',
|
||||
['--title', `Performance test task ${i}`],
|
||||
{ cwd: testDir }
|
||||
)
|
||||
);
|
||||
}
|
||||
await Promise.all(promises);
|
||||
|
||||
const startTime = Date.now();
|
||||
const result = await helpers.taskMaster(
|
||||
'analyze-complexity',
|
||||
[],
|
||||
{ cwd: testDir }
|
||||
);
|
||||
const duration = Date.now() - startTime;
|
||||
|
||||
if (result.exitCode !== 0) {
|
||||
throw new Error(`Command failed: ${result.stderr}`);
|
||||
}
|
||||
// Should complete in reasonable time (< 10 seconds)
|
||||
if (duration > 10000) {
|
||||
throw new Error(`Analysis took too long: ${duration}ms`);
|
||||
}
|
||||
logger.info(`Analyzed ~25 tasks in ${duration}ms`);
|
||||
});
|
||||
|
||||
// Test 15: Verify complexity scoring algorithm
|
||||
await runTest('Verify complexity scoring accuracy', async () => {
|
||||
// The complex task with subtasks should have higher score than simple task
|
||||
const result = await helpers.taskMaster(
|
||||
'analyze-complexity',
|
||||
['--format', 'json'],
|
||||
{ cwd: testDir }
|
||||
);
|
||||
if (result.exitCode !== 0) {
|
||||
throw new Error(`Command failed: ${result.stderr}`);
|
||||
}
|
||||
|
||||
const analysis = JSON.parse(result.stdout);
|
||||
const simpleTask = analysis.tasks?.find(t => t.id === taskIds[0]);
|
||||
const complexTask = analysis.tasks?.find(t => t.id === taskIds[1]);
|
||||
|
||||
if (!simpleTask || !complexTask) {
|
||||
throw new Error('Could not find tasks in analysis');
|
||||
}
|
||||
|
||||
if (simpleTask.complexity >= complexTask.complexity) {
|
||||
throw new Error('Complex task should have higher complexity score than simple task');
|
||||
}
|
||||
});
|
||||
|
||||
// Calculate summary
|
||||
const totalTests = results.tests.length;
|
||||
const passedTests = results.tests.filter(t => t.status === 'passed').length;
|
||||
const failedTests = results.tests.filter(t => t.status === 'failed').length;
|
||||
|
||||
logger.info('\n=== Analyze-Complexity Test Summary ===');
|
||||
logger.info(`Total tests: ${totalTests}`);
|
||||
logger.info(`Passed: ${passedTests}`);
|
||||
logger.info(`Failed: ${failedTests}`);
|
||||
|
||||
if (failedTests > 0) {
|
||||
results.status = 'failed';
|
||||
logger.error(`\n${failedTests} tests failed`);
|
||||
} else {
|
||||
logger.success('\n✅ All analyze-complexity tests passed!');
|
||||
}
|
||||
|
||||
} catch (error) {
|
||||
results.status = 'failed';
|
||||
results.errors.push({
|
||||
test: 'analyze-complexity test suite',
|
||||
error: error.message,
|
||||
stack: error.stack
|
||||
});
|
||||
logger.error(`Analyze-complexity test suite failed: ${error.message}`);
|
||||
}
|
||||
|
||||
return results;
|
||||
}
|
||||
492
tests/e2e/tests/commands/expand-task.test.js
Normal file
492
tests/e2e/tests/commands/expand-task.test.js
Normal file
@@ -0,0 +1,492 @@
|
||||
/**
|
||||
* Comprehensive E2E tests for expand-task command
|
||||
* Tests all aspects of task expansion including single, multiple, and recursive expansion
|
||||
*/
|
||||
|
||||
export default async function testExpandTask(logger, helpers, context) {
|
||||
const { testDir } = context;
|
||||
const results = {
|
||||
status: 'passed',
|
||||
errors: [],
|
||||
tests: []
|
||||
};
|
||||
|
||||
async function runTest(name, testFn) {
|
||||
try {
|
||||
logger.info(`\nRunning: ${name}`);
|
||||
await testFn();
|
||||
results.tests.push({ name, status: 'passed' });
|
||||
logger.success(`✓ ${name}`);
|
||||
} catch (error) {
|
||||
results.tests.push({ name, status: 'failed', error: error.message });
|
||||
results.errors.push({ test: name, error: error.message });
|
||||
logger.error(`✗ ${name}: ${error.message}`);
|
||||
}
|
||||
}
|
||||
|
||||
try {
|
||||
logger.info('Starting comprehensive expand-task tests...');
|
||||
|
||||
// Setup: Create tasks for expansion testing
|
||||
logger.info('Setting up test tasks...');
|
||||
|
||||
// Create simple task for expansion
|
||||
const simpleResult = await helpers.taskMaster(
|
||||
'add-task',
|
||||
['--prompt', 'Create a user authentication system'],
|
||||
{ cwd: testDir }
|
||||
);
|
||||
const simpleTaskId = helpers.extractTaskId(simpleResult.stdout);
|
||||
|
||||
// Create complex task for expansion
|
||||
const complexResult = await helpers.taskMaster(
|
||||
'add-task',
|
||||
['--prompt', 'Build a full-stack web application with React frontend and Node.js backend'],
|
||||
{ cwd: testDir }
|
||||
);
|
||||
const complexTaskId = helpers.extractTaskId(complexResult.stdout);
|
||||
|
||||
// Create manual task (no AI prompt)
|
||||
const manualResult = await helpers.taskMaster(
|
||||
'add-task',
|
||||
['--title', 'Manual task for expansion', '--description', 'This task needs to be broken down into subtasks'],
|
||||
{ cwd: testDir }
|
||||
);
|
||||
const manualTaskId = helpers.extractTaskId(manualResult.stdout);
|
||||
|
||||
// Test 1: Single task expansion
|
||||
await runTest('Single task expansion', async () => {
|
||||
const result = await helpers.taskMaster(
|
||||
'expand',
|
||||
[simpleTaskId],
|
||||
{ cwd: testDir, timeout: 120000 }
|
||||
);
|
||||
if (result.exitCode !== 0) {
|
||||
throw new Error(`Command failed: ${result.stderr}`);
|
||||
}
|
||||
|
||||
// Verify subtasks were created
|
||||
const showResult = await helpers.taskMaster('show', [simpleTaskId], { cwd: testDir });
|
||||
if (!showResult.stdout.includes('Subtasks:') && !showResult.stdout.includes('.1')) {
|
||||
throw new Error('No subtasks created during expansion');
|
||||
}
|
||||
|
||||
// Check expansion output mentions subtasks
|
||||
if (!result.stdout.includes('subtask') && !result.stdout.includes('expanded')) {
|
||||
throw new Error('Expansion output does not mention subtasks');
|
||||
}
|
||||
});
|
||||
|
||||
// Test 2: Expansion of already expanded task (should skip)
|
||||
await runTest('Expansion of already expanded task', async () => {
|
||||
const result = await helpers.taskMaster(
|
||||
'expand',
|
||||
[simpleTaskId],
|
||||
{ cwd: testDir }
|
||||
);
|
||||
if (result.exitCode !== 0) {
|
||||
throw new Error(`Command failed: ${result.stderr}`);
|
||||
}
|
||||
|
||||
// Should indicate task is already expanded
|
||||
if (!result.stdout.includes('already') && !result.stdout.includes('skip')) {
|
||||
throw new Error('Did not indicate task was already expanded');
|
||||
}
|
||||
});
|
||||
|
||||
// Test 3: Force re-expansion with --force
|
||||
await runTest('Force re-expansion', async () => {
|
||||
// Get initial subtask count
|
||||
const beforeShow = await helpers.taskMaster('show', [simpleTaskId], { cwd: testDir });
|
||||
const beforeSubtasks = (beforeShow.stdout.match(/\d+\.\d+/g) || []).length;
|
||||
|
||||
const result = await helpers.taskMaster(
|
||||
'expand',
|
||||
[simpleTaskId, '--force'],
|
||||
{ cwd: testDir, timeout: 120000 }
|
||||
);
|
||||
if (result.exitCode !== 0) {
|
||||
throw new Error(`Command failed: ${result.stderr}`);
|
||||
}
|
||||
|
||||
// Verify it actually re-expanded
|
||||
if (!result.stdout.includes('expanded') && !result.stdout.includes('Re-expand')) {
|
||||
throw new Error('Force flag did not trigger re-expansion');
|
||||
}
|
||||
|
||||
// Check if subtasks changed (they might be different)
|
||||
const afterShow = await helpers.taskMaster('show', [simpleTaskId], { cwd: testDir });
|
||||
const afterSubtasks = (afterShow.stdout.match(/\d+\.\d+/g) || []).length;
|
||||
|
||||
if (afterSubtasks === 0) {
|
||||
throw new Error('Force re-expansion removed all subtasks');
|
||||
}
|
||||
});
|
||||
|
||||
// Test 4: Expand multiple tasks
|
||||
await runTest('Expand multiple tasks', async () => {
|
||||
const result = await helpers.taskMaster(
|
||||
'expand',
|
||||
[complexTaskId, manualTaskId],
|
||||
{ cwd: testDir, timeout: 180000 }
|
||||
);
|
||||
if (result.exitCode !== 0) {
|
||||
throw new Error(`Command failed: ${result.stderr}`);
|
||||
}
|
||||
|
||||
// Verify both tasks were expanded
|
||||
const showComplex = await helpers.taskMaster('show', [complexTaskId], { cwd: testDir });
|
||||
const showManual = await helpers.taskMaster('show', [manualTaskId], { cwd: testDir });
|
||||
|
||||
if (!showComplex.stdout.includes('Subtasks:')) {
|
||||
throw new Error('Complex task was not expanded');
|
||||
}
|
||||
if (!showManual.stdout.includes('Subtasks:')) {
|
||||
throw new Error('Manual task was not expanded');
|
||||
}
|
||||
});
|
||||
|
||||
// Test 5: Expand all tasks with --all
|
||||
await runTest('Expand all tasks', async () => {
|
||||
// Create a few more tasks
|
||||
await helpers.taskMaster('add-task', ['--prompt', 'Task A for expand all'], { cwd: testDir });
|
||||
await helpers.taskMaster('add-task', ['--prompt', 'Task B for expand all'], { cwd: testDir });
|
||||
|
||||
const result = await helpers.taskMaster(
|
||||
'expand',
|
||||
['--all'],
|
||||
{ cwd: testDir, timeout: 240000 }
|
||||
);
|
||||
if (result.exitCode !== 0) {
|
||||
throw new Error(`Command failed: ${result.stderr}`);
|
||||
}
|
||||
|
||||
// Should mention expanding multiple tasks
|
||||
if (!result.stdout.includes('Expand') || !result.stdout.includes('all')) {
|
||||
throw new Error('Expand all did not indicate it was processing all tasks');
|
||||
}
|
||||
});
|
||||
|
||||
// Test 6: Error handling - invalid task ID
|
||||
await runTest('Error handling - invalid task ID', async () => {
|
||||
const result = await helpers.taskMaster(
|
||||
'expand',
|
||||
['99999'],
|
||||
{ cwd: testDir, allowFailure: true }
|
||||
);
|
||||
if (result.exitCode === 0) {
|
||||
throw new Error('Should have failed with invalid task ID');
|
||||
}
|
||||
if (!result.stderr.includes('not found') && !result.stderr.includes('invalid')) {
|
||||
throw new Error('Error message does not indicate task not found');
|
||||
}
|
||||
});
|
||||
|
||||
// Test 7: Expansion quality verification
|
||||
await runTest('Expansion quality - relevant subtasks', async () => {
|
||||
// Create a specific task
|
||||
const specificResult = await helpers.taskMaster(
|
||||
'add-task',
|
||||
['--prompt', 'Implement user login with email and password'],
|
||||
{ cwd: testDir }
|
||||
);
|
||||
const specificTaskId = helpers.extractTaskId(specificResult.stdout);
|
||||
|
||||
// Expand it
|
||||
await helpers.taskMaster('expand', [specificTaskId], { cwd: testDir, timeout: 120000 });
|
||||
|
||||
// Check subtasks are relevant
|
||||
const showResult = await helpers.taskMaster('show', [specificTaskId], { cwd: testDir });
|
||||
const subtaskText = showResult.stdout.toLowerCase();
|
||||
|
||||
// Should have subtasks related to login functionality
|
||||
const relevantKeywords = ['email', 'password', 'validation', 'auth', 'login', 'user', 'security'];
|
||||
const foundKeywords = relevantKeywords.filter(keyword => subtaskText.includes(keyword));
|
||||
|
||||
if (foundKeywords.length < 3) {
|
||||
throw new Error('Subtasks do not seem relevant to user login task');
|
||||
}
|
||||
});
|
||||
|
||||
// Test 8: Recursive expansion of subtasks
|
||||
await runTest('Recursive expansion with --recursive', async () => {
|
||||
// Create task for recursive expansion
|
||||
const recursiveResult = await helpers.taskMaster(
|
||||
'add-task',
|
||||
['--prompt', 'Build a complete project management system'],
|
||||
{ cwd: testDir }
|
||||
);
|
||||
const recursiveTaskId = helpers.extractTaskId(recursiveResult.stdout);
|
||||
|
||||
// First expand the main task
|
||||
await helpers.taskMaster('expand', [recursiveTaskId], { cwd: testDir, timeout: 120000 });
|
||||
|
||||
// Now expand recursively
|
||||
const result = await helpers.taskMaster(
|
||||
'expand',
|
||||
[recursiveTaskId, '--recursive'],
|
||||
{ cwd: testDir, timeout: 180000 }
|
||||
);
|
||||
if (result.exitCode !== 0) {
|
||||
throw new Error(`Command failed: ${result.stderr}`);
|
||||
}
|
||||
|
||||
// Check for nested subtasks (e.g., 1.1.1)
|
||||
const showResult = await helpers.taskMaster('show', [recursiveTaskId], { cwd: testDir });
|
||||
if (!showResult.stdout.match(/\d+\.\d+\.\d+/)) {
|
||||
throw new Error('Recursive expansion did not create nested subtasks');
|
||||
}
|
||||
});
|
||||
|
||||
// Test 9: Expand with depth limit
|
||||
await runTest('Expand with depth limit', async () => {
|
||||
// Create task for depth testing
|
||||
const depthResult = await helpers.taskMaster(
|
||||
'add-task',
|
||||
['--prompt', 'Create a mobile application'],
|
||||
{ cwd: testDir }
|
||||
);
|
||||
const depthTaskId = helpers.extractTaskId(depthResult.stdout);
|
||||
|
||||
const result = await helpers.taskMaster(
|
||||
'expand',
|
||||
[depthTaskId, '--depth', '2'],
|
||||
{ cwd: testDir, timeout: 180000 }
|
||||
);
|
||||
if (result.exitCode !== 0) {
|
||||
throw new Error(`Command failed: ${result.stderr}`);
|
||||
}
|
||||
|
||||
// Should have subtasks but not too deep
|
||||
const showResult = await helpers.taskMaster('show', [depthTaskId], { cwd: testDir });
|
||||
const hasLevel1 = showResult.stdout.match(/\d+\.1/);
|
||||
const hasLevel2 = showResult.stdout.match(/\d+\.1\.1/);
|
||||
const hasLevel3 = showResult.stdout.match(/\d+\.1\.1\.1/);
|
||||
|
||||
if (!hasLevel1) {
|
||||
throw new Error('No level 1 subtasks created');
|
||||
}
|
||||
if (hasLevel3) {
|
||||
throw new Error('Depth limit not respected - found level 3 subtasks');
|
||||
}
|
||||
});
|
||||
|
||||
// Test 10: Expand task with existing subtasks
|
||||
await runTest('Expand task with manual subtasks', async () => {
|
||||
// Create task and add manual subtask
|
||||
const mixedResult = await helpers.taskMaster(
|
||||
'add-task',
|
||||
['--title', 'Mixed subtasks task'],
|
||||
{ cwd: testDir }
|
||||
);
|
||||
const mixedTaskId = helpers.extractTaskId(mixedResult.stdout);
|
||||
|
||||
// Add manual subtask
|
||||
await helpers.taskMaster(
|
||||
'add-subtask',
|
||||
[mixedTaskId, 'Manual subtask 1'],
|
||||
{ cwd: testDir }
|
||||
);
|
||||
|
||||
// Now expand with AI
|
||||
const result = await helpers.taskMaster(
|
||||
'expand',
|
||||
[mixedTaskId],
|
||||
{ cwd: testDir, timeout: 120000 }
|
||||
);
|
||||
if (result.exitCode !== 0) {
|
||||
throw new Error(`Command failed: ${result.stderr}`);
|
||||
}
|
||||
|
||||
// Should preserve manual subtask and add AI ones
|
||||
const showResult = await helpers.taskMaster('show', [mixedTaskId], { cwd: testDir });
|
||||
if (!showResult.stdout.includes('Manual subtask 1')) {
|
||||
throw new Error('Manual subtask was removed during expansion');
|
||||
}
|
||||
|
||||
// Count total subtasks - should be more than 1
|
||||
const subtaskCount = (showResult.stdout.match(/\d+\.\d+/g) || []).length;
|
||||
if (subtaskCount <= 1) {
|
||||
throw new Error('AI did not add additional subtasks');
|
||||
}
|
||||
});
|
||||
|
||||
// Test 11: Expand with custom prompt
|
||||
await runTest('Expand with custom prompt', async () => {
|
||||
// Create task
|
||||
const customResult = await helpers.taskMaster(
|
||||
'add-task',
|
||||
['--title', 'Generic development task'],
|
||||
{ cwd: testDir }
|
||||
);
|
||||
const customTaskId = helpers.extractTaskId(customResult.stdout);
|
||||
|
||||
// Expand with custom instructions
|
||||
const result = await helpers.taskMaster(
|
||||
'expand',
|
||||
[customTaskId, '--prompt', 'Break this down focusing on security aspects'],
|
||||
{ cwd: testDir, timeout: 120000 }
|
||||
);
|
||||
if (result.exitCode !== 0) {
|
||||
throw new Error(`Command failed: ${result.stderr}`);
|
||||
}
|
||||
|
||||
// Verify subtasks focus on security
|
||||
const showResult = await helpers.taskMaster('show', [customTaskId], { cwd: testDir });
|
||||
const subtaskText = showResult.stdout.toLowerCase();
|
||||
|
||||
if (!subtaskText.includes('security') && !subtaskText.includes('secure') &&
|
||||
!subtaskText.includes('auth') && !subtaskText.includes('protect')) {
|
||||
throw new Error('Custom prompt did not influence subtask generation');
|
||||
}
|
||||
});
|
||||
|
||||
// Test 12: Performance - expand large task
|
||||
await runTest('Performance - expand complex task', async () => {
|
||||
const perfResult = await helpers.taskMaster(
|
||||
'add-task',
|
||||
['--prompt', 'Build a complete enterprise resource planning (ERP) system with all modules'],
|
||||
{ cwd: testDir }
|
||||
);
|
||||
const perfTaskId = helpers.extractTaskId(perfResult.stdout);
|
||||
|
||||
const startTime = Date.now();
|
||||
const result = await helpers.taskMaster(
|
||||
'expand',
|
||||
[perfTaskId],
|
||||
{ cwd: testDir, timeout: 180000 }
|
||||
);
|
||||
const duration = Date.now() - startTime;
|
||||
|
||||
if (result.exitCode !== 0) {
|
||||
throw new Error(`Command failed: ${result.stderr}`);
|
||||
}
|
||||
|
||||
logger.info(`Complex task expanded in ${duration}ms`);
|
||||
|
||||
// Should create many subtasks for complex task
|
||||
const showResult = await helpers.taskMaster('show', [perfTaskId], { cwd: testDir });
|
||||
const subtaskCount = (showResult.stdout.match(/\d+\.\d+/g) || []).length;
|
||||
|
||||
if (subtaskCount < 5) {
|
||||
throw new Error('Complex task should have generated more subtasks');
|
||||
}
|
||||
logger.info(`Generated ${subtaskCount} subtasks`);
|
||||
});
|
||||
|
||||
// Test 13: Expand with tag context
|
||||
await runTest('Expand within tag context', async () => {
|
||||
// Create tag and task
|
||||
await helpers.taskMaster('add-tag', ['frontend-expansion'], { cwd: testDir });
|
||||
const taggedResult = await helpers.taskMaster(
|
||||
'add-task',
|
||||
['--prompt', 'Create UI components', '--tag', 'frontend-expansion'],
|
||||
{ cwd: testDir }
|
||||
);
|
||||
const taggedTaskId = helpers.extractTaskId(taggedResult.stdout);
|
||||
|
||||
// Expand within tag context
|
||||
const result = await helpers.taskMaster(
|
||||
'expand',
|
||||
[taggedTaskId, '--tag', 'frontend-expansion'],
|
||||
{ cwd: testDir, timeout: 120000 }
|
||||
);
|
||||
if (result.exitCode !== 0) {
|
||||
throw new Error(`Command failed: ${result.stderr}`);
|
||||
}
|
||||
|
||||
// Verify subtasks inherit tag
|
||||
const listResult = await helpers.taskMaster(
|
||||
'list',
|
||||
['--tag', 'frontend-expansion'],
|
||||
{ cwd: testDir }
|
||||
);
|
||||
|
||||
// Should show parent and subtasks in tag
|
||||
const taskMatches = listResult.stdout.match(/\d+(\.\d+)*/g) || [];
|
||||
if (taskMatches.length <= 1) {
|
||||
throw new Error('Subtasks did not inherit tag context');
|
||||
}
|
||||
});
|
||||
|
||||
// Test 14: Expand completed task
|
||||
await runTest('Expand completed task', async () => {
|
||||
// Create and complete a task
|
||||
const completedResult = await helpers.taskMaster(
|
||||
'add-task',
|
||||
['--title', 'Completed task'],
|
||||
{ cwd: testDir }
|
||||
);
|
||||
const completedTaskId = helpers.extractTaskId(completedResult.stdout);
|
||||
await helpers.taskMaster('set-status', [completedTaskId, 'completed'], { cwd: testDir });
|
||||
|
||||
// Try to expand
|
||||
const result = await helpers.taskMaster(
|
||||
'expand',
|
||||
[completedTaskId],
|
||||
{ cwd: testDir, allowFailure: true }
|
||||
);
|
||||
|
||||
// Should either fail or warn about completed status
|
||||
if (result.exitCode === 0 && !result.stdout.includes('completed') && !result.stdout.includes('warning')) {
|
||||
throw new Error('No warning about expanding completed task');
|
||||
}
|
||||
});
|
||||
|
||||
// Test 15: Batch expansion with mixed results
|
||||
await runTest('Batch expansion with mixed results', async () => {
|
||||
// Create tasks in different states
|
||||
const task1 = await helpers.taskMaster('add-task', ['--prompt', 'New task 1'], { cwd: testDir });
|
||||
const taskId1 = helpers.extractTaskId(task1.stdout);
|
||||
|
||||
const task2 = await helpers.taskMaster('add-task', ['--prompt', 'New task 2'], { cwd: testDir });
|
||||
const taskId2 = helpers.extractTaskId(task2.stdout);
|
||||
|
||||
// Expand task2 first
|
||||
await helpers.taskMaster('expand', [taskId2], { cwd: testDir });
|
||||
|
||||
// Now expand both - should skip task2
|
||||
const result = await helpers.taskMaster(
|
||||
'expand',
|
||||
[taskId1, taskId2],
|
||||
{ cwd: testDir, timeout: 180000 }
|
||||
);
|
||||
if (result.exitCode !== 0) {
|
||||
throw new Error(`Command failed: ${result.stderr}`);
|
||||
}
|
||||
|
||||
// Should indicate one was skipped
|
||||
if (!result.stdout.includes('skip') || !result.stdout.includes('already')) {
|
||||
throw new Error('Did not indicate that already-expanded task was skipped');
|
||||
}
|
||||
});
|
||||
|
||||
// Calculate summary
|
||||
const totalTests = results.tests.length;
|
||||
const passedTests = results.tests.filter(t => t.status === 'passed').length;
|
||||
const failedTests = results.tests.filter(t => t.status === 'failed').length;
|
||||
|
||||
logger.info('\n=== Expand-Task Test Summary ===');
|
||||
logger.info(`Total tests: ${totalTests}`);
|
||||
logger.info(`Passed: ${passedTests}`);
|
||||
logger.info(`Failed: ${failedTests}`);
|
||||
|
||||
if (failedTests > 0) {
|
||||
results.status = 'failed';
|
||||
logger.error(`\n${failedTests} tests failed`);
|
||||
} else {
|
||||
logger.success('\n✅ All expand-task tests passed!');
|
||||
}
|
||||
|
||||
} catch (error) {
|
||||
results.status = 'failed';
|
||||
results.errors.push({
|
||||
test: 'expand-task test suite',
|
||||
error: error.message,
|
||||
stack: error.stack
|
||||
});
|
||||
logger.error(`Expand-task test suite failed: ${error.message}`);
|
||||
}
|
||||
|
||||
return results;
|
||||
}
|
||||
787
tests/e2e/tests/commands/parse-prd.test.js
Normal file
787
tests/e2e/tests/commands/parse-prd.test.js
Normal file
@@ -0,0 +1,787 @@
|
||||
/**
|
||||
* Comprehensive E2E tests for parse-prd command
|
||||
* Tests all aspects of PRD parsing including different formats and error handling
|
||||
*/
|
||||
|
||||
export default async function testParsePrd(logger, helpers, context) {
|
||||
const { testDir } = context;
|
||||
const results = {
|
||||
status: 'passed',
|
||||
errors: [],
|
||||
tests: []
|
||||
};
|
||||
|
||||
async function runTest(name, testFn) {
|
||||
try {
|
||||
logger.info(`\nRunning: ${name}`);
|
||||
await testFn();
|
||||
results.tests.push({ name, status: 'passed' });
|
||||
logger.success(`✓ ${name}`);
|
||||
} catch (error) {
|
||||
results.tests.push({ name, status: 'failed', error: error.message });
|
||||
results.errors.push({ test: name, error: error.message });
|
||||
logger.error(`✗ ${name}: ${error.message}`);
|
||||
}
|
||||
}
|
||||
|
||||
try {
|
||||
logger.info('Starting comprehensive parse-prd tests...');
|
||||
|
||||
// Test 1: Basic PRD parsing from file
|
||||
await runTest('Basic PRD parsing', async () => {
|
||||
// Create a simple PRD file
|
||||
const prdContent = `# Product Requirements Document
|
||||
|
||||
## Overview
|
||||
Build a task management system for developers.
|
||||
|
||||
## Features
|
||||
1. Create and manage tasks
|
||||
2. Set task dependencies
|
||||
3. Track task status
|
||||
4. Generate reports
|
||||
|
||||
## Technical Requirements
|
||||
- Node.js backend
|
||||
- RESTful API
|
||||
- JSON data storage
|
||||
- CLI interface
|
||||
|
||||
## User Stories
|
||||
As a developer, I want to:
|
||||
- Create tasks quickly from the command line
|
||||
- View my task list with priorities
|
||||
- Mark tasks as complete
|
||||
- See task dependencies`;
|
||||
|
||||
helpers.writeFile(`${testDir}/simple-prd.txt`, prdContent);
|
||||
|
||||
const result = await helpers.taskMaster(
|
||||
'parse-prd',
|
||||
['simple-prd.txt'],
|
||||
{ cwd: testDir, timeout: 120000 }
|
||||
);
|
||||
if (result.exitCode !== 0) {
|
||||
throw new Error(`Command failed: ${result.stderr}`);
|
||||
}
|
||||
|
||||
// Check for success message
|
||||
if (!result.stdout.includes('task') || !result.stdout.includes('created')) {
|
||||
throw new Error('PRD parsing did not report task creation');
|
||||
}
|
||||
|
||||
// Verify tasks were created
|
||||
const listResult = await helpers.taskMaster('list', [], { cwd: testDir });
|
||||
if (!listResult.stdout.includes('Create and manage tasks') &&
|
||||
!listResult.stdout.includes('task')) {
|
||||
throw new Error('Tasks from PRD not found in task list');
|
||||
}
|
||||
});
|
||||
|
||||
// Test 2: PRD parsing with complex structure
|
||||
await runTest('Complex PRD parsing', async () => {
|
||||
const complexPrd = `<PRD>
|
||||
# E-Commerce Platform PRD
|
||||
|
||||
## Executive Summary
|
||||
A comprehensive e-commerce platform with multi-vendor support.
|
||||
|
||||
## Core Features
|
||||
### User Management
|
||||
- User registration and authentication
|
||||
- Role-based access control
|
||||
- User profiles and preferences
|
||||
|
||||
### Product Catalog
|
||||
- Product listing and search
|
||||
- Categories and filters
|
||||
- Product reviews and ratings
|
||||
|
||||
### Shopping Cart
|
||||
- Add/remove items
|
||||
- Save for later
|
||||
- Apply discount codes
|
||||
|
||||
### Payment Processing
|
||||
- Multiple payment methods
|
||||
- Secure checkout
|
||||
- Order confirmation
|
||||
|
||||
## Technical Architecture
|
||||
### Frontend
|
||||
- React.js with TypeScript
|
||||
- Responsive design
|
||||
- Progressive Web App
|
||||
|
||||
### Backend
|
||||
- Node.js with Express
|
||||
- PostgreSQL database
|
||||
- Redis for caching
|
||||
|
||||
### Infrastructure
|
||||
- Docker containers
|
||||
- Kubernetes orchestration
|
||||
- CI/CD pipeline
|
||||
|
||||
## Development Phases
|
||||
Phase 1: Core infrastructure and user management
|
||||
Phase 2: Product catalog and search
|
||||
Phase 3: Shopping cart and checkout
|
||||
Phase 4: Payment integration
|
||||
Phase 5: Admin dashboard
|
||||
|
||||
## Dependencies
|
||||
- User management must be complete before any other features
|
||||
- Product catalog required before shopping cart
|
||||
- Shopping cart required before payment processing
|
||||
</PRD>`;
|
||||
|
||||
helpers.writeFile(`${testDir}/complex-prd.md`, complexPrd);
|
||||
|
||||
const result = await helpers.taskMaster(
|
||||
'parse-prd',
|
||||
['complex-prd.md'],
|
||||
{ cwd: testDir, timeout: 180000 }
|
||||
);
|
||||
if (result.exitCode !== 0) {
|
||||
throw new Error(`Command failed: ${result.stderr}`);
|
||||
}
|
||||
|
||||
// Should create multiple tasks
|
||||
const taskCountMatch = result.stdout.match(/(\d+) tasks? created/i);
|
||||
if (!taskCountMatch || parseInt(taskCountMatch[1]) < 5) {
|
||||
throw new Error('Complex PRD should create more tasks');
|
||||
}
|
||||
|
||||
// Check for phase-based tasks
|
||||
const listResult = await helpers.taskMaster('list', [], { cwd: testDir });
|
||||
if (!listResult.stdout.includes('Phase') && !listResult.stdout.includes('phase')) {
|
||||
throw new Error('Phase-based tasks not created from PRD');
|
||||
}
|
||||
});
|
||||
|
||||
// Test 3: PRD parsing with custom task template
|
||||
await runTest('PRD parsing with task template', async () => {
|
||||
const templatePrd = `# Project: API Development
|
||||
|
||||
## Tasks
|
||||
[TASK] Design RESTful API endpoints
|
||||
- Define resource models
|
||||
- Document API specifications
|
||||
- Create OpenAPI schema
|
||||
|
||||
[TASK] Implement authentication
|
||||
- JWT token generation
|
||||
- Refresh token mechanism
|
||||
- Role-based permissions
|
||||
|
||||
[TASK] Build core endpoints
|
||||
- CRUD operations for resources
|
||||
- Input validation
|
||||
- Error handling
|
||||
|
||||
[TASK] Add caching layer
|
||||
- Redis integration
|
||||
- Cache invalidation strategy
|
||||
- Performance monitoring`;
|
||||
|
||||
helpers.writeFile(`${testDir}/template-prd.txt`, templatePrd);
|
||||
|
||||
const result = await helpers.taskMaster(
|
||||
'parse-prd',
|
||||
['template-prd.txt', '--template', '[TASK]'],
|
||||
{ cwd: testDir, timeout: 120000 }
|
||||
);
|
||||
if (result.exitCode !== 0) {
|
||||
throw new Error(`Command failed: ${result.stderr}`);
|
||||
}
|
||||
|
||||
// Should recognize custom template
|
||||
const listResult = await helpers.taskMaster('list', [], { cwd: testDir });
|
||||
if (!listResult.stdout.includes('Design RESTful API') ||
|
||||
!listResult.stdout.includes('authentication')) {
|
||||
throw new Error('Custom template tasks not parsed correctly');
|
||||
}
|
||||
});
|
||||
|
||||
// Test 4: Incremental PRD update
|
||||
await runTest('Incremental PRD update', async () => {
|
||||
// First PRD
|
||||
const initialPrd = `# Initial Requirements
|
||||
|
||||
## Phase 1
|
||||
- Setup project structure
|
||||
- Configure development environment`;
|
||||
|
||||
helpers.writeFile(`${testDir}/incremental-prd.txt`, initialPrd);
|
||||
|
||||
// Parse initial PRD
|
||||
await helpers.taskMaster(
|
||||
'parse-prd',
|
||||
['incremental-prd.txt'],
|
||||
{ cwd: testDir }
|
||||
);
|
||||
|
||||
// Get initial task count
|
||||
const initialList = await helpers.taskMaster('list', [], { cwd: testDir });
|
||||
const initialTaskCount = (initialList.stdout.match(/\d+\s*\|/g) || []).length;
|
||||
|
||||
// Update PRD with additional content
|
||||
const updatedPrd = `# Initial Requirements
|
||||
|
||||
## Phase 1
|
||||
- Setup project structure
|
||||
- Configure development environment
|
||||
|
||||
## Phase 2 (NEW)
|
||||
- Implement user authentication
|
||||
- Create database schema
|
||||
- Build API endpoints`;
|
||||
|
||||
helpers.writeFile(`${testDir}/incremental-prd.txt`, updatedPrd);
|
||||
|
||||
// Parse updated PRD
|
||||
const result = await helpers.taskMaster(
|
||||
'parse-prd',
|
||||
['incremental-prd.txt', '--update'],
|
||||
{ cwd: testDir, timeout: 120000 }
|
||||
);
|
||||
if (result.exitCode !== 0) {
|
||||
throw new Error(`Command failed: ${result.stderr}`);
|
||||
}
|
||||
|
||||
// Should add new tasks without duplicating existing ones
|
||||
const updatedList = await helpers.taskMaster('list', [], { cwd: testDir });
|
||||
const updatedTaskCount = (updatedList.stdout.match(/\d+\s*\|/g) || []).length;
|
||||
|
||||
if (updatedTaskCount <= initialTaskCount) {
|
||||
throw new Error('Incremental update did not add new tasks');
|
||||
}
|
||||
|
||||
if (!updatedList.stdout.includes('authentication') ||
|
||||
!updatedList.stdout.includes('Phase 2')) {
|
||||
throw new Error('New phase tasks not added');
|
||||
}
|
||||
});
|
||||
|
||||
// Test 5: PRD parsing with dependencies
|
||||
await runTest('PRD with explicit dependencies', async () => {
|
||||
const dependencyPrd = `# Project with Dependencies
|
||||
|
||||
## Tasks and Dependencies
|
||||
|
||||
### 1. Database Setup
|
||||
No dependencies
|
||||
|
||||
### 2. User Model
|
||||
Depends on: Database Setup
|
||||
|
||||
### 3. Authentication Service
|
||||
Depends on: User Model
|
||||
|
||||
### 4. API Endpoints
|
||||
Depends on: Authentication Service, User Model
|
||||
|
||||
### 5. Frontend Integration
|
||||
Depends on: API Endpoints
|
||||
|
||||
## Additional Notes
|
||||
Tasks should be completed in dependency order.`;
|
||||
|
||||
helpers.writeFile(`${testDir}/dependency-prd.txt`, dependencyPrd);
|
||||
|
||||
const result = await helpers.taskMaster(
|
||||
'parse-prd',
|
||||
['dependency-prd.txt'],
|
||||
{ cwd: testDir, timeout: 120000 }
|
||||
);
|
||||
if (result.exitCode !== 0) {
|
||||
throw new Error(`Command failed: ${result.stderr}`);
|
||||
}
|
||||
|
||||
// Verify dependencies were set
|
||||
const listResult = await helpers.taskMaster('list', [], { cwd: testDir });
|
||||
const taskIds = {};
|
||||
|
||||
// Extract task IDs
|
||||
const lines = listResult.stdout.split('\n');
|
||||
lines.forEach(line => {
|
||||
if (line.includes('Database Setup')) {
|
||||
const match = line.match(/(\d+)\s*\|/);
|
||||
if (match) taskIds.database = match[1];
|
||||
} else if (line.includes('User Model')) {
|
||||
const match = line.match(/(\d+)\s*\|/);
|
||||
if (match) taskIds.userModel = match[1];
|
||||
}
|
||||
});
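// e.g. a list row such as "2 | User Model | pending" (assumed table layout)
// would record taskIds.userModel = '2'.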
|
||||
|
||||
// Check dependency relationships
|
||||
if (taskIds.userModel && taskIds.database) {
|
||||
const showResult = await helpers.taskMaster('show', [taskIds.userModel], { cwd: testDir });
|
||||
if (!showResult.stdout.includes(taskIds.database)) {
|
||||
throw new Error('Dependencies not properly set from PRD');
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
// Test 6: Error handling - non-existent file
|
||||
await runTest('Error handling - non-existent file', async () => {
|
||||
const result = await helpers.taskMaster(
|
||||
'parse-prd',
|
||||
['non-existent-prd.txt'],
|
||||
{ cwd: testDir, allowFailure: true }
|
||||
);
|
||||
if (result.exitCode === 0) {
|
||||
throw new Error('Should have failed with non-existent file');
|
||||
}
|
||||
if (!result.stderr.includes('not found') && !result.stderr.includes('exist')) {
|
||||
throw new Error('Error message does not indicate file not found');
|
||||
}
|
||||
});
|
||||
|
||||
// Test 7: Error handling - malformed PRD
|
||||
await runTest('Error handling - malformed PRD', async () => {
|
||||
const malformedPrd = `This is not a valid PRD format
|
||||
|
||||
Random text without structure
|
||||
No headers or sections
|
||||
Just plain text`;
|
||||
|
||||
helpers.writeFile(`${testDir}/malformed-prd.txt`, malformedPrd);
|
||||
|
||||
const result = await helpers.taskMaster(
|
||||
'parse-prd',
|
||||
['malformed-prd.txt'],
|
||||
{ cwd: testDir, allowFailure: true }
|
||||
);
|
||||
|
||||
// Should either fail or create minimal tasks
|
||||
if (result.exitCode === 0) {
|
||||
// If it succeeds, should create at least one task
|
||||
const listResult = await helpers.taskMaster('list', [], { cwd: testDir });
|
||||
const taskCount = (listResult.stdout.match(/\d+\s*\|/g) || []).length;
|
||||
if (taskCount === 0) {
|
||||
throw new Error('No tasks created from malformed PRD');
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
// Test 8: PRD parsing with different formats
|
||||
await runTest('PRD parsing - JSON format', async () => {
|
||||
const jsonPrd = {
|
||||
"project": "Mobile App Development",
|
||||
"features": [
|
||||
{
|
||||
"name": "User Authentication",
|
||||
"tasks": [
|
||||
"Design login UI",
|
||||
"Implement OAuth integration",
|
||||
"Add biometric authentication"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "Data Synchronization",
|
||||
"tasks": [
|
||||
"Implement offline mode",
|
||||
"Create sync engine",
|
||||
"Handle conflict resolution"
|
||||
]
|
||||
}
|
||||
],
|
||||
"technical": {
|
||||
"platform": "React Native",
|
||||
"backend": "Firebase"
|
||||
}
|
||||
};
|
||||
|
||||
helpers.writeFile(`${testDir}/json-prd.json`, JSON.stringify(jsonPrd, null, 2));
|
||||
|
||||
const result = await helpers.taskMaster(
|
||||
'parse-prd',
|
||||
['json-prd.json'],
|
||||
{ cwd: testDir, timeout: 120000 }
|
||||
);
|
||||
if (result.exitCode !== 0) {
|
||||
throw new Error(`Command failed: ${result.stderr}`);
|
||||
}
|
||||
|
||||
// Verify tasks from JSON were created
|
||||
const listResult = await helpers.taskMaster('list', [], { cwd: testDir });
|
||||
if (!listResult.stdout.includes('Authentication') ||
|
||||
!listResult.stdout.includes('Synchronization')) {
|
||||
throw new Error('JSON PRD features not parsed correctly');
|
||||
}
|
||||
});
|
||||
|
||||
// Test 9: PRD with markdown formatting
|
||||
await runTest('PRD with rich markdown', async () => {
|
||||
const markdownPrd = `# **Project**: Developer Tools Suite
|
||||
|
||||
## 🎯 Goals
|
||||
- Increase developer productivity
|
||||
- Automate repetitive tasks
|
||||
- Improve code quality
|
||||
|
||||
## 📋 Features
|
||||
|
||||
### Code Analysis Tool
|
||||
- [ ] Static code analysis
|
||||
- [ ] Security vulnerability scanning
|
||||
- [ ] Performance profiling
|
||||
- [ ] Code complexity metrics
|
||||
|
||||
### Documentation Generator
|
||||
1. **Auto-generate API docs** from code comments
|
||||
2. **Create architecture diagrams** from codebase
|
||||
3. **Generate changelog** from git history
|
||||
|
||||
### Testing Framework
|
||||
| Feature | Priority | Effort |
|
||||
|---------|----------|--------|
|
||||
| Unit test generation | High | Medium |
|
||||
| Integration test templates | Medium | Low |
|
||||
| Load testing suite | Low | High |
|
||||
|
||||
## 🔗 Links
|
||||
- [Design Docs](https://example.com/design)
|
||||
- [API Specs](https://example.com/api)
|
||||
|
||||
## ⚠️ Constraints
|
||||
- Must support multiple programming languages
|
||||
- Should integrate with existing CI/CD pipelines
|
||||
- Performance impact < 5% on build times`;
|
||||
|
||||
helpers.writeFile(`${testDir}/markdown-prd.md`, markdownPrd);
|
||||
|
||||
const result = await helpers.taskMaster(
|
||||
'parse-prd',
|
||||
['markdown-prd.md'],
|
||||
{ cwd: testDir, timeout: 120000 }
|
||||
);
|
||||
if (result.exitCode !== 0) {
|
||||
throw new Error(`Command failed: ${result.stderr}`);
|
||||
}
|
||||
|
||||
// Check parsing handled markdown elements
|
||||
const listResult = await helpers.taskMaster('list', [], { cwd: testDir });
|
||||
if (!listResult.stdout.includes('Code Analysis') ||
|
||||
!listResult.stdout.includes('Documentation Generator')) {
|
||||
throw new Error('Markdown formatting interfered with parsing');
|
||||
}
|
||||
});
|
||||
|
||||
// Test 10: Large PRD performance test
|
||||
await runTest('Performance - large PRD', async () => {
|
||||
// Generate a large PRD
|
||||
let largePrd = `# Large Enterprise System PRD\n\n`;
|
||||
|
||||
for (let i = 1; i <= 20; i++) {
|
||||
largePrd += `## Module ${i}: ${['User', 'Product', 'Order', 'Payment', 'Shipping'][i % 5]} Management\n\n`;
|
||||
largePrd += `### Features\n`;
|
||||
for (let j = 1; j <= 5; j++) {
|
||||
largePrd += `- Feature ${i}.${j}: Implement ${['CRUD', 'Search', 'Filter', 'Export', 'Import'][j-1]} functionality\n`;
|
||||
}
|
||||
largePrd += `\n### Technical Requirements\n`;
|
||||
largePrd += `- Database tables for module ${i}\n`;
|
||||
largePrd += `- API endpoints for module ${i}\n`;
|
||||
largePrd += `- Unit tests for module ${i}\n\n`;
|
||||
}
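// 20 modules x (5 feature bullets + 3 technical requirements) = 160 candidate
// task lines, so the >= 20 task threshold below is deliberately conservative.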
|
||||
|
||||
helpers.writeFile(`${testDir}/large-prd.txt`, largePrd);
|
||||
|
||||
const startTime = Date.now();
|
||||
const result = await helpers.taskMaster(
|
||||
'parse-prd',
|
||||
['large-prd.txt'],
|
||||
{ cwd: testDir, timeout: 300000 }
|
||||
);
|
||||
const duration = Date.now() - startTime;
|
||||
|
||||
if (result.exitCode !== 0) {
|
||||
throw new Error(`Command failed: ${result.stderr}`);
|
||||
}
|
||||
|
||||
logger.info(`Large PRD parsed in ${duration}ms`);
|
||||
|
||||
// Should create many tasks
|
||||
const taskCountMatch = result.stdout.match(/(\d+) tasks? created/i);
|
||||
const taskCount = taskCountMatch ? parseInt(taskCountMatch[1]) : 0;
|
||||
|
||||
if (taskCount < 20) {
|
||||
throw new Error(`Large PRD should create more tasks (got ${taskCount})`);
|
||||
}
|
||||
logger.info(`Created ${taskCount} tasks from large PRD`);
|
||||
});
|
||||
|
||||
// Test 11: PRD with image and diagram references
|
||||
await runTest('PRD with external references', async () => {
|
||||
const referencePrd = `# System Architecture PRD
|
||||
|
||||
## Overview
|
||||
See architecture diagram: 
|
||||
|
||||
## Features
|
||||
Based on the wireframes in /designs/wireframes/:
|
||||
|
||||
1. Dashboard (see dashboard-wireframe.png)
|
||||
- Real-time metrics display
|
||||
- Customizable widgets
|
||||
- Export functionality
|
||||
|
||||
2. User Management (see user-flow.pdf)
|
||||
- CRUD operations
|
||||
- Role assignment
|
||||
- Activity logging
|
||||
|
||||
## API Design
|
||||
Refer to swagger.yaml for detailed API specifications.
|
||||
|
||||
## Database Schema
|
||||
See database-schema.sql for table definitions.`;
|
||||
|
||||
helpers.writeFile(`${testDir}/reference-prd.md`, referencePrd);
|
||||
|
||||
const result = await helpers.taskMaster(
|
||||
'parse-prd',
|
||||
['reference-prd.md'],
|
||||
{ cwd: testDir, timeout: 120000 }
|
||||
);
|
||||
if (result.exitCode !== 0) {
|
||||
throw new Error(`Command failed: ${result.stderr}`);
|
||||
}
|
||||
|
||||
// Should parse content despite external references
|
||||
const listResult = await helpers.taskMaster('list', [], { cwd: testDir });
|
||||
if (!listResult.stdout.includes('Dashboard') ||
|
||||
!listResult.stdout.includes('User Management')) {
|
||||
throw new Error('Failed to parse PRD with external references');
|
||||
}
|
||||
});
|
||||
|
||||
// Test 12: PRD parsing with priority hints
|
||||
await runTest('PRD with priority indicators', async () => {
|
||||
const priorityPrd = `# Project Roadmap
|
||||
|
||||
## Critical Features (P0)
|
||||
- Security authentication system
|
||||
- Data encryption at rest
|
||||
- Audit logging
|
||||
|
||||
## High Priority (P1)
|
||||
- User dashboard
|
||||
- Reporting module
|
||||
- API rate limiting
|
||||
|
||||
## Medium Priority (P2)
|
||||
- Dark mode support
|
||||
- Export to PDF
|
||||
- Batch operations
|
||||
|
||||
## Nice to Have (P3)
|
||||
- Theme customization
|
||||
- Advanced analytics
|
||||
- Third-party integrations`;
|
||||
|
||||
helpers.writeFile(`${testDir}/priority-prd.txt`, priorityPrd);
|
||||
|
||||
const result = await helpers.taskMaster(
|
||||
'parse-prd',
|
||||
['priority-prd.txt'],
|
||||
{ cwd: testDir, timeout: 120000 }
|
||||
);
|
||||
if (result.exitCode !== 0) {
|
||||
throw new Error(`Command failed: ${result.stderr}`);
|
||||
}
|
||||
|
||||
// Check if priorities were recognized
|
||||
// Get task details to verify priority assignment
|
||||
const listResult = await helpers.taskMaster('list', [], { cwd: testDir });
|
||||
|
||||
// Find a critical task
|
||||
const lines = listResult.stdout.split('\n');
|
||||
let criticalTaskId = null;
|
||||
lines.forEach(line => {
|
||||
if (line.includes('Security authentication') || line.includes('encryption')) {
|
||||
const match = line.match(/(\d+)\s*\|/);
|
||||
if (match) criticalTaskId = match[1];
|
||||
}
|
||||
});
|
||||
|
||||
if (criticalTaskId) {
|
||||
const showResult = await helpers.taskMaster('show', [criticalTaskId], { cwd: testDir });
|
||||
// Check if it has high priority
|
||||
if (!showResult.stdout.includes('high') && !showResult.stdout.includes('High')) {
|
||||
logger.warning('Critical tasks may not have been assigned high priority');
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
// Test 13: Multiple PRD files
|
||||
await runTest('Parse multiple PRD files', async () => {
|
||||
// Create multiple PRD files
|
||||
const prd1 = `# Frontend Requirements
|
||||
- Build responsive UI
|
||||
- Implement state management
|
||||
- Add unit tests`;
|
||||
|
||||
const prd2 = `# Backend Requirements
|
||||
- Design REST API
|
||||
- Setup database
|
||||
- Implement caching`;
|
||||
|
||||
helpers.writeFile(`${testDir}/frontend-prd.txt`, prd1);
|
||||
helpers.writeFile(`${testDir}/backend-prd.txt`, prd2);
|
||||
|
||||
const result = await helpers.taskMaster(
|
||||
'parse-prd',
|
||||
['frontend-prd.txt', 'backend-prd.txt'],
|
||||
{ cwd: testDir, timeout: 180000 }
|
||||
);
|
||||
if (result.exitCode !== 0) {
|
||||
throw new Error(`Command failed: ${result.stderr}`);
|
||||
}
|
||||
|
||||
// Should create tasks from both files
|
||||
const listResult = await helpers.taskMaster('list', [], { cwd: testDir });
|
||||
if (!listResult.stdout.includes('responsive UI') ||
|
||||
!listResult.stdout.includes('REST API')) {
|
||||
throw new Error('Not all PRD files were parsed');
|
||||
}
|
||||
});
|
||||
|
||||
// Test 14: PRD with code blocks
|
||||
await runTest('PRD with code examples', async () => {
|
||||
const codePrd = `# Technical Implementation PRD
|
||||
|
||||
## Authentication Module
|
||||
|
||||
Implement JWT-based authentication with the following structure:
|
||||
|
||||
\`\`\`javascript
|
||||
// Expected token payload
|
||||
{
|
||||
userId: string,
|
||||
email: string,
|
||||
roles: string[],
|
||||
exp: number
|
||||
}
|
||||
\`\`\`
|
||||
|
||||
### Tasks:
|
||||
1. Create token generation service
|
||||
2. Implement token validation middleware
|
||||
3. Add refresh token mechanism
|
||||
|
||||
## Database Schema
|
||||
|
||||
\`\`\`sql
|
||||
CREATE TABLE users (
|
||||
id UUID PRIMARY KEY,
|
||||
email VARCHAR(255) UNIQUE NOT NULL,
|
||||
password_hash VARCHAR(255) NOT NULL,
|
||||
created_at TIMESTAMP DEFAULT NOW()
|
||||
);
|
||||
\`\`\`
|
||||
|
||||
### Tasks:
|
||||
1. Create database migrations
|
||||
2. Add indexes for performance
|
||||
3. Implement data validation`;
|
||||
|
||||
helpers.writeFile(`${testDir}/code-prd.md`, codePrd);
|
||||
|
||||
const result = await helpers.taskMaster(
|
||||
'parse-prd',
|
||||
['code-prd.md'],
|
||||
{ cwd: testDir, timeout: 120000 }
|
||||
);
|
||||
if (result.exitCode !== 0) {
|
||||
throw new Error(`Command failed: ${result.stderr}`);
|
||||
}
|
||||
|
||||
// Should parse tasks despite code blocks
|
||||
const listResult = await helpers.taskMaster('list', [], { cwd: testDir });
|
||||
if (!listResult.stdout.includes('token generation') ||
|
||||
!listResult.stdout.includes('database migrations')) {
|
||||
throw new Error('Code blocks interfered with task parsing');
|
||||
}
|
||||
});
|
||||
|
||||
// Test 15: PRD parsing with auto-grouping
|
||||
await runTest('PRD auto-grouping into epics', async () => {
|
||||
const epicPrd = `# E-Learning Platform
|
||||
|
||||
## User Management Epic
|
||||
- User registration and profiles
|
||||
- Role-based access control
|
||||
- Social login integration
|
||||
- Password reset functionality
|
||||
|
||||
## Course Management Epic
|
||||
- Course creation tools
|
||||
- Video upload and processing
|
||||
- Quiz and assignment builder
|
||||
- Progress tracking
|
||||
|
||||
## Payment Processing Epic
|
||||
- Subscription management
|
||||
- Payment gateway integration
|
||||
- Invoice generation
|
||||
- Refund processing
|
||||
|
||||
## Analytics Epic
|
||||
- User engagement metrics
|
||||
- Course completion rates
|
||||
- Revenue analytics
|
||||
- Custom reports`;
|
||||
|
||||
helpers.writeFile(`${testDir}/epic-prd.txt`, epicPrd);
|
||||
|
||||
const result = await helpers.taskMaster(
|
||||
'parse-prd',
|
||||
['epic-prd.txt', '--group-by-section'],
|
||||
{ cwd: testDir, timeout: 120000 }
|
||||
);
|
||||
if (result.exitCode !== 0) {
|
||||
throw new Error(`Command failed: ${result.stderr}`);
|
||||
}
|
||||
|
||||
// Should create grouped tasks
|
||||
const listResult = await helpers.taskMaster('list', [], { cwd: testDir });
|
||||
|
||||
// Check for epic grouping (tasks might have similar IDs or tags)
|
||||
if (!listResult.stdout.includes('User Management') ||
|
||||
!listResult.stdout.includes('Course Management')) {
|
||||
throw new Error('Epic grouping not reflected in tasks');
|
||||
}
|
||||
});
|
||||
|
||||
// Calculate summary
|
||||
const totalTests = results.tests.length;
|
||||
const passedTests = results.tests.filter(t => t.status === 'passed').length;
|
||||
const failedTests = results.tests.filter(t => t.status === 'failed').length;
|
||||
|
||||
logger.info('\n=== Parse-PRD Test Summary ===');
|
||||
logger.info(`Total tests: ${totalTests}`);
|
||||
logger.info(`Passed: ${passedTests}`);
|
||||
logger.info(`Failed: ${failedTests}`);
|
||||
|
||||
if (failedTests > 0) {
|
||||
results.status = 'failed';
|
||||
logger.error(`\n${failedTests} tests failed`);
|
||||
} else {
|
||||
logger.success('\n✅ All parse-prd tests passed!');
|
||||
}
|
||||
|
||||
} catch (error) {
|
||||
results.status = 'failed';
|
||||
results.errors.push({
|
||||
test: 'parse-prd test suite',
|
||||
error: error.message,
|
||||
stack: error.stack
|
||||
});
|
||||
logger.error(`Parse-prd test suite failed: ${error.message}`);
|
||||
}
|
||||
|
||||
return results;
|
||||
}
|
||||
496
tests/e2e/tests/commands/research-save.test.js
Normal file
@@ -0,0 +1,496 @@
|
||||
/**
|
||||
* Comprehensive E2E tests for research-save command
|
||||
* Tests all aspects of saving research results to files and knowledge base
|
||||
*/
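/**
 * Rough shape of the shared e2e `helpers` object, inferred from its usage in
 * this suite (the real implementation may expose more):
 *
 * @typedef {Object} E2EHelpers
 * @property {(cmd: string, args: string[], opts?: {cwd?: string, timeout?: number, allowFailure?: boolean}) => Promise<{exitCode: number, stdout: string, stderr: string}>} taskMaster
 * @property {(stdout: string) => string|null} extractTaskId
 * @property {(path: string, content: string) => void} writeFile
 * @property {(path: string) => string} readFile
 * @property {(path: string) => boolean} fileExists
 * @property {(dir: string) => string[]} listFiles
 */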
|
||||
|
||||
export default async function testResearchSave(logger, helpers, context) {
|
||||
const { testDir } = context;
|
||||
const results = {
|
||||
status: 'passed',
|
||||
errors: [],
|
||||
tests: []
|
||||
};
|
||||
|
||||
async function runTest(name, testFn) {
|
||||
try {
|
||||
logger.info(`\nRunning: ${name}`);
|
||||
await testFn();
|
||||
results.tests.push({ name, status: 'passed' });
|
||||
logger.success(`✓ ${name}`);
|
||||
} catch (error) {
|
||||
results.tests.push({ name, status: 'failed', error: error.message });
|
||||
results.errors.push({ test: name, error: error.message });
|
||||
logger.error(`✗ ${name}: ${error.message}`);
|
||||
}
|
||||
}
|
||||
|
||||
try {
|
||||
logger.info('Starting comprehensive research-save tests...');
|
||||
|
||||
// Test 1: Basic research and save
|
||||
await runTest('Basic research and save', async () => {
|
||||
const result = await helpers.taskMaster(
|
||||
'research-save',
|
||||
['How to implement OAuth 2.0 in Node.js', '--output', 'oauth-guide.md'],
|
||||
{ cwd: testDir, timeout: 120000 }
|
||||
);
|
||||
if (result.exitCode !== 0) {
|
||||
throw new Error(`Command failed: ${result.stderr}`);
|
||||
}
|
||||
|
||||
// Verify file was created
|
||||
const outputPath = `${testDir}/oauth-guide.md`;
|
||||
if (!helpers.fileExists(outputPath)) {
|
||||
throw new Error('Research output file was not created');
|
||||
}
|
||||
|
||||
// Check file content
|
||||
const content = helpers.readFile(outputPath);
|
||||
if (!content.includes('OAuth') || !content.includes('Node.js')) {
|
||||
throw new Error('Saved research does not contain expected content');
|
||||
}
|
||||
});
|
||||
|
||||
// Test 2: Research save with task context
|
||||
await runTest('Research save with task context', async () => {
|
||||
// Create a task
|
||||
const taskResult = await helpers.taskMaster(
|
||||
'add-task',
|
||||
['--title', 'Implement secure API authentication'],
|
||||
{ cwd: testDir }
|
||||
);
|
||||
const taskId = helpers.extractTaskId(taskResult.stdout);
|
||||
|
||||
const result = await helpers.taskMaster(
|
||||
'research-save',
|
||||
['--task', taskId, 'JWT vs OAuth comparison for REST APIs', '--output', 'auth-research.md'],
|
||||
{ cwd: testDir, timeout: 120000 }
|
||||
);
|
||||
if (result.exitCode !== 0) {
|
||||
throw new Error(`Command failed: ${result.stderr}`);
|
||||
}
|
||||
|
||||
// Check saved content includes task context
|
||||
const content = helpers.readFile(`${testDir}/auth-research.md`);
|
||||
if (!content.includes('JWT') || !content.includes('OAuth')) {
|
||||
throw new Error('Research does not cover requested topics');
|
||||
}
|
||||
|
||||
// Should reference the task
|
||||
if (!content.includes(taskId) && !content.includes('Task #')) {
|
||||
throw new Error('Saved research does not reference the task context');
|
||||
}
|
||||
});
|
||||
|
||||
// Test 3: Research save to knowledge base
|
||||
await runTest('Save to knowledge base', async () => {
|
||||
const result = await helpers.taskMaster(
|
||||
'research-save',
|
||||
['Database indexing strategies', '--knowledge-base', '--category', 'database'],
|
||||
{ cwd: testDir, timeout: 120000 }
|
||||
);
|
||||
if (result.exitCode !== 0) {
|
||||
throw new Error(`Command failed: ${result.stderr}`);
|
||||
}
|
||||
|
||||
// Check knowledge base directory
|
||||
const kbPath = `${testDir}/.taskmaster/knowledge-base/database`;
|
||||
if (!helpers.fileExists(kbPath)) {
|
||||
throw new Error('Knowledge base category directory not created');
|
||||
}
|
||||
|
||||
// Should create a file with timestamp or ID
|
||||
const files = helpers.listFiles(kbPath);
|
||||
if (files.length === 0) {
|
||||
throw new Error('No files created in knowledge base');
|
||||
}
|
||||
|
||||
// Verify content
|
||||
const savedFile = files[0];
|
||||
const content = helpers.readFile(`${kbPath}/${savedFile}`);
|
||||
if (!content.includes('index') || !content.includes('database')) {
|
||||
throw new Error('Knowledge base entry lacks expected content');
|
||||
}
|
||||
});
|
||||
|
||||
// Test 4: Research save with custom format
|
||||
await runTest('Save with custom format', async () => {
|
||||
const result = await helpers.taskMaster(
|
||||
'research-save',
|
||||
['React performance optimization', '--output', 'react-perf.json', '--format', 'json'],
|
||||
{ cwd: testDir, timeout: 120000 }
|
||||
);
|
||||
if (result.exitCode !== 0) {
|
||||
throw new Error(`Command failed: ${result.stderr}`);
|
||||
}
|
||||
|
||||
// Verify JSON format
|
||||
const content = helpers.readFile(`${testDir}/react-perf.json`);
|
||||
let parsed;
|
||||
try {
|
||||
parsed = JSON.parse(content);
|
||||
} catch (e) {
|
||||
throw new Error('Output is not valid JSON');
|
||||
}
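// Assumed minimal shape of the JSON produced by --format json:
//   { "topic": "...", "content": "...", "timestamp": "..." }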
|
||||
|
||||
// Check JSON structure
|
||||
if (!parsed.topic || !parsed.content || !parsed.timestamp) {
|
||||
throw new Error('JSON output missing expected fields');
|
||||
}
|
||||
|
||||
if (!parsed.content.toLowerCase().includes('react') ||
|
||||
!parsed.content.toLowerCase().includes('performance')) {
|
||||
throw new Error('JSON content not relevant to query');
|
||||
}
|
||||
});
|
||||
|
||||
// Test 5: Research save with metadata
|
||||
await runTest('Save with metadata', async () => {
|
||||
const result = await helpers.taskMaster(
|
||||
'research-save',
|
||||
[
|
||||
'Microservices communication patterns',
|
||||
'--output', 'microservices.md',
|
||||
'--metadata', 'author=TaskMaster',
|
||||
'--metadata', 'tags=architecture,microservices',
|
||||
'--metadata', 'version=1.0'
|
||||
],
|
||||
{ cwd: testDir, timeout: 120000 }
|
||||
);
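// Each --metadata key=value pair is expected to surface in the saved file,
// presumably as frontmatter or a header block, e.g.:
//   ---
//   author: TaskMaster
//   tags: architecture,microservices
//   version: 1.0
//   ---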
|
||||
if (result.exitCode !== 0) {
|
||||
throw new Error(`Command failed: ${result.stderr}`);
|
||||
}
|
||||
|
||||
// Check file content for metadata
|
||||
const content = helpers.readFile(`${testDir}/microservices.md`);
|
||||
|
||||
// Should include metadata in frontmatter or header
|
||||
if (!content.includes('author') && !content.includes('Author')) {
|
||||
throw new Error('Metadata not included in saved file');
|
||||
}
|
||||
|
||||
if (!content.includes('microservice') || !content.includes('communication')) {
|
||||
throw new Error('Research content not relevant');
|
||||
}
|
||||
});
|
||||
|
||||
// Test 6: Append to existing file
|
||||
await runTest('Append to existing research file', async () => {
|
||||
// Create initial file
|
||||
const initialContent = '# API Research\n\n## Previous Research\n\nInitial content here.\n\n';
|
||||
helpers.writeFile(`${testDir}/api-research.md`, initialContent);
|
||||
|
||||
const result = await helpers.taskMaster(
|
||||
'research-save',
|
||||
['GraphQL schema design best practices', '--output', 'api-research.md', '--append'],
|
||||
{ cwd: testDir, timeout: 120000 }
|
||||
);
|
||||
if (result.exitCode !== 0) {
|
||||
throw new Error(`Command failed: ${result.stderr}`);
|
||||
}
|
||||
|
||||
// Check file was appended
|
||||
const content = helpers.readFile(`${testDir}/api-research.md`);
|
||||
if (!content.includes('Previous Research')) {
|
||||
throw new Error('Original content was overwritten instead of appended');
|
||||
}
|
||||
if (!content.includes('GraphQL') || !content.includes('schema')) {
|
||||
throw new Error('New research not appended');
|
||||
}
|
||||
});
|
||||
|
||||
// Test 7: Research save with references
|
||||
await runTest('Save with source references', async () => {
|
||||
const result = await helpers.taskMaster(
|
||||
'research-save',
|
||||
['TypeScript decorators guide', '--output', 'decorators.md', '--include-references'],
|
||||
{ cwd: testDir, timeout: 120000 }
|
||||
);
|
||||
if (result.exitCode !== 0) {
|
||||
throw new Error(`Command failed: ${result.stderr}`);
|
||||
}
|
||||
|
||||
// Check for references section
|
||||
const content = helpers.readFile(`${testDir}/decorators.md`);
|
||||
if (!content.includes('TypeScript') || !content.includes('decorator')) {
|
||||
throw new Error('Research content not relevant');
|
||||
}
|
||||
|
||||
// Should include references or sources
|
||||
const hasReferences = content.includes('Reference') ||
|
||||
content.includes('Source') ||
|
||||
content.includes('Further reading') ||
|
||||
content.includes('Links');
|
||||
if (!hasReferences) {
|
||||
throw new Error('No references section included');
|
||||
}
|
||||
});
|
||||
|
||||
// Test 8: Batch research and save
|
||||
await runTest('Batch research topics', async () => {
|
||||
const topics = [
|
||||
'Docker best practices',
|
||||
'Kubernetes deployment strategies',
|
||||
'CI/CD pipeline setup'
|
||||
];
|
||||
|
||||
const result = await helpers.taskMaster(
|
||||
'research-save',
|
||||
['--batch', '--output-dir', 'devops-research', ...topics],
|
||||
{ cwd: testDir, timeout: 180000 }
|
||||
);
|
||||
if (result.exitCode !== 0) {
|
||||
throw new Error(`Command failed: ${result.stderr}`);
|
||||
}
|
||||
|
||||
// Check directory was created
|
||||
const outputDir = `${testDir}/devops-research`;
|
||||
if (!helpers.fileExists(outputDir)) {
|
||||
throw new Error('Output directory not created');
|
||||
}
|
||||
|
||||
// Should have files for each topic
|
||||
const files = helpers.listFiles(outputDir);
|
||||
if (files.length < topics.length) {
|
||||
throw new Error(`Expected ${topics.length} files, found ${files.length}`);
|
||||
}
|
||||
|
||||
// Verify each file has relevant content
|
||||
let foundDocker = false, foundK8s = false, foundCICD = false;
|
||||
files.forEach(file => {
|
||||
const content = helpers.readFile(`${outputDir}/${file}`).toLowerCase();
|
||||
if (content.includes('docker')) foundDocker = true;
|
||||
if (content.includes('kubernetes')) foundK8s = true;
|
||||
if (content.includes('ci') || content.includes('cd') || content.includes('pipeline')) foundCICD = true;
|
||||
});
|
||||
|
||||
if (!foundDocker || !foundK8s || !foundCICD) {
|
||||
throw new Error('Not all topics were researched and saved');
|
||||
}
|
||||
});
|
||||
|
||||
// Test 9: Research save with template
|
||||
await runTest('Save with custom template', async () => {
|
||||
// Create template file
|
||||
const template = `# {{TOPIC}}
|
||||
|
||||
Date: {{DATE}}
|
||||
Category: {{CATEGORY}}
|
||||
|
||||
## Summary
|
||||
{{SUMMARY}}
|
||||
|
||||
## Detailed Research
|
||||
{{CONTENT}}
|
||||
|
||||
## Key Takeaways
|
||||
{{TAKEAWAYS}}
|
||||
|
||||
## Implementation Notes
|
||||
{{NOTES}}
|
||||
`;
|
||||
helpers.writeFile(`${testDir}/research-template.md`, template);
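// The {{PLACEHOLDER}} tokens above are assumed to be substituted by
// research-save when --template is supplied; the assertions below only rely on
// {{TOPIC}}, {{CATEGORY}} and the literal section headings surviving into the
// output file.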
|
||||
|
||||
const result = await helpers.taskMaster(
|
||||
'research-save',
|
||||
[
|
||||
'Redis caching strategies',
|
||||
'--output', 'redis-research.md',
|
||||
'--template', 'research-template.md',
|
||||
'--category', 'performance'
|
||||
],
|
||||
{ cwd: testDir, timeout: 120000 }
|
||||
);
|
||||
if (result.exitCode !== 0) {
|
||||
throw new Error(`Command failed: ${result.stderr}`);
|
||||
}
|
||||
|
||||
// Check template was used
|
||||
const content = helpers.readFile(`${testDir}/redis-research.md`);
|
||||
if (!content.includes('Redis caching strategies')) {
|
||||
throw new Error('Template topic not filled');
|
||||
}
|
||||
if (!content.includes('Category: performance')) {
|
||||
throw new Error('Template category not filled');
|
||||
}
|
||||
if (!content.includes('Key Takeaways') || !content.includes('Implementation Notes')) {
|
||||
throw new Error('Template structure not preserved');
|
||||
}
|
||||
});
|
||||
|
||||
// Test 10: Error handling - invalid output path
|
||||
await runTest('Error handling - invalid output path', async () => {
|
||||
const result = await helpers.taskMaster(
|
||||
'research-save',
|
||||
['Test topic', '--output', '/invalid/path/file.md'],
|
||||
{ cwd: testDir, allowFailure: true }
|
||||
);
|
||||
if (result.exitCode === 0) {
|
||||
throw new Error('Should have failed with invalid output path');
|
||||
}
|
||||
});
|
||||
|
||||
// Test 11: Research save with task integration
|
||||
await runTest('Save and link to task', async () => {
|
||||
// Create task
|
||||
const taskResult = await helpers.taskMaster(
|
||||
'add-task',
|
||||
['--title', 'Implement caching layer'],
|
||||
{ cwd: testDir }
|
||||
);
|
||||
const taskId = helpers.extractTaskId(taskResult.stdout);
|
||||
|
||||
const result = await helpers.taskMaster(
|
||||
'research-save',
|
||||
[
|
||||
'--task', taskId,
|
||||
'Caching strategies comparison',
|
||||
'--output', 'caching-research.md',
|
||||
'--link-to-task'
|
||||
],
|
||||
{ cwd: testDir, timeout: 120000 }
|
||||
);
|
||||
if (result.exitCode !== 0) {
|
||||
throw new Error(`Command failed: ${result.stderr}`);
|
||||
}
|
||||
|
||||
// Check task was updated with research link
|
||||
const showResult = await helpers.taskMaster('show', [taskId], { cwd: testDir });
|
||||
if (!showResult.stdout.includes('caching-research.md') &&
|
||||
!showResult.stdout.includes('Research')) {
|
||||
throw new Error('Task not updated with research link');
|
||||
}
|
||||
});
|
||||
|
||||
// Test 12: Research save with compression
|
||||
await runTest('Save with compression for large research', async () => {
|
||||
const result = await helpers.taskMaster(
|
||||
'research-save',
|
||||
[
|
||||
'Comprehensive guide to distributed systems',
|
||||
'--output', 'dist-systems.md.gz',
|
||||
'--compress'
|
||||
],
|
||||
{ cwd: testDir, timeout: 120000 }
|
||||
);
|
||||
if (result.exitCode !== 0) {
|
||||
throw new Error(`Command failed: ${result.stderr}`);
|
||||
}
|
||||
|
||||
// Check compressed file exists
|
||||
const compressedPath = `${testDir}/dist-systems.md.gz`;
|
||||
if (!helpers.fileExists(compressedPath)) {
|
||||
throw new Error('Compressed file not created');
|
||||
}
|
||||
});
|
||||
|
||||
// Test 13: Research save with versioning
|
||||
await runTest('Save with version control', async () => {
|
||||
// Save initial version
|
||||
await helpers.taskMaster(
|
||||
'research-save',
|
||||
['API design patterns', '--output', 'api-patterns.md', '--version'],
|
||||
{ cwd: testDir, timeout: 120000 }
|
||||
);
|
||||
|
||||
// Save updated version
|
||||
const result = await helpers.taskMaster(
|
||||
'research-save',
|
||||
['API design patterns - updated', '--output', 'api-patterns.md', '--version'],
|
||||
{ cwd: testDir, timeout: 120000 }
|
||||
);
|
||||
if (result.exitCode !== 0) {
|
||||
throw new Error(`Command failed: ${result.stderr}`);
|
||||
}
|
||||
|
||||
// Check for version files
|
||||
const files = helpers.listFiles(testDir);
|
||||
const versionFiles = files.filter(f => f.includes('api-patterns') && f.includes('.v'));
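// Assumes --version writes versioned copies with a `.v` marker in the filename
// (e.g. api-patterns.v1.md); adjust the filter if the CLI uses another scheme.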
|
||||
|
||||
if (versionFiles.length === 0) {
|
||||
throw new Error('No version files created');
|
||||
}
|
||||
});
|
||||
|
||||
// Test 14: Research save with export formats
|
||||
await runTest('Export to multiple formats', async () => {
|
||||
const result = await helpers.taskMaster(
|
||||
'research-save',
|
||||
[
|
||||
'Testing strategies overview',
|
||||
'--output', 'testing',
|
||||
'--formats', 'md,json,txt'
|
||||
],
|
||||
{ cwd: testDir, timeout: 120000 }
|
||||
);
|
||||
if (result.exitCode !== 0) {
|
||||
throw new Error(`Command failed: ${result.stderr}`);
|
||||
}
|
||||
|
||||
// Check all format files exist
|
||||
const formats = ['md', 'json', 'txt'];
|
||||
formats.forEach(format => {
|
||||
const filePath = `${testDir}/testing.${format}`;
|
||||
if (!helpers.fileExists(filePath)) {
|
||||
throw new Error(`${format} format file not created`);
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
// Test 15: Research save with summary generation
|
||||
await runTest('Save with auto-generated summary', async () => {
|
||||
const result = await helpers.taskMaster(
|
||||
'research-save',
|
||||
[
|
||||
'Machine learning deployment strategies',
|
||||
'--output', 'ml-deployment.md',
|
||||
'--include-summary',
|
||||
'--summary-length', '200'
|
||||
],
|
||||
{ cwd: testDir, timeout: 120000 }
|
||||
);
|
||||
if (result.exitCode !== 0) {
|
||||
throw new Error(`Command failed: ${result.stderr}`);
|
||||
}
|
||||
|
||||
// Check for summary section
|
||||
const content = helpers.readFile(`${testDir}/ml-deployment.md`);
|
||||
if (!content.includes('Summary') && !content.includes('TL;DR') && !content.includes('Overview')) {
|
||||
throw new Error('No summary section found');
|
||||
}
|
||||
|
||||
// Content should be about ML deployment
|
||||
if (!content.includes('machine learning') && !content.includes('ML') && !content.includes('deployment')) {
|
||||
throw new Error('Research content not relevant to query');
|
||||
}
|
||||
});
|
||||
|
||||
// Calculate summary
|
||||
const totalTests = results.tests.length;
|
||||
const passedTests = results.tests.filter(t => t.status === 'passed').length;
|
||||
const failedTests = results.tests.filter(t => t.status === 'failed').length;
|
||||
|
||||
logger.info('\n=== Research-Save Test Summary ===');
|
||||
logger.info(`Total tests: ${totalTests}`);
|
||||
logger.info(`Passed: ${passedTests}`);
|
||||
logger.info(`Failed: ${failedTests}`);
|
||||
|
||||
if (failedTests > 0) {
|
||||
results.status = 'failed';
|
||||
logger.error(`\n${failedTests} tests failed`);
|
||||
} else {
|
||||
logger.success('\n✅ All research-save tests passed!');
|
||||
}
|
||||
|
||||
} catch (error) {
|
||||
results.status = 'failed';
|
||||
results.errors.push({
|
||||
test: 'research-save test suite',
|
||||
error: error.message,
|
||||
stack: error.stack
|
||||
});
|
||||
logger.error(`Research-save test suite failed: ${error.message}`);
|
||||
}
|
||||
|
||||
return results;
|
||||
}
|
||||
424
tests/e2e/tests/commands/research.test.js
Normal file
@@ -0,0 +1,424 @@
|
||||
/**
|
||||
* Comprehensive E2E tests for research command
|
||||
* Tests all aspects of AI-powered research functionality
|
||||
*/
|
||||
|
||||
export default async function testResearch(logger, helpers, context) {
|
||||
const { testDir } = context;
|
||||
const results = {
|
||||
status: 'passed',
|
||||
errors: [],
|
||||
tests: []
|
||||
};
|
||||
|
||||
async function runTest(name, testFn) {
|
||||
try {
|
||||
logger.info(`\nRunning: ${name}`);
|
||||
await testFn();
|
||||
results.tests.push({ name, status: 'passed' });
|
||||
logger.success(`✓ ${name}`);
|
||||
} catch (error) {
|
||||
results.tests.push({ name, status: 'failed', error: error.message });
|
||||
results.errors.push({ test: name, error: error.message });
|
||||
logger.error(`✗ ${name}: ${error.message}`);
|
||||
}
|
||||
}
|
||||
|
||||
try {
|
||||
logger.info('Starting comprehensive research tests...');
|
||||
|
||||
// Test 1: Basic research on a topic
|
||||
await runTest('Basic research query', async () => {
|
||||
const result = await helpers.taskMaster(
|
||||
'research',
|
||||
['What are the best practices for implementing JWT authentication in Node.js?'],
|
||||
{ cwd: testDir, timeout: 120000 }
|
||||
);
|
||||
if (result.exitCode !== 0) {
|
||||
throw new Error(`Command failed: ${result.stderr}`);
|
||||
}
|
||||
|
||||
// Check for relevant research output
|
||||
const output = result.stdout.toLowerCase();
|
||||
if (!output.includes('jwt') || !output.includes('authentication')) {
|
||||
throw new Error('Research output does not contain expected keywords');
|
||||
}
|
||||
|
||||
// Should provide actionable information
|
||||
const hasActionableInfo = output.includes('implement') ||
|
||||
output.includes('use') ||
|
||||
output.includes('practice') ||
|
||||
output.includes('security');
|
||||
if (!hasActionableInfo) {
|
||||
throw new Error('Research output lacks actionable information');
|
||||
}
|
||||
});
|
||||
|
||||
// Test 2: Research with specific context
|
||||
await runTest('Research with project context', async () => {
|
||||
// Create a task to provide context
|
||||
const taskResult = await helpers.taskMaster(
|
||||
'add-task',
|
||||
['--title', 'Implement user authentication', '--description', 'Need to add secure login to our Express.js API'],
|
||||
{ cwd: testDir }
|
||||
);
|
||||
const taskId = helpers.extractTaskId(taskResult.stdout);
|
||||
|
||||
const result = await helpers.taskMaster(
|
||||
'research',
|
||||
['--task', taskId, 'Compare bcrypt vs argon2 for password hashing'],
|
||||
{ cwd: testDir, timeout: 120000 }
|
||||
);
|
||||
if (result.exitCode !== 0) {
|
||||
throw new Error(`Command failed: ${result.stderr}`);
|
||||
}
|
||||
|
||||
// Should mention both technologies
|
||||
const output = result.stdout.toLowerCase();
|
||||
if (!output.includes('bcrypt') || !output.includes('argon2')) {
|
||||
throw new Error('Research did not compare both technologies');
|
||||
}
|
||||
|
||||
// Should relate to the task context
|
||||
if (!output.includes('password') || !output.includes('hash')) {
|
||||
throw new Error('Research not relevant to password hashing');
|
||||
}
|
||||
});
|
||||
|
||||
// Test 3: Research with output format options
|
||||
await runTest('Research with markdown output', async () => {
|
||||
const result = await helpers.taskMaster(
|
||||
'research',
|
||||
['--format', 'markdown', 'How to implement rate limiting in REST APIs?'],
|
||||
{ cwd: testDir, timeout: 120000 }
|
||||
);
|
||||
if (result.exitCode !== 0) {
|
||||
throw new Error(`Command failed: ${result.stderr}`);
|
||||
}
|
||||
|
||||
// Check for markdown formatting
|
||||
const hasMarkdown = result.stdout.includes('#') ||
|
||||
result.stdout.includes('*') ||
|
||||
result.stdout.includes('-') ||
|
||||
result.stdout.includes('```');
|
||||
if (!hasMarkdown) {
|
||||
throw new Error('Output does not appear to be in markdown format');
|
||||
}
|
||||
});
|
||||
|
||||
// Test 4: Research with depth parameter
|
||||
await runTest('Research with depth control', async () => {
|
||||
const shallowResult = await helpers.taskMaster(
|
||||
'research',
|
||||
['--depth', 'shallow', 'React state management options'],
|
||||
{ cwd: testDir, timeout: 120000 }
|
||||
);
|
||||
|
||||
const deepResult = await helpers.taskMaster(
|
||||
'research',
|
||||
['--depth', 'deep', 'React state management options'],
|
||||
{ cwd: testDir, timeout: 180000 }
|
||||
);
|
||||
|
||||
if (shallowResult.exitCode !== 0 || deepResult.exitCode !== 0) {
|
||||
throw new Error('Research with depth parameter failed');
|
||||
}
|
||||
|
||||
// Deep research should provide more content
|
||||
if (deepResult.stdout.length <= shallowResult.stdout.length) {
|
||||
throw new Error('Deep research did not provide more detailed information');
|
||||
}
|
||||
|
||||
// Both should mention state management solutions
|
||||
const solutions = ['redux', 'context', 'mobx', 'zustand', 'recoil'];
|
||||
const shallowMentions = solutions.filter(s => shallowResult.stdout.toLowerCase().includes(s)).length;
|
||||
const deepMentions = solutions.filter(s => deepResult.stdout.toLowerCase().includes(s)).length;
|
||||
|
||||
if (deepMentions <= shallowMentions) {
|
||||
throw new Error('Deep research should cover more solutions');
|
||||
}
|
||||
});
|
||||
|
||||
// Test 5: Research for multiple tasks
|
||||
await runTest('Research across multiple tasks', async () => {
|
||||
// Create related tasks
|
||||
const task1 = await helpers.taskMaster(
|
||||
'add-task',
|
||||
['--title', 'Setup database connection'],
|
||||
{ cwd: testDir }
|
||||
);
|
||||
const taskId1 = helpers.extractTaskId(task1.stdout);
|
||||
|
||||
const task2 = await helpers.taskMaster(
|
||||
'add-task',
|
||||
['--title', 'Implement caching layer'],
|
||||
{ cwd: testDir }
|
||||
);
|
||||
const taskId2 = helpers.extractTaskId(task2.stdout);
|
||||
|
||||
const result = await helpers.taskMaster(
|
||||
'research',
|
||||
['--tasks', `${taskId1},${taskId2}`, 'Best practices for database connection pooling and Redis caching'],
|
||||
{ cwd: testDir, timeout: 120000 }
|
||||
);
|
||||
if (result.exitCode !== 0) {
|
||||
throw new Error(`Command failed: ${result.stderr}`);
|
||||
}
|
||||
|
||||
// Should cover both topics
|
||||
const output = result.stdout.toLowerCase();
|
||||
if (!output.includes('database') || !output.includes('connection')) {
|
||||
throw new Error('Research did not cover database connections');
|
||||
}
|
||||
if (!output.includes('redis') || !output.includes('cach')) {
|
||||
throw new Error('Research did not cover caching');
|
||||
}
|
||||
});
|
||||
|
||||
// Test 6: Research with source preferences
|
||||
await runTest('Research with source preferences', async () => {
|
||||
const result = await helpers.taskMaster(
|
||||
'research',
|
||||
['--sources', 'official-docs,stackoverflow', 'How to use React hooks effectively?'],
|
||||
{ cwd: testDir, timeout: 120000 }
|
||||
);
|
||||
if (result.exitCode !== 0) {
|
||||
throw new Error(`Command failed: ${result.stderr}`);
|
||||
}
|
||||
|
||||
// Should still return guidance relevant to React hooks
|
||||
const output = result.stdout.toLowerCase();
|
||||
if (!output.includes('hook') || !output.includes('react')) {
|
||||
throw new Error('Research not relevant to React hooks');
|
||||
}
|
||||
});
|
||||
|
||||
// Test 7: Research with language/framework context
|
||||
await runTest('Research with technology context', async () => {
|
||||
const result = await helpers.taskMaster(
|
||||
'research',
|
||||
['--context', 'python,django', 'How to optimize database queries?'],
|
||||
{ cwd: testDir, timeout: 120000 }
|
||||
);
|
||||
if (result.exitCode !== 0) {
|
||||
throw new Error(`Command failed: ${result.stderr}`);
|
||||
}
|
||||
|
||||
// Should be Python/Django specific
|
||||
const output = result.stdout.toLowerCase();
|
||||
if (!output.includes('django') || !output.includes('orm') || !output.includes('queryset')) {
|
||||
throw new Error('Research not specific to Django context');
|
||||
}
|
||||
});
|
||||
|
||||
// Test 8: Research error handling - empty query
|
||||
await runTest('Error handling - empty query', async () => {
|
||||
const result = await helpers.taskMaster(
|
||||
'research',
|
||||
[''],
|
||||
{ cwd: testDir, allowFailure: true }
|
||||
);
|
||||
if (result.exitCode === 0) {
|
||||
throw new Error('Should have failed with empty query');
|
||||
}
|
||||
});
|
||||
|
||||
// Test 9: Research with time constraints
|
||||
await runTest('Research with recency filter', async () => {
|
||||
const result = await helpers.taskMaster(
|
||||
'research',
|
||||
['--since', '2023', 'Latest JavaScript features and ES2024 updates'],
|
||||
{ cwd: testDir, timeout: 120000 }
|
||||
);
|
||||
if (result.exitCode !== 0) {
|
||||
throw new Error(`Command failed: ${result.stderr}`);
|
||||
}
|
||||
|
||||
// Should mention recent features
|
||||
const output = result.stdout.toLowerCase();
|
||||
const recentFeatures = ['es2023', 'es2024', '2023', '2024', 'latest', 'recent'];
|
||||
const mentionsRecent = recentFeatures.some(feature => output.includes(feature));
|
||||
|
||||
if (!mentionsRecent) {
|
||||
throw new Error('Research did not focus on recent information');
|
||||
}
|
||||
});
|
||||
|
||||
// Test 10: Research with comparison request
|
||||
await runTest('Research comparison analysis', async () => {
|
||||
const result = await helpers.taskMaster(
|
||||
'research',
|
||||
['Compare REST vs GraphQL vs gRPC for microservices communication'],
|
||||
{ cwd: testDir, timeout: 120000 }
|
||||
);
|
||||
if (result.exitCode !== 0) {
|
||||
throw new Error(`Command failed: ${result.stderr}`);
|
||||
}
|
||||
|
||||
// Should mention all three technologies
|
||||
const output = result.stdout.toLowerCase();
|
||||
if (!output.includes('rest') || !output.includes('graphql') || !output.includes('grpc')) {
|
||||
throw new Error('Research did not compare all three technologies');
|
||||
}
|
||||
|
||||
// Should include pros/cons or comparison points
|
||||
const hasComparison = output.includes('advantage') ||
|
||||
output.includes('disadvantage') ||
|
||||
output.includes('pros') ||
|
||||
output.includes('cons') ||
|
||||
output.includes('better') ||
|
||||
output.includes('when to use');
|
||||
if (!hasComparison) {
|
||||
throw new Error('Research lacks comparative analysis');
|
||||
}
|
||||
});
|
||||
|
||||
// Test 11: Research with code examples request
|
||||
await runTest('Research with code examples', async () => {
|
||||
const result = await helpers.taskMaster(
|
||||
'research',
|
||||
['--include-examples', 'How to implement a singleton pattern in TypeScript?'],
|
||||
{ cwd: testDir, timeout: 120000 }
|
||||
);
|
||||
if (result.exitCode !== 0) {
|
||||
throw new Error(`Command failed: ${result.stderr}`);
|
||||
}
|
||||
|
||||
// Should include code blocks
|
||||
if (!result.stdout.includes('```') && !result.stdout.includes('class') && !result.stdout.includes('function')) {
|
||||
throw new Error('Research did not include code examples');
|
||||
}
|
||||
|
||||
// Should be TypeScript specific
|
||||
const output = result.stdout.toLowerCase();
|
||||
if (!output.includes('typescript') && !output.includes('private constructor')) {
|
||||
throw new Error('Examples not specific to TypeScript');
|
||||
}
|
||||
});
|
||||
|
||||
// Test 12: Research for architecture decisions
|
||||
await runTest('Research for architecture decisions', async () => {
|
||||
const result = await helpers.taskMaster(
|
||||
'research',
|
||||
['--type', 'architecture', 'Microservices vs monolithic architecture for a startup'],
|
||||
{ cwd: testDir, timeout: 120000 }
|
||||
);
|
||||
if (result.exitCode !== 0) {
|
||||
throw new Error(`Command failed: ${result.stderr}`);
|
||||
}
|
||||
|
||||
// Should provide architectural insights
|
||||
const output = result.stdout.toLowerCase();
|
||||
const archKeywords = ['scalability', 'deployment', 'complexity', 'team size', 'maintenance', 'cost'];
|
||||
const mentionedKeywords = archKeywords.filter(keyword => output.includes(keyword)).length;
|
||||
|
||||
if (mentionedKeywords < 3) {
|
||||
throw new Error('Research lacks architectural considerations');
|
||||
}
|
||||
});
|
||||
|
||||
// Test 13: Research with tag context
|
||||
await runTest('Research within tag context', async () => {
|
||||
// Create tag and tagged tasks
|
||||
await helpers.taskMaster('add-tag', ['security-research'], { cwd: testDir });
|
||||
|
||||
const result = await helpers.taskMaster(
|
||||
'research',
|
||||
['--tag', 'security-research', 'OWASP top 10 vulnerabilities and mitigation strategies'],
|
||||
{ cwd: testDir, timeout: 120000 }
|
||||
);
|
||||
if (result.exitCode !== 0) {
|
||||
throw new Error(`Command failed: ${result.stderr}`);
|
||||
}
|
||||
|
||||
// Should focus on security
|
||||
const output = result.stdout.toLowerCase();
|
||||
const securityTerms = ['vulnerability', 'security', 'attack', 'protection', 'owasp', 'mitigation'];
|
||||
const mentionedTerms = securityTerms.filter(term => output.includes(term)).length;
|
||||
|
||||
if (mentionedTerms < 4) {
|
||||
throw new Error('Research not focused on security topics');
|
||||
}
|
||||
});
|
||||
|
||||
// Test 14: Research performance with complex query
|
||||
await runTest('Performance - complex research query', async () => {
|
||||
const startTime = Date.now();
|
||||
const result = await helpers.taskMaster(
|
||||
'research',
|
||||
['Comprehensive guide to building a scalable real-time chat application with WebSockets, including architecture, database design, message queuing, and deployment strategies'],
|
||||
{ cwd: testDir, timeout: 180000 }
|
||||
);
|
||||
const duration = Date.now() - startTime;
|
||||
|
||||
if (result.exitCode !== 0) {
|
||||
throw new Error(`Command failed: ${result.stderr}`);
|
||||
}
|
||||
|
||||
logger.info(`Complex research completed in ${duration}ms`);
|
||||
|
||||
// Should cover all requested topics
|
||||
const output = result.stdout.toLowerCase();
|
||||
const topics = ['websocket', 'architecture', 'database', 'queue', 'deployment', 'scalab'];
|
||||
const coveredTopics = topics.filter(topic => output.includes(topic)).length;
|
||||
|
||||
if (coveredTopics < 4) {
|
||||
throw new Error('Complex research did not cover all requested topics');
|
||||
}
|
||||
});
|
||||
|
||||
// Test 15: Research with export option (preparing for research-save)
|
||||
await runTest('Research with export preparation', async () => {
|
||||
const result = await helpers.taskMaster(
|
||||
'research',
|
||||
['--prepare-export', 'Best practices for API versioning'],
|
||||
{ cwd: testDir, timeout: 120000 }
|
||||
);
|
||||
if (result.exitCode !== 0) {
|
||||
throw new Error(`Command failed: ${result.stderr}`);
|
||||
}
|
||||
|
||||
// Content should be relevant to the query
|
||||
if (!result.stdout.includes('API') || !result.stdout.includes('version')) {
|
||||
throw new Error('Research content not relevant to query');
|
||||
}
|
||||
|
||||
// Check if research is structured for saving
|
||||
const hasStructure = result.stdout.includes('#') ||
|
||||
result.stdout.includes('##') ||
|
||||
result.stdout.includes('1.') ||
|
||||
result.stdout.includes('*');
|
||||
if (!hasStructure) {
|
||||
throw new Error('Research not well-structured for export');
|
||||
}
|
||||
});
|
||||
|
||||
// Calculate summary
|
||||
const totalTests = results.tests.length;
|
||||
const passedTests = results.tests.filter(t => t.status === 'passed').length;
|
||||
const failedTests = results.tests.filter(t => t.status === 'failed').length;
|
||||
|
||||
logger.info('\n=== Research Test Summary ===');
|
||||
logger.info(`Total tests: ${totalTests}`);
|
||||
logger.info(`Passed: ${passedTests}`);
|
||||
logger.info(`Failed: ${failedTests}`);
|
||||
|
||||
if (failedTests > 0) {
|
||||
results.status = 'failed';
|
||||
logger.error(`\n${failedTests} tests failed`);
|
||||
} else {
|
||||
logger.success('\n✅ All research tests passed!');
|
||||
}
|
||||
|
||||
} catch (error) {
|
||||
results.status = 'failed';
|
||||
results.errors.push({
|
||||
test: 'research test suite',
|
||||
error: error.message,
|
||||
stack: error.stack
|
||||
});
|
||||
logger.error(`Research test suite failed: ${error.message}`);
|
||||
}
|
||||
|
||||
return results;
|
||||
}
|
||||
475
tests/e2e/tests/commands/update-subtask.test.js
Normal file
@@ -0,0 +1,475 @@
|
||||
/**
|
||||
* Comprehensive E2E tests for update-subtask command
|
||||
* Tests all aspects of subtask updates including AI and manual modes
|
||||
*/
|
||||
|
||||
export default async function testUpdateSubtask(logger, helpers, context) {
|
||||
const { testDir } = context;
|
||||
const results = {
|
||||
status: 'passed',
|
||||
errors: [],
|
||||
tests: []
|
||||
};
|
||||
|
||||
async function runTest(name, testFn) {
|
||||
try {
|
||||
logger.info(`\nRunning: ${name}`);
|
||||
await testFn();
|
||||
results.tests.push({ name, status: 'passed' });
|
||||
logger.success(`✓ ${name}`);
|
||||
} catch (error) {
|
||||
results.tests.push({ name, status: 'failed', error: error.message });
|
||||
results.errors.push({ test: name, error: error.message });
|
||||
logger.error(`✗ ${name}: ${error.message}`);
|
||||
}
|
||||
}
|
||||
|
||||
try {
|
||||
logger.info('Starting comprehensive update-subtask tests...');
|
||||
|
||||
// Setup: Create parent task with subtasks
|
||||
logger.info('Setting up parent task with subtasks...');
|
||||
|
||||
// Create parent task
|
||||
const parentResult = await helpers.taskMaster(
|
||||
'add-task',
|
||||
['--prompt', 'Build a user authentication system'],
|
||||
{ cwd: testDir }
|
||||
);
|
||||
const parentTaskId = helpers.extractTaskId(parentResult.stdout);
|
||||
|
||||
// Expand to get AI-generated subtasks
|
||||
await helpers.taskMaster('expand', [parentTaskId], { cwd: testDir, timeout: 120000 });
|
||||
|
||||
// Add some manual subtasks
|
||||
await helpers.taskMaster(
|
||||
'add-subtask',
|
||||
[parentTaskId, 'Setup database schema'],
|
||||
{ cwd: testDir }
|
||||
);
|
||||
await helpers.taskMaster(
|
||||
'add-subtask',
|
||||
[parentTaskId, 'Create login endpoint'],
|
||||
{ cwd: testDir }
|
||||
);
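// The tests below assume subtasks are addressable as `${parentTaskId}.N`,
// numbered in the order they were created by expand/add-subtask.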
|
||||
|
||||
// Test 1: Basic AI-powered subtask update
|
||||
await runTest('AI-powered subtask update', async () => {
|
||||
const subtaskId = `${parentTaskId}.1`;
|
||||
const result = await helpers.taskMaster(
|
||||
'update-subtask',
|
||||
['--id', subtaskId, '--prompt', 'Make this subtask focus on JWT token implementation'],
|
||||
{ cwd: testDir, timeout: 120000 }
|
||||
);
|
||||
if (result.exitCode !== 0) {
|
||||
throw new Error(`Command failed: ${result.stderr}`);
|
||||
}
|
||||
|
||||
// Verify subtask was updated
|
||||
const showResult = await helpers.taskMaster('show', [parentTaskId], { cwd: testDir });
|
||||
if (!showResult.stdout.includes('JWT') || !showResult.stdout.includes('token')) {
|
||||
throw new Error('Subtask not updated with JWT focus');
|
||||
}
|
||||
});
|
||||
|
||||
// Test 2: Manual subtask update (without AI)
|
||||
await runTest('Manual subtask update', async () => {
|
||||
const subtaskId = `${parentTaskId}.2`;
|
||||
const result = await helpers.taskMaster(
|
||||
'update-subtask',
|
||||
[
|
||||
'--id', subtaskId,
|
||||
'--title', 'Implement OAuth 2.0 integration',
|
||||
'--description', 'Add support for Google and GitHub OAuth providers'
|
||||
],
|
||||
{ cwd: testDir }
|
||||
);
|
||||
if (result.exitCode !== 0) {
|
||||
throw new Error(`Command failed: ${result.stderr}`);
|
||||
}
|
||||
|
||||
// Verify exact updates
|
||||
const showResult = await helpers.taskMaster('show', [parentTaskId], { cwd: testDir });
|
||||
if (!showResult.stdout.includes('OAuth 2.0')) {
|
||||
throw new Error('Subtask title not updated');
|
||||
}
|
||||
if (!showResult.stdout.includes('Google') || !showResult.stdout.includes('GitHub')) {
|
||||
throw new Error('Subtask description not updated');
|
||||
}
|
||||
});
|
||||
|
||||
// Test 3: Update subtask status
|
||||
await runTest('Update subtask status', async () => {
|
||||
const subtaskId = `${parentTaskId}.3`;
|
||||
const result = await helpers.taskMaster(
|
||||
'update-subtask',
|
||||
['--id', subtaskId, '--status', 'in_progress'],
|
||||
{ cwd: testDir }
|
||||
);
|
||||
if (result.exitCode !== 0) {
|
||||
throw new Error(`Command failed: ${result.stderr}`);
|
||||
}
|
||||
|
||||
// Verify status change
|
||||
const tasksJson = helpers.readJson(`${testDir}/.taskmaster/tasks/tasks.json`);
|
||||
const parentTask = tasksJson.tasks.find(t => t.id === parentTaskId);
|
||||
const subtask = parentTask.subtasks.find(s => s.id === subtaskId);
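// Assumed tasks.json layout: { tasks: [{ id, subtasks: [{ id, status, priority,
// dependencies }] }] }, with subtask ids stored in dotted form (e.g. '5.3').
// If bare numeric subtask ids are stored instead, compare against the suffix
// after the dot.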
|
||||
|
||||
if (subtask.status !== 'in_progress') {
|
||||
throw new Error('Subtask status not updated');
|
||||
}
|
||||
});
|
||||
|
||||
// Test 4: Update subtask priority
|
||||
await runTest('Update subtask priority', async () => {
|
||||
const subtaskId = `${parentTaskId}.4`;
|
||||
const result = await helpers.taskMaster(
|
||||
'update-subtask',
|
||||
['--id', subtaskId, '--priority', 'high'],
|
||||
{ cwd: testDir }
|
||||
);
|
||||
if (result.exitCode !== 0) {
|
||||
throw new Error(`Command failed: ${result.stderr}`);
|
||||
}
|
||||
|
||||
// Verify priority change
|
||||
const tasksJson = helpers.readJson(`${testDir}/.taskmaster/tasks/tasks.json`);
|
||||
const parentTask = tasksJson.tasks.find(t => t.id === parentTaskId);
|
||||
const subtask = parentTask.subtasks.find(s => s.id === subtaskId);
|
||||
|
||||
if (subtask.priority !== 'high') {
|
||||
throw new Error('Subtask priority not updated');
|
||||
}
|
||||
});
|
||||
|
||||
// Test 5: Batch subtask updates
|
||||
await runTest('Batch subtask updates', async () => {
|
||||
// Update multiple subtasks at once
|
||||
const subtaskIds = [`${parentTaskId}.1`, `${parentTaskId}.2`];
|
||||
const result = await helpers.taskMaster(
|
||||
'update-subtask',
|
||||
['--ids', subtaskIds.join(','), '--status', 'completed'],
|
||||
{ cwd: testDir }
|
||||
);
|
||||
if (result.exitCode !== 0) {
|
||||
throw new Error(`Command failed: ${result.stderr}`);
|
||||
}
|
||||
|
||||
// Verify all were updated
|
||||
const tasksJson = helpers.readJson(`${testDir}/.taskmaster/tasks/tasks.json`);
|
||||
const parentTask = tasksJson.tasks.find(t => t.id === parentTaskId);
|
||||
|
||||
subtaskIds.forEach(id => {
|
||||
const subtask = parentTask.subtasks.find(s => s.id === id);
|
||||
if (subtask.status !== 'completed') {
|
||||
throw new Error(`Subtask ${id} not updated in batch`);
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
// Test 6: Update subtask with dependencies
|
||||
await runTest('Update subtask dependencies', async () => {
|
||||
const subtask1 = `${parentTaskId}.3`;
|
||||
const subtask2 = `${parentTaskId}.4`;
|
||||
|
||||
const result = await helpers.taskMaster(
|
||||
'update-subtask',
|
||||
['--id', subtask2, '--depends-on', subtask1],
|
||||
{ cwd: testDir }
|
||||
);
|
||||
if (result.exitCode !== 0) {
|
||||
throw new Error(`Command failed: ${result.stderr}`);
|
||||
}
|
||||
|
||||
// Verify dependency was added
|
||||
const tasksJson = helpers.readJson(`${testDir}/.taskmaster/tasks/tasks.json`);
|
||||
const parentTask = tasksJson.tasks.find(t => t.id === parentTaskId);
|
||||
const subtask = parentTask.subtasks.find(s => s.id === subtask2);
|
||||
|
||||
if (!subtask.dependencies || !subtask.dependencies.includes(subtask1)) {
|
||||
throw new Error('Subtask dependency not added');
|
||||
}
|
||||
});

		// Test 7: AI enhancement of existing subtask
		await runTest('AI enhancement of manual subtask', async () => {
			// Get last manual subtask
			const showResult = await helpers.taskMaster('show', [parentTaskId], { cwd: testDir });
			const subtaskMatches = showResult.stdout.match(/(\d+\.\d+)/g) || [];
			const lastSubtaskId = subtaskMatches[subtaskMatches.length - 1];
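			// Scraping ids out of the rendered `show` output is brittle if the table
			// format ever changes; reading the parent straight from tasks.json (as the
			// other tests do) is a sturdier way to find the last subtask, e.g.:
			// const parent = helpers.readJson(`${testDir}/.taskmaster/tasks/tasks.json`)
			//   .tasks.find(t => t.id === parentTaskId);
			// const lastSubtask = parent.subtasks[parent.subtasks.length - 1];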
|
||||
|
||||
const result = await helpers.taskMaster(
|
||||
'update-subtask',
|
||||
['--id', lastSubtaskId, '--enhance', '--prompt', 'Add security considerations'],
|
||||
{ cwd: testDir, timeout: 120000 }
|
||||
);
|
||||
if (result.exitCode !== 0) {
|
||||
throw new Error(`Command failed: ${result.stderr}`);
|
||||
}
|
||||
|
||||
// Should include security aspects
|
||||
const updatedShow = await helpers.taskMaster('show', [parentTaskId], { cwd: testDir });
|
||||
const hasSecurityMention = updatedShow.stdout.toLowerCase().includes('security') ||
|
||||
updatedShow.stdout.toLowerCase().includes('secure') ||
|
||||
updatedShow.stdout.toLowerCase().includes('protection');
|
||||
|
||||
if (!hasSecurityMention) {
|
||||
throw new Error('AI enhancement did not add security considerations');
|
||||
}
|
||||
});
|
||||
|
||||
// Test 8: Error handling - invalid subtask ID
|
||||
await runTest('Error handling - invalid subtask ID', async () => {
|
||||
const result = await helpers.taskMaster(
|
||||
'update-subtask',
|
||||
['--id', '999.999', '--title', 'Invalid update'],
|
||||
{ cwd: testDir, allowFailure: true }
|
||||
);
|
||||
if (result.exitCode === 0) {
|
||||
throw new Error('Should have failed with invalid subtask ID');
|
||||
}
|
||||
if (!result.stderr.includes('not found') && !result.stderr.includes('invalid')) {
|
||||
throw new Error('Error message not clear about invalid ID');
|
||||
}
|
||||
});
|
||||
|
||||
// Test 9: Update subtask metadata
|
||||
await runTest('Update subtask metadata', async () => {
|
||||
const subtaskId = `${parentTaskId}.1`;
|
||||
const result = await helpers.taskMaster(
|
||||
'update-subtask',
|
||||
[
|
||||
'--id', subtaskId,
|
||||
'--metadata', 'assigned_to=john@example.com',
|
||||
'--metadata', 'estimated_hours=4'
|
||||
],
|
||||
{ cwd: testDir }
|
||||
);
|
||||
if (result.exitCode !== 0) {
|
||||
throw new Error(`Command failed: ${result.stderr}`);
|
||||
}
|
||||
|
||||
// Verify metadata
|
||||
const tasksJson = helpers.readJson(`${testDir}/.taskmaster/tasks/tasks.json`);
|
||||
const parentTask = tasksJson.tasks.find(t => t.id === parentTaskId);
|
||||
const subtask = parentTask.subtasks.find(s => s.id === subtaskId);
|
||||
|
||||
if (!subtask.metadata || subtask.metadata.assigned_to !== 'john@example.com') {
|
||||
throw new Error('Subtask metadata not updated');
|
||||
}
|
||||
});
|
||||
|
||||
// Test 10: Update with validation
|
||||
await runTest('Update with validation rules', async () => {
|
||||
// Try to update completed subtask (should warn or fail based on rules)
|
||||
const subtaskId = `${parentTaskId}.1`; // This was marked completed earlier
|
||||
const result = await helpers.taskMaster(
|
||||
'update-subtask',
|
||||
['--id', subtaskId, '--title', 'Trying to update completed task', '--force'],
|
||||
{ cwd: testDir }
|
||||
);
|
||||
|
||||
// Should either succeed with --force or provide clear message
|
||||
if (result.exitCode !== 0 && !result.stderr.includes('completed')) {
|
||||
throw new Error('No clear message about updating completed subtask');
|
||||
}
|
||||
});
|
||||
|
||||
// Test 11: Complex update with multiple fields
|
||||
await runTest('Complex multi-field update', async () => {
|
||||
// Create fresh subtask
|
||||
await helpers.taskMaster(
|
||||
'add-subtask',
|
||||
[parentTaskId, 'Fresh subtask for complex update'],
|
||||
{ cwd: testDir }
|
||||
);
|
||||
|
||||
const showResult = await helpers.taskMaster('show', [parentTaskId], { cwd: testDir });
|
||||
const subtaskMatches = showResult.stdout.match(/(\d+\.\d+)/g) || [];
|
||||
const newSubtaskId = subtaskMatches[subtaskMatches.length - 1];
|
||||
|
||||
const result = await helpers.taskMaster(
|
||||
'update-subtask',
|
||||
[
|
||||
'--id', newSubtaskId,
|
||||
'--prompt', 'Enhance with testing requirements',
|
||||
'--priority', 'medium',
|
||||
'--status', 'in_progress',
|
||||
'--metadata', 'test_coverage=required'
|
||||
],
|
||||
{ cwd: testDir, timeout: 120000 }
|
||||
);
|
||||
if (result.exitCode !== 0) {
|
||||
throw new Error(`Command failed: ${result.stderr}`);
|
||||
}
|
||||
|
||||
// Verify all updates applied
|
||||
const tasksJson = helpers.readJson(`${testDir}/.taskmaster/tasks/tasks.json`);
|
||||
const parentTask = tasksJson.tasks.find(t => t.id === parentTaskId);
|
||||
const subtask = parentTask.subtasks.find(s => s.id === newSubtaskId);
|
||||
|
||||
if (subtask.priority !== 'medium' || subtask.status !== 'in_progress') {
|
||||
throw new Error('Not all fields updated');
|
||||
}
|
||||
if (!subtask.metadata || subtask.metadata.test_coverage !== 'required') {
|
||||
throw new Error('Metadata not updated in complex update');
|
||||
}
|
||||
});
|
||||
|
||||
// Test 12: Update subtask in context of parent task
|
||||
await runTest('Context-aware subtask update', async () => {
|
||||
// Create new parent task with specific context
|
||||
const contextParent = await helpers.taskMaster(
|
||||
'add-task',
|
||||
['--prompt', 'Build REST API with Node.js'],
|
||||
{ cwd: testDir }
|
||||
);
|
||||
const contextParentId = helpers.extractTaskId(contextParent.stdout);
|
||||
|
||||
// Add subtask
|
||||
await helpers.taskMaster(
|
||||
'add-subtask',
|
||||
[contextParentId, 'Create endpoints'],
|
||||
{ cwd: testDir }
|
||||
);
|
||||
|
||||
const subtaskId = `${contextParentId}.1`;
|
||||
const result = await helpers.taskMaster(
|
||||
'update-subtask',
|
||||
['--id', subtaskId, '--prompt', 'Focus on CRUD operations', '--use-parent-context'],
|
||||
{ cwd: testDir, timeout: 120000 }
|
||||
);
|
||||
if (result.exitCode !== 0) {
|
||||
throw new Error(`Command failed: ${result.stderr}`);
|
||||
}
|
||||
|
||||
// Should maintain REST API context
|
||||
const showResult = await helpers.taskMaster('show', [contextParentId], { cwd: testDir });
|
||||
const hasApiContext = showResult.stdout.toLowerCase().includes('api') ||
|
||||
showResult.stdout.toLowerCase().includes('endpoint') ||
|
||||
showResult.stdout.toLowerCase().includes('rest');
|
||||
|
||||
if (!hasApiContext) {
|
||||
throw new Error('Parent context not preserved in subtask update');
|
||||
}
|
||||
});
|
||||
|
||||
// Test 13: Reorder subtasks during update
|
||||
await runTest('Reorder subtasks', async () => {
|
||||
const subtaskId = `${parentTaskId}.3`;
|
||||
const result = await helpers.taskMaster(
|
||||
'update-subtask',
|
||||
['--id', subtaskId, '--position', '1'],
|
||||
{ cwd: testDir }
|
||||
);
|
||||
if (result.exitCode !== 0) {
|
||||
throw new Error(`Command failed: ${result.stderr}`);
|
||||
}
|
||||
|
||||
// Verify reordering
|
||||
const showResult = await helpers.taskMaster('show', [parentTaskId], { cwd: testDir });
|
||||
// The subtask that was at position 3 should now appear first
|
||||
// This is implementation dependent, so we just check it succeeded
|
||||
});
|
||||
|
||||
// Test 14: Update with tag assignment
|
||||
await runTest('Update subtask with tags', async () => {
|
||||
// Create tag first
|
||||
await helpers.taskMaster('add-tag', ['backend-subtasks'], { cwd: testDir });
|
||||
|
||||
const subtaskId = `${parentTaskId}.1`;
|
||||
const result = await helpers.taskMaster(
|
||||
'update-subtask',
|
||||
['--id', subtaskId, '--tag', 'backend-subtasks'],
|
||||
{ cwd: testDir }
|
||||
);
|
||||
if (result.exitCode !== 0) {
|
||||
throw new Error(`Command failed: ${result.stderr}`);
|
||||
}
|
||||
|
||||
// Verify tag was assigned
|
||||
const tasksJson = helpers.readJson(`${testDir}/.taskmaster/tasks/tasks.json`);
|
||||
const parentTask = tasksJson.tasks.find(t => t.id === parentTaskId);
|
||||
const subtask = parentTask.subtasks.find(s => s.id === subtaskId);
|
||||
|
||||
if (!subtask.tags || !subtask.tags.includes('backend-subtasks')) {
|
||||
throw new Error('Tag not assigned to subtask');
|
||||
}
|
||||
});

		// Test 15: Performance - update many subtasks
		await runTest('Performance - bulk subtask updates', async () => {
			// Create parent with many subtasks
			const perfParent = await helpers.taskMaster(
				'add-task',
				['--prompt', 'Large project with many components'],
				{ cwd: testDir }
			);
			const perfParentId = helpers.extractTaskId(perfParent.stdout);

			// Add 20 subtasks
			const promises = [];
			for (let i = 1; i <= 20; i++) {
				promises.push(
					helpers.taskMaster(
						'add-subtask',
						[perfParentId, `Component ${i}`],
						{ cwd: testDir }
					)
				);
			}
			await Promise.all(promises);
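			// Note: Promise.all fires all 20 add-subtask commands concurrently, which
			// assumes the CLI tolerates parallel writes to tasks.json; if it does not,
			// awaiting each call in sequence is the safer (if slower) setup.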

			// Update all to in_progress
			const subtaskIds = [];
			for (let i = 1; i <= 20; i++) {
				subtaskIds.push(`${perfParentId}.${i}`);
			}

			const startTime = Date.now();
			const result = await helpers.taskMaster(
				'update-subtask',
				['--ids', subtaskIds.join(','), '--status', 'in_progress'],
				{ cwd: testDir }
			);
			const duration = Date.now() - startTime;

			if (result.exitCode !== 0) {
				throw new Error(`Command failed: ${result.stderr}`);
			}

			logger.info(`Updated 20 subtasks in ${duration}ms`);
			if (duration > 5000) {
				throw new Error(`Bulk update too slow: ${duration}ms`);
			}
		});
|
||||
|
||||
// Calculate summary
|
||||
const totalTests = results.tests.length;
|
||||
const passedTests = results.tests.filter(t => t.status === 'passed').length;
|
||||
const failedTests = results.tests.filter(t => t.status === 'failed').length;
|
||||
|
||||
logger.info('\n=== Update-Subtask Test Summary ===');
|
||||
logger.info(`Total tests: ${totalTests}`);
|
||||
logger.info(`Passed: ${passedTests}`);
|
||||
logger.info(`Failed: ${failedTests}`);
|
||||
|
||||
if (failedTests > 0) {
|
||||
results.status = 'failed';
|
||||
logger.error(`\n${failedTests} tests failed`);
|
||||
} else {
|
||||
logger.success('\n✅ All update-subtask tests passed!');
|
||||
}
|
||||
|
||||
} catch (error) {
|
||||
results.status = 'failed';
|
||||
results.errors.push({
|
||||
test: 'update-subtask test suite',
|
||||
error: error.message,
|
||||
stack: error.stack
|
||||
});
|
||||
logger.error(`Update-subtask test suite failed: ${error.message}`);
|
||||
}
|
||||
|
||||
return results;
|
||||
}
484
tests/e2e/tests/commands/update-task.test.js
Normal file
@@ -0,0 +1,484 @@
/**
 * Comprehensive E2E tests for update-task command
 * Tests all aspects of task updates including AI-powered and manual updates
 */

export default async function testUpdateTask(logger, helpers, context) {
|
||||
const { testDir } = context;
|
||||
const results = {
|
||||
status: 'passed',
|
||||
errors: [],
|
||||
tests: []
|
||||
};
|
||||
|
||||
async function runTest(name, testFn) {
|
||||
try {
|
||||
logger.info(`\nRunning: ${name}`);
|
||||
await testFn();
|
||||
results.tests.push({ name, status: 'passed' });
|
||||
logger.success(`✓ ${name}`);
|
||||
} catch (error) {
|
||||
results.tests.push({ name, status: 'failed', error: error.message });
|
||||
results.errors.push({ test: name, error: error.message });
|
||||
logger.error(`✗ ${name}: ${error.message}`);
|
||||
}
|
||||
}
|
||||
|
||||
try {
|
||||
logger.info('Starting comprehensive update-task tests...');
|
||||
|
||||
// Setup: Create various tasks for testing
|
||||
logger.info('Setting up test tasks...');
|
||||
|
||||
// Create simple task
|
||||
const simpleResult = await helpers.taskMaster(
|
||||
'add-task',
|
||||
['--title', 'Simple task', '--description', 'Initial description'],
|
||||
{ cwd: testDir }
|
||||
);
|
||||
const simpleTaskId = helpers.extractTaskId(simpleResult.stdout);
|
||||
|
||||
// Create AI task
|
||||
const aiResult = await helpers.taskMaster(
|
||||
'add-task',
|
||||
['--prompt', 'Create a logging system for the application'],
|
||||
{ cwd: testDir }
|
||||
);
|
||||
const aiTaskId = helpers.extractTaskId(aiResult.stdout);
|
||||
|
||||
// Create task with metadata
|
||||
const metaResult = await helpers.taskMaster(
|
||||
'add-task',
|
||||
['--title', 'Task with metadata', '--metadata', 'version=1.0'],
|
||||
{ cwd: testDir }
|
||||
);
|
||||
const metaTaskId = helpers.extractTaskId(metaResult.stdout);
|
||||
|
||||
// Test 1: Basic manual update - description only
|
||||
await runTest('Basic description update', async () => {
|
||||
const result = await helpers.taskMaster(
|
||||
'update-task',
|
||||
[simpleTaskId, '--description', 'Updated description with more details'],
|
||||
{ cwd: testDir }
|
||||
);
|
||||
if (result.exitCode !== 0) {
|
||||
throw new Error(`Command failed: ${result.stderr}`);
|
||||
}
|
||||
|
||||
// Verify update
|
||||
const showResult = await helpers.taskMaster('show', [simpleTaskId], { cwd: testDir });
|
||||
if (!showResult.stdout.includes('Updated description with more details')) {
|
||||
throw new Error('Description not updated');
|
||||
}
|
||||
});
|
||||
|
||||
// Test 2: AI-powered task update
|
||||
await runTest('AI-powered task update', async () => {
|
||||
const result = await helpers.taskMaster(
|
||||
'update-task',
|
||||
[aiTaskId, '--prompt', 'Add requirements for structured logging with log levels and rotation'],
|
||||
{ cwd: testDir, timeout: 120000 }
|
||||
);
|
||||
if (result.exitCode !== 0) {
|
||||
throw new Error(`Command failed: ${result.stderr}`);
|
||||
}
|
||||
|
||||
// Verify AI enhancements
|
||||
const showResult = await helpers.taskMaster('show', [aiTaskId], { cwd: testDir });
|
||||
const output = showResult.stdout.toLowerCase();
|
||||
|
||||
// Should mention logging concepts
|
||||
const hasLoggingConcepts = output.includes('log level') ||
|
||||
output.includes('rotation') ||
|
||||
output.includes('structured') ||
|
||||
output.includes('logging');
|
||||
if (!hasLoggingConcepts) {
|
||||
throw new Error('AI did not enhance task with logging requirements');
|
||||
}
|
||||
});
|
||||
|
||||
// Test 3: Update multiple fields simultaneously
|
||||
await runTest('Multi-field update', async () => {
|
||||
const result = await helpers.taskMaster(
|
||||
'update-task',
|
||||
[
|
||||
simpleTaskId,
|
||||
'--title', 'Renamed task',
|
||||
'--description', 'New comprehensive description',
|
||||
'--priority', 'high',
|
||||
'--status', 'in_progress'
|
||||
],
|
||||
{ cwd: testDir }
|
||||
);
|
||||
if (result.exitCode !== 0) {
|
||||
throw new Error(`Command failed: ${result.stderr}`);
|
||||
}
|
||||
|
||||
// Verify all updates
|
||||
const showResult = await helpers.taskMaster('show', [simpleTaskId], { cwd: testDir });
|
||||
if (!showResult.stdout.includes('Renamed task')) {
|
||||
throw new Error('Title not updated');
|
||||
}
|
||||
if (!showResult.stdout.includes('New comprehensive description')) {
|
||||
throw new Error('Description not updated');
|
||||
}
|
||||
if (!showResult.stdout.includes('high') && !showResult.stdout.includes('High')) {
|
||||
throw new Error('Priority not updated');
|
||||
}
|
||||
if (!showResult.stdout.includes('in_progress') && !showResult.stdout.includes('In Progress')) {
|
||||
throw new Error('Status not updated');
|
||||
}
|
||||
});
|
||||
|
||||
// Test 4: Update task metadata
|
||||
await runTest('Update task metadata', async () => {
|
||||
const result = await helpers.taskMaster(
|
||||
'update-task',
|
||||
[
|
||||
metaTaskId,
|
||||
'--metadata', 'version=2.0',
|
||||
'--metadata', 'author=test-user',
|
||||
'--metadata', 'reviewed=true'
|
||||
],
|
||||
{ cwd: testDir }
|
||||
);
|
||||
if (result.exitCode !== 0) {
|
||||
throw new Error(`Command failed: ${result.stderr}`);
|
||||
}
|
||||
|
||||
// Verify metadata
|
||||
const tasksJson = helpers.readJson(`${testDir}/.taskmaster/tasks/tasks.json`);
|
||||
const task = tasksJson.tasks.find(t => t.id === metaTaskId);
|
||||
|
||||
if (!task.metadata || task.metadata.version !== '2.0' ||
|
||||
task.metadata.author !== 'test-user' || task.metadata.reviewed !== 'true') {
|
||||
throw new Error('Metadata not properly updated');
|
||||
}
|
||||
});
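		// The assertions above compare metadata values as strings ('2.0', 'true'),
		// on the assumption that the CLI stores every --metadata value verbatim as a
		// string rather than coercing numbers or booleans.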
|
||||
|
||||
// Test 5: Error handling - non-existent task
|
||||
await runTest('Error handling - non-existent task', async () => {
|
||||
const result = await helpers.taskMaster(
|
||||
'update-task',
|
||||
['99999', '--description', 'This should fail'],
|
||||
{ cwd: testDir, allowFailure: true }
|
||||
);
|
||||
if (result.exitCode === 0) {
|
||||
throw new Error('Should have failed with non-existent task');
|
||||
}
|
||||
if (!result.stderr.includes('not found') && !result.stderr.includes('exist')) {
|
||||
throw new Error('Error message not clear about missing task');
|
||||
}
|
||||
});
|
||||
|
||||
// Test 6: Update with validation of AI output
|
||||
await runTest('AI update with validation', async () => {
|
||||
// Create task with specific context
|
||||
const validationResult = await helpers.taskMaster(
|
||||
'add-task',
|
||||
['--prompt', 'Setup CI/CD pipeline'],
|
||||
{ cwd: testDir }
|
||||
);
|
||||
const validationTaskId = helpers.extractTaskId(validationResult.stdout);
|
||||
|
||||
// Update with specific requirements
|
||||
const result = await helpers.taskMaster(
|
||||
'update-task',
|
||||
[validationTaskId, '--prompt', 'Add automated testing and deployment stages', '--validate-ai'],
|
||||
{ cwd: testDir, timeout: 120000 }
|
||||
);
|
||||
if (result.exitCode !== 0) {
|
||||
throw new Error(`Command failed: ${result.stderr}`);
|
||||
}
|
||||
|
||||
// Check AI added relevant content
|
||||
const showResult = await helpers.taskMaster('show', [validationTaskId], { cwd: testDir });
|
||||
const output = showResult.stdout.toLowerCase();
|
||||
|
||||
if (!output.includes('test') || !output.includes('deploy')) {
|
||||
throw new Error('AI validation failed - missing required concepts');
|
||||
}
|
||||
});
|
||||
|
||||
// Test 7: Update task with tag changes
|
||||
await runTest('Update task tags', async () => {
|
||||
// Create tags
|
||||
await helpers.taskMaster('add-tag', ['frontend'], { cwd: testDir });
|
||||
await helpers.taskMaster('add-tag', ['urgent'], { cwd: testDir });
|
||||
|
||||
const result = await helpers.taskMaster(
|
||||
'update-task',
|
||||
[simpleTaskId, '--add-tag', 'frontend', '--add-tag', 'urgent'],
|
||||
{ cwd: testDir }
|
||||
);
|
||||
if (result.exitCode !== 0) {
|
||||
throw new Error(`Command failed: ${result.stderr}`);
|
||||
}
|
||||
|
||||
// Verify tags in appropriate contexts
|
||||
const frontendList = await helpers.taskMaster('list', ['--tag', 'frontend'], { cwd: testDir });
|
||||
const urgentList = await helpers.taskMaster('list', ['--tag', 'urgent'], { cwd: testDir });
|
||||
|
||||
if (!frontendList.stdout.includes(simpleTaskId)) {
|
||||
throw new Error('Task not found in frontend tag');
|
||||
}
|
||||
if (!urgentList.stdout.includes(simpleTaskId)) {
|
||||
throw new Error('Task not found in urgent tag');
|
||||
}
|
||||
});
|
||||
|
||||
// Test 8: Remove tags from task
|
||||
await runTest('Remove task tags', async () => {
|
||||
const result = await helpers.taskMaster(
|
||||
'update-task',
|
||||
[simpleTaskId, '--remove-tag', 'urgent'],
|
||||
{ cwd: testDir }
|
||||
);
|
||||
if (result.exitCode !== 0) {
|
||||
throw new Error(`Command failed: ${result.stderr}`);
|
||||
}
|
||||
|
||||
// Verify tag removed
|
||||
const urgentList = await helpers.taskMaster('list', ['--tag', 'urgent'], { cwd: testDir });
|
||||
if (urgentList.stdout.includes(simpleTaskId)) {
|
||||
throw new Error('Task still in removed tag');
|
||||
}
|
||||
});
|
||||
|
||||
// Test 9: Update with dependencies
|
||||
await runTest('Update task dependencies', async () => {
|
||||
// Create dependency task
|
||||
const depResult = await helpers.taskMaster(
|
||||
'add-task',
|
||||
['--title', 'Dependency task'],
|
||||
{ cwd: testDir }
|
||||
);
|
||||
const depTaskId = helpers.extractTaskId(depResult.stdout);
|
||||
|
||||
const result = await helpers.taskMaster(
|
||||
'update-task',
|
||||
[aiTaskId, '--add-dependency', depTaskId],
|
||||
{ cwd: testDir }
|
||||
);
|
||||
if (result.exitCode !== 0) {
|
||||
throw new Error(`Command failed: ${result.stderr}`);
|
||||
}
|
||||
|
||||
// Verify dependency added
|
||||
const showResult = await helpers.taskMaster('show', [aiTaskId], { cwd: testDir });
|
||||
if (!showResult.stdout.includes(depTaskId)) {
|
||||
throw new Error('Dependency not added to task');
|
||||
}
|
||||
});
|
||||
|
||||
// Test 10: Complex AI enhancement
|
||||
await runTest('Complex AI task enhancement', async () => {
|
||||
// Create task needing enhancement
|
||||
const enhanceResult = await helpers.taskMaster(
|
||||
'add-task',
|
||||
['--title', 'Basic API endpoint', '--description', 'Create user endpoint'],
|
||||
{ cwd: testDir }
|
||||
);
|
||||
const enhanceTaskId = helpers.extractTaskId(enhanceResult.stdout);
|
||||
|
||||
const result = await helpers.taskMaster(
|
||||
'update-task',
|
||||
[
|
||||
enhanceTaskId,
|
||||
'--prompt', 'Enhance with REST best practices, error handling, validation, and OpenAPI documentation',
|
||||
'--keep-original'
|
||||
],
|
||||
{ cwd: testDir, timeout: 120000 }
|
||||
);
|
||||
if (result.exitCode !== 0) {
|
||||
throw new Error(`Command failed: ${result.stderr}`);
|
||||
}
|
||||
|
||||
// Should preserve original and add enhancements
|
||||
const showResult = await helpers.taskMaster('show', [enhanceTaskId], { cwd: testDir });
|
||||
if (!showResult.stdout.includes('user endpoint')) {
|
||||
throw new Error('Original content lost during enhancement');
|
||||
}
|
||||
|
||||
// Check for enhancements
|
||||
const output = showResult.stdout.toLowerCase();
|
||||
const enhancements = ['validation', 'error', 'rest', 'openapi', 'documentation'];
|
||||
const foundEnhancements = enhancements.filter(e => output.includes(e)).length;
|
||||
|
||||
if (foundEnhancements < 3) {
|
||||
throw new Error('AI did not add sufficient enhancements');
|
||||
}
|
||||
});
|
||||
|
||||
// Test 11: Bulk property update
|
||||
await runTest('Update common properties across tasks', async () => {
|
||||
// Update all tasks to have a common property
|
||||
const taskIds = [simpleTaskId, aiTaskId, metaTaskId];
|
||||
|
||||
// This tests if update-task can handle multiple IDs (implementation dependent)
|
||||
// If not supported, test single updates in sequence
|
||||
for (const taskId of taskIds) {
|
||||
const result = await helpers.taskMaster(
|
||||
'update-task',
|
||||
[taskId, '--metadata', 'project=test-suite'],
|
||||
{ cwd: testDir }
|
||||
);
|
||||
if (result.exitCode !== 0) {
|
||||
throw new Error(`Failed to update task ${taskId}: ${result.stderr}`);
|
||||
}
|
||||
}
|
||||
|
||||
// Verify all have the metadata
|
||||
const tasksJson = helpers.readJson(`${testDir}/.taskmaster/tasks/tasks.json`);
|
||||
taskIds.forEach(taskId => {
|
||||
const task = tasksJson.tasks.find(t => t.id === taskId);
|
||||
if (!task.metadata || task.metadata.project !== 'test-suite') {
|
||||
throw new Error(`Task ${taskId} missing project metadata`);
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
// Test 12: Update completed task
|
||||
await runTest('Update completed task handling', async () => {
|
||||
// Complete a task first
|
||||
await helpers.taskMaster('set-status', [simpleTaskId, 'completed'], { cwd: testDir });
|
||||
|
||||
// Try to update it
|
||||
const result = await helpers.taskMaster(
|
||||
'update-task',
|
||||
[simpleTaskId, '--description', 'Trying to update completed task'],
|
||||
{ cwd: testDir, allowFailure: true }
|
||||
);
|
||||
|
||||
// Should either fail with clear message or succeed with warning
|
||||
if (result.exitCode !== 0) {
|
||||
if (!result.stderr.includes('completed')) {
|
||||
throw new Error('No clear message about updating completed task');
|
||||
}
|
||||
} else if (!result.stdout.includes('warning') && !result.stdout.includes('completed')) {
|
||||
throw new Error('No warning about updating completed task');
|
||||
}
|
||||
});
|
||||
|
||||
// Test 13: Update with context preservation
|
||||
await runTest('Context-aware AI update', async () => {
|
||||
// Create task with rich context
|
||||
const contextResult = await helpers.taskMaster(
|
||||
'add-task',
|
||||
['--prompt', 'Implement user profile page with React'],
|
||||
{ cwd: testDir }
|
||||
);
|
||||
const contextTaskId = helpers.extractTaskId(contextResult.stdout);
|
||||
|
||||
// Expand to add subtasks
|
||||
await helpers.taskMaster('expand', [contextTaskId], { cwd: testDir, timeout: 120000 });
|
||||
|
||||
// Update with context preservation
|
||||
const result = await helpers.taskMaster(
|
||||
'update-task',
|
||||
[contextTaskId, '--prompt', 'Add accessibility features', '--preserve-context'],
|
||||
{ cwd: testDir, timeout: 120000 }
|
||||
);
|
||||
if (result.exitCode !== 0) {
|
||||
throw new Error(`Command failed: ${result.stderr}`);
|
||||
}
|
||||
|
||||
// Should maintain React context and add accessibility
|
||||
const showResult = await helpers.taskMaster('show', [contextTaskId], { cwd: testDir });
|
||||
const output = showResult.stdout.toLowerCase();
|
||||
|
||||
if (!output.includes('react')) {
|
||||
throw new Error('Lost React context during update');
|
||||
}
|
||||
if (!output.includes('accessibility') && !output.includes('a11y') && !output.includes('aria')) {
|
||||
throw new Error('Accessibility features not added');
|
||||
}
|
||||
});
|
||||
|
||||
// Test 14: Update with estimation
|
||||
await runTest('Update task with time estimation', async () => {
|
||||
const result = await helpers.taskMaster(
|
||||
'update-task',
|
||||
[
|
||||
aiTaskId,
|
||||
'--estimate', '8h',
|
||||
'--metadata', 'story_points=5'
|
||||
],
|
||||
{ cwd: testDir }
|
||||
);
|
||||
if (result.exitCode !== 0) {
|
||||
throw new Error(`Command failed: ${result.stderr}`);
|
||||
}
|
||||
|
||||
// Verify estimation added
|
||||
const tasksJson = helpers.readJson(`${testDir}/.taskmaster/tasks/tasks.json`);
|
||||
const task = tasksJson.tasks.find(t => t.id === aiTaskId);
|
||||
|
||||
if (!task.estimate || !task.estimate.includes('8h')) {
|
||||
throw new Error('Time estimate not added');
|
||||
}
|
||||
if (!task.metadata || task.metadata.story_points !== '5') {
|
||||
throw new Error('Story points not added');
|
||||
}
|
||||
});
|
||||
|
||||
// Test 15: Performance - large description update
|
||||
await runTest('Performance - large content update', async () => {
|
||||
// Create large description
|
||||
const largeDescription = 'This is a detailed task description. '.repeat(100) +
|
||||
'\n\n## Requirements\n' +
|
||||
'- Requirement item\n'.repeat(50);
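			// Roughly 4-5 KB of text: 100 repetitions of a 37-character sentence
			// (~3.7 KB) plus the Requirements heading and 50 bullet lines (~1 KB).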
|
||||
|
||||
const startTime = Date.now();
|
||||
const result = await helpers.taskMaster(
|
||||
'update-task',
|
||||
[metaTaskId, '--description', largeDescription],
|
||||
{ cwd: testDir }
|
||||
);
|
||||
const duration = Date.now() - startTime;
|
||||
|
||||
if (result.exitCode !== 0) {
|
||||
throw new Error(`Command failed: ${result.stderr}`);
|
||||
}
|
||||
|
||||
logger.info(`Large update completed in ${duration}ms`);
|
||||
if (duration > 5000) {
|
||||
throw new Error(`Update too slow: ${duration}ms`);
|
||||
}
|
||||
|
||||
// Verify content was saved
|
||||
const showResult = await helpers.taskMaster('show', [metaTaskId], { cwd: testDir });
|
||||
if (!showResult.stdout.includes('detailed task description')) {
|
||||
throw new Error('Large description not saved properly');
|
||||
}
|
||||
});
|
||||
|
||||
// Calculate summary
|
||||
const totalTests = results.tests.length;
|
||||
const passedTests = results.tests.filter(t => t.status === 'passed').length;
|
||||
const failedTests = results.tests.filter(t => t.status === 'failed').length;
|
||||
|
||||
logger.info('\n=== Update-Task Test Summary ===');
|
||||
logger.info(`Total tests: ${totalTests}`);
|
||||
logger.info(`Passed: ${passedTests}`);
|
||||
logger.info(`Failed: ${failedTests}`);
|
||||
|
||||
if (failedTests > 0) {
|
||||
results.status = 'failed';
|
||||
logger.error(`\n${failedTests} tests failed`);
|
||||
} else {
|
||||
logger.success('\n✅ All update-task tests passed!');
|
||||
}
|
||||
|
||||
} catch (error) {
|
||||
results.status = 'failed';
|
||||
results.errors.push({
|
||||
test: 'update-task test suite',
|
||||
error: error.message,
|
||||
stack: error.stack
|
||||
});
|
||||
logger.error(`Update-task test suite failed: ${error.message}`);
|
||||
}
|
||||
|
||||
return results;
|
||||
}
483
tests/e2e/tests/commands/update-tasks.test.js
Normal file
@@ -0,0 +1,483 @@
/**
 * Comprehensive E2E tests for update-tasks command
 * Tests bulk task update functionality with various filters and AI capabilities
 */

export default async function testUpdateTasks(logger, helpers, context) {
|
||||
const { testDir } = context;
|
||||
const results = {
|
||||
status: 'passed',
|
||||
errors: [],
|
||||
tests: []
|
||||
};
|
||||
|
||||
async function runTest(name, testFn) {
|
||||
try {
|
||||
logger.info(`\nRunning: ${name}`);
|
||||
await testFn();
|
||||
results.tests.push({ name, status: 'passed' });
|
||||
logger.success(`✓ ${name}`);
|
||||
} catch (error) {
|
||||
results.tests.push({ name, status: 'failed', error: error.message });
|
||||
results.errors.push({ test: name, error: error.message });
|
||||
logger.error(`✗ ${name}: ${error.message}`);
|
||||
}
|
||||
}
|
||||
|
||||
try {
|
||||
logger.info('Starting comprehensive update-tasks tests...');
|
||||
|
||||
// Setup: Create a variety of tasks for bulk operations
|
||||
logger.info('Setting up test tasks for bulk operations...');
|
||||
|
||||
// Create tasks with different statuses
|
||||
const taskIds = [];
|
||||
|
||||
// Pending tasks
|
||||
for (let i = 1; i <= 3; i++) {
|
||||
const result = await helpers.taskMaster(
|
||||
'add-task',
|
||||
['--title', `Pending task ${i}`, '--priority', i === 1 ? 'high' : 'medium'],
|
||||
{ cwd: testDir }
|
||||
);
|
||||
taskIds.push(helpers.extractTaskId(result.stdout));
|
||||
}
|
||||
|
||||
// In-progress tasks
|
||||
for (let i = 1; i <= 2; i++) {
|
||||
const result = await helpers.taskMaster(
|
||||
'add-task',
|
||||
['--title', `In-progress task ${i}`],
|
||||
{ cwd: testDir }
|
||||
);
|
||||
const taskId = helpers.extractTaskId(result.stdout);
|
||||
taskIds.push(taskId);
|
||||
await helpers.taskMaster('set-status', [taskId, 'in_progress'], { cwd: testDir });
|
||||
}
|
||||
|
||||
// Tasks with tags
|
||||
await helpers.taskMaster('add-tag', ['backend'], { cwd: testDir });
|
||||
await helpers.taskMaster('add-tag', ['frontend'], { cwd: testDir });
|
||||
|
||||
for (let i = 1; i <= 2; i++) {
|
||||
const result = await helpers.taskMaster(
|
||||
'add-task',
|
||||
['--title', `Backend task ${i}`, '--tag', 'backend'],
|
||||
{ cwd: testDir }
|
||||
);
|
||||
taskIds.push(helpers.extractTaskId(result.stdout));
|
||||
}
|
||||
|
||||
// Test 1: Bulk update by status
|
||||
await runTest('Bulk update tasks by status', async () => {
|
||||
const result = await helpers.taskMaster(
|
||||
'update-tasks',
|
||||
['--status', 'pending', '--set-priority', 'high'],
|
||||
{ cwd: testDir }
|
||||
);
|
||||
if (result.exitCode !== 0) {
|
||||
throw new Error(`Command failed: ${result.stderr}`);
|
||||
}
|
||||
|
||||
// Should report number of tasks updated
|
||||
if (!result.stdout.includes('updated') || !result.stdout.match(/\d+/)) {
|
||||
throw new Error('No update count reported');
|
||||
}
|
||||
|
||||
// Verify all pending tasks now have high priority
|
||||
const listResult = await helpers.taskMaster('list', ['--status', 'pending'], { cwd: testDir });
|
||||
const pendingTasks = listResult.stdout.match(/\d+\s*\|/g) || [];
|
||||
|
||||
// Check a sample task
|
||||
if (pendingTasks.length > 0) {
|
||||
const showResult = await helpers.taskMaster('show', [taskIds[0]], { cwd: testDir });
|
||||
if (!showResult.stdout.includes('high') && !showResult.stdout.includes('High')) {
|
||||
throw new Error('Priority not updated for pending tasks');
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
// Test 2: Bulk update by tag
|
||||
await runTest('Bulk update tasks by tag', async () => {
|
||||
const result = await helpers.taskMaster(
|
||||
'update-tasks',
|
||||
['--tag', 'backend', '--add-metadata', 'team=backend-team'],
|
||||
{ cwd: testDir }
|
||||
);
|
||||
if (result.exitCode !== 0) {
|
||||
throw new Error(`Command failed: ${result.stderr}`);
|
||||
}
|
||||
|
||||
// Verify backend tasks have metadata
|
||||
const listResult = await helpers.taskMaster('list', ['--tag', 'backend'], { cwd: testDir });
|
||||
const backendTaskIds = (listResult.stdout.match(/\d+(?=\s*\|)/g) || []);
|
||||
|
||||
if (backendTaskIds.length > 0) {
|
||||
const tasksJson = helpers.readJson(`${testDir}/.taskmaster/tasks/tasks.json`);
|
||||
const backendTask = tasksJson.tasks.find(t => backendTaskIds.includes(t.id));
|
||||
|
||||
if (!backendTask || !backendTask.metadata || backendTask.metadata.team !== 'backend-team') {
|
||||
throw new Error('Metadata not added to backend tasks');
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
// Test 3: Bulk update with AI enhancement
|
||||
await runTest('Bulk AI enhancement', async () => {
|
||||
const result = await helpers.taskMaster(
|
||||
'update-tasks',
|
||||
['--tag', 'backend', '--enhance', '--prompt', 'Add security considerations'],
|
||||
{ cwd: testDir, timeout: 180000 }
|
||||
);
|
||||
if (result.exitCode !== 0) {
|
||||
throw new Error(`Command failed: ${result.stderr}`);
|
||||
}
|
||||
|
||||
// Check that tasks were enhanced
|
||||
const listResult = await helpers.taskMaster('list', ['--tag', 'backend'], { cwd: testDir });
|
||||
const backendTaskIds = (listResult.stdout.match(/\d+(?=\s*\|)/g) || []);
|
||||
|
||||
if (backendTaskIds.length > 0) {
|
||||
const showResult = await helpers.taskMaster('show', [backendTaskIds[0]], { cwd: testDir });
|
||||
const hasSecurityMention = showResult.stdout.toLowerCase().includes('security') ||
|
||||
showResult.stdout.toLowerCase().includes('secure') ||
|
||||
showResult.stdout.toLowerCase().includes('auth');
|
||||
|
||||
if (!hasSecurityMention) {
|
||||
throw new Error('AI enhancement did not add security considerations');
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
// Test 4: Bulk status change
|
||||
await runTest('Bulk status change', async () => {
|
||||
const result = await helpers.taskMaster(
|
||||
'update-tasks',
|
||||
['--priority', 'high', '--set-status', 'in_progress'],
|
||||
{ cwd: testDir }
|
||||
);
|
||||
if (result.exitCode !== 0) {
|
||||
throw new Error(`Command failed: ${result.stderr}`);
|
||||
}
|
||||
|
||||
// Verify high priority tasks are now in progress
|
||||
const listResult = await helpers.taskMaster('list', ['--priority', 'high'], { cwd: testDir });
|
||||
const highPriorityIds = (listResult.stdout.match(/\d+(?=\s*\|)/g) || []);
|
||||
|
||||
if (highPriorityIds.length > 0) {
|
||||
const showResult = await helpers.taskMaster('show', [highPriorityIds[0]], { cwd: testDir });
|
||||
if (!showResult.stdout.includes('in_progress') && !showResult.stdout.includes('In Progress')) {
|
||||
throw new Error('Status not updated for high priority tasks');
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
// Test 5: Bulk update with multiple filters
|
||||
await runTest('Bulk update with combined filters', async () => {
|
||||
// Add frontend tag to some tasks
|
||||
await helpers.taskMaster('update-task', [taskIds[0], '--add-tag', 'frontend'], { cwd: testDir });
|
||||
await helpers.taskMaster('update-task', [taskIds[1], '--add-tag', 'frontend'], { cwd: testDir });
|
||||
|
||||
const result = await helpers.taskMaster(
|
||||
'update-tasks',
|
||||
['--tag', 'frontend', '--status', 'in_progress', '--add-metadata', 'urgent=true'],
|
||||
{ cwd: testDir }
|
||||
);
|
||||
if (result.exitCode !== 0) {
|
||||
throw new Error(`Command failed: ${result.stderr}`);
|
||||
}
|
||||
|
||||
// Should only update tasks matching both filters
|
||||
const updateCount = result.stdout.match(/(\d+) tasks? updated/);
|
||||
if (!updateCount) {
|
||||
throw new Error('Update count not reported');
|
||||
}
|
||||
});
|
||||
|
||||
// Test 6: Bulk update all tasks
|
||||
await runTest('Update all tasks', async () => {
|
||||
const result = await helpers.taskMaster(
|
||||
'update-tasks',
|
||||
['--all', '--add-metadata', 'batch_update=test'],
|
||||
{ cwd: testDir }
|
||||
);
|
||||
if (result.exitCode !== 0) {
|
||||
throw new Error(`Command failed: ${result.stderr}`);
|
||||
}
|
||||
|
||||
// Verify all tasks have the metadata
|
||||
const tasksJson = helpers.readJson(`${testDir}/.taskmaster/tasks/tasks.json`);
|
||||
const tasksWithoutMetadata = tasksJson.tasks.filter(
|
||||
t => !t.metadata || t.metadata.batch_update !== 'test'
|
||||
);
|
||||
|
||||
if (tasksWithoutMetadata.length > 0) {
|
||||
throw new Error('Not all tasks were updated');
|
||||
}
|
||||
});
|
||||
|
||||
// Test 7: Bulk update with confirmation
|
||||
await runTest('Bulk update with safety check', async () => {
|
||||
// This test checks if dangerous operations require confirmation
|
||||
// The actual behavior depends on implementation
|
||||
const result = await helpers.taskMaster(
|
||||
'update-tasks',
|
||||
['--all', '--set-status', 'completed', '--force'],
|
||||
{ cwd: testDir }
|
||||
);
|
||||
|
||||
// Should either succeed with --force or show warning
|
||||
if (result.exitCode !== 0 && !result.stderr.includes('confirm')) {
|
||||
throw new Error('No safety check for dangerous bulk operation');
|
||||
}
|
||||
});
|
||||
|
||||
// Test 8: Bulk update by ID list
|
||||
await runTest('Bulk update specific task IDs', async () => {
|
||||
const targetIds = taskIds.slice(0, 3);
|
||||
const result = await helpers.taskMaster(
|
||||
'update-tasks',
|
||||
['--ids', targetIds.join(','), '--add-metadata', 'selected=true'],
|
||||
{ cwd: testDir }
|
||||
);
|
||||
if (result.exitCode !== 0) {
|
||||
throw new Error(`Command failed: ${result.stderr}`);
|
||||
}
|
||||
|
||||
// Verify only specified tasks were updated
|
||||
const tasksJson = helpers.readJson(`${testDir}/.taskmaster/tasks/tasks.json`);
|
||||
targetIds.forEach(id => {
|
||||
const task = tasksJson.tasks.find(t => t.id === id);
|
||||
if (!task.metadata || task.metadata.selected !== 'true') {
|
||||
throw new Error(`Task ${id} not updated`);
|
||||
}
|
||||
});
|
||||
|
||||
// Verify other tasks were not updated
|
||||
const otherTasks = tasksJson.tasks.filter(t => !targetIds.includes(t.id));
|
||||
otherTasks.forEach(task => {
|
||||
if (task.metadata && task.metadata.selected === 'true') {
|
||||
throw new Error(`Task ${task.id} incorrectly updated`);
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
// Test 9: Bulk update with complex query
|
||||
await runTest('Complex query bulk update', async () => {
|
||||
// Create tasks with specific patterns
|
||||
for (let i = 1; i <= 3; i++) {
|
||||
await helpers.taskMaster(
|
||||
'add-task',
|
||||
['--title', `API endpoint: /users/${i}`, '--tag', 'backend'],
|
||||
{ cwd: testDir }
|
||||
);
|
||||
}
|
||||
|
||||
const result = await helpers.taskMaster(
|
||||
'update-tasks',
|
||||
['--query', 'title:API endpoint', '--add-metadata', 'type=api'],
|
||||
{ cwd: testDir }
|
||||
);
|
||||
if (result.exitCode !== 0) {
|
||||
throw new Error(`Command failed: ${result.stderr}`);
|
||||
}
|
||||
|
||||
// Verify API tasks were updated
|
||||
const tasksJson = helpers.readJson(`${testDir}/.taskmaster/tasks/tasks.json`);
|
||||
const apiTasks = tasksJson.tasks.filter(t => t.title.includes('API endpoint'));
|
||||
|
||||
apiTasks.forEach(task => {
|
||||
if (!task.metadata || task.metadata.type !== 'api') {
|
||||
throw new Error('API tasks not properly updated');
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
// Test 10: Error handling - no matching tasks
|
||||
await runTest('Error handling - no matches', async () => {
|
||||
const result = await helpers.taskMaster(
|
||||
'update-tasks',
|
||||
['--tag', 'non-existent-tag', '--set-priority', 'low'],
|
||||
{ cwd: testDir, allowFailure: true }
|
||||
);
|
||||
|
||||
// Should indicate no tasks matched
|
||||
if (!result.stdout.includes('0 tasks') && !result.stdout.includes('No tasks')) {
|
||||
throw new Error('No clear message about zero matches');
|
||||
}
|
||||
});
|
||||
|
||||
// Test 11: Bulk update with dry run
|
||||
await runTest('Dry run mode', async () => {
|
||||
const result = await helpers.taskMaster(
|
||||
'update-tasks',
|
||||
['--status', 'pending', '--set-priority', 'low', '--dry-run'],
|
||||
{ cwd: testDir }
|
||||
);
|
||||
if (result.exitCode !== 0) {
|
||||
throw new Error(`Command failed: ${result.stderr}`);
|
||||
}
|
||||
|
||||
// Should show what would be updated
|
||||
if (!result.stdout.includes('would') || !result.stdout.includes('dry')) {
|
||||
throw new Error('Dry run output not clear');
|
||||
}
|
||||
|
||||
// Verify no actual changes
|
||||
const showResult = await helpers.taskMaster('show', [taskIds[0]], { cwd: testDir });
|
||||
if (showResult.stdout.includes('low') || showResult.stdout.includes('Low')) {
|
||||
throw new Error('Dry run actually modified tasks');
|
||||
}
|
||||
});
|
||||
|
||||
// Test 12: Bulk update with progress reporting
|
||||
await runTest('Progress reporting for large updates', async () => {
|
||||
// Create many tasks
|
||||
const manyTaskIds = [];
|
||||
for (let i = 1; i <= 20; i++) {
|
||||
const result = await helpers.taskMaster(
|
||||
'add-task',
|
||||
['--title', `Bulk task ${i}`],
|
||||
{ cwd: testDir }
|
||||
);
|
||||
manyTaskIds.push(helpers.extractTaskId(result.stdout));
|
||||
}
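			// The tasks are created one at a time so the extracted ids come back in a
			// predictable order; parallelising this setup would be faster but assumes
			// the CLI copes with concurrent writes to tasks.json.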
|
||||
|
||||
const result = await helpers.taskMaster(
|
||||
'update-tasks',
|
||||
['--ids', manyTaskIds.join(','), '--set-priority', 'medium', '--verbose'],
|
||||
{ cwd: testDir }
|
||||
);
|
||||
if (result.exitCode !== 0) {
|
||||
throw new Error(`Command failed: ${result.stderr}`);
|
||||
}
|
||||
|
||||
// Should show progress or summary
|
||||
const hasProgress = result.stdout.includes('updated') &&
|
||||
result.stdout.includes('20');
|
||||
if (!hasProgress) {
|
||||
throw new Error('No progress information for bulk update');
|
||||
}
|
||||
});
|
||||
|
||||
// Test 13: Bulk update with rollback on error
|
||||
await runTest('Rollback on error', async () => {
|
||||
// Try to update with invalid data that should fail partway through
|
||||
const result = await helpers.taskMaster(
|
||||
'update-tasks',
|
||||
['--all', '--add-dependency', '99999'],
|
||||
{ cwd: testDir, allowFailure: true }
|
||||
);
|
||||
|
||||
// Should fail and indicate rollback or atomic operation
|
||||
if (result.exitCode === 0) {
|
||||
throw new Error('Should have failed with invalid dependency');
|
||||
}
|
||||
|
||||
// Verify no partial updates occurred
|
||||
const tasksJson = helpers.readJson(`${testDir}/.taskmaster/tasks/tasks.json`);
|
||||
const tasksWithBadDep = tasksJson.tasks.filter(
|
||||
t => t.dependencies && t.dependencies.includes('99999')
|
||||
);
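			// Assumes dependency ids are stored as strings; if tasks.json keeps them
			// as numbers, includes('99999') would never match and this check would
			// pass even after a partial update.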
|
||||
|
||||
if (tasksWithBadDep.length > 0) {
|
||||
throw new Error('Partial update occurred - no rollback');
|
||||
}
|
||||
});
|
||||
|
||||
// Test 14: Bulk update with template
|
||||
await runTest('Bulk update with template', async () => {
|
||||
const result = await helpers.taskMaster(
|
||||
'update-tasks',
|
||||
[
|
||||
'--tag', 'backend',
|
||||
'--apply-template', 'standard-backend-task',
|
||||
'--template-fields', 'add testing requirements, add documentation needs'
|
||||
],
|
||||
{ cwd: testDir, timeout: 120000 }
|
||||
);
|
||||
if (result.exitCode !== 0) {
|
||||
throw new Error(`Command failed: ${result.stderr}`);
|
||||
}
|
||||
|
||||
// Check tasks were updated with template
|
||||
const listResult = await helpers.taskMaster('list', ['--tag', 'backend'], { cwd: testDir });
|
||||
const backendTaskIds = (listResult.stdout.match(/\d+(?=\s*\|)/g) || []);
|
||||
|
||||
if (backendTaskIds.length > 0) {
|
||||
const showResult = await helpers.taskMaster('show', [backendTaskIds[0]], { cwd: testDir });
|
||||
const hasTemplateContent = showResult.stdout.toLowerCase().includes('test') ||
|
||||
showResult.stdout.toLowerCase().includes('documentation');
|
||||
|
||||
if (!hasTemplateContent) {
|
||||
throw new Error('Template not applied to tasks');
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
// Test 15: Performance test - bulk update many tasks
|
||||
await runTest('Performance - update 50 tasks', async () => {
|
||||
// Create 50 tasks
|
||||
const perfTaskIds = [];
|
||||
for (let i = 1; i <= 50; i++) {
|
||||
const result = await helpers.taskMaster(
|
||||
'add-task',
|
||||
['--title', `Performance test task ${i}`],
|
||||
{ cwd: testDir }
|
||||
);
|
||||
perfTaskIds.push(helpers.extractTaskId(result.stdout));
|
||||
}
|
||||
|
||||
const startTime = Date.now();
|
||||
const result = await helpers.taskMaster(
|
||||
'update-tasks',
|
||||
['--ids', perfTaskIds.join(','), '--set-priority', 'low', '--add-metadata', 'perf_test=true'],
|
||||
{ cwd: testDir }
|
||||
);
|
||||
const duration = Date.now() - startTime;
|
||||
|
||||
if (result.exitCode !== 0) {
|
||||
throw new Error(`Command failed: ${result.stderr}`);
|
||||
}
|
||||
|
||||
logger.info(`Updated 50 tasks in ${duration}ms`);
|
||||
if (duration > 10000) {
|
||||
throw new Error(`Bulk update too slow: ${duration}ms`);
|
||||
}
|
||||
|
||||
// Verify all were updated
|
||||
const updateMatch = result.stdout.match(/(\d+) tasks? updated/);
|
||||
if (!updateMatch || parseInt(updateMatch[1]) !== 50) {
|
||||
throw new Error('Not all tasks were updated');
|
||||
}
|
||||
});
|
||||
|
||||
// Calculate summary
|
||||
const totalTests = results.tests.length;
|
||||
const passedTests = results.tests.filter(t => t.status === 'passed').length;
|
||||
const failedTests = results.tests.filter(t => t.status === 'failed').length;
|
||||
|
||||
logger.info('\n=== Update-Tasks Test Summary ===');
|
||||
logger.info(`Total tests: ${totalTests}`);
|
||||
logger.info(`Passed: ${passedTests}`);
|
||||
logger.info(`Failed: ${failedTests}`);
|
||||
|
||||
if (failedTests > 0) {
|
||||
results.status = 'failed';
|
||||
logger.error(`\n${failedTests} tests failed`);
|
||||
} else {
|
||||
logger.success('\n✅ All update-tasks tests passed!');
|
||||
}
|
||||
|
||||
} catch (error) {
|
||||
results.status = 'failed';
|
||||
results.errors.push({
|
||||
test: 'update-tasks test suite',
|
||||
error: error.message,
|
||||
stack: error.stack
|
||||
});
|
||||
logger.error(`Update-tasks test suite failed: ${error.message}`);
|
||||
}
|
||||
|
||||
return results;
|
||||
}
@@ -92,7 +92,7 @@ export class TestLogger {
	}

	addCost(cost) {
-		if (typeof cost === 'number' && !isNaN(cost)) {
+		if (typeof cost === 'number' && !Number.isNaN(cost)) {
			this.totalCost += cost;
		}
	}