feat: CLI & MCP progress tracking for parse-prd command (#1048)

* initial cutover

* update log to debug

* update tracker to pass units

* update test to match new base tracker format

* add streamTextService mocks

* remove unused imports

* Ensure the CLI waits for async main() completion

* refactor to reduce code duplication

* update comment

* reuse function

* ensure targetTag is defined in streaming mode

* avoid throwing inside process.exit spy

* check for null

* remove reference to generate

* fix formatting

* fix textStream assignment

* ensure no division by 0

* fix jest chalk mocks

* refactor for maintainability

* Improve bar chart calculation logic for consistent visual representation

* use custom streaming error types; fix mocks

* Update streamText extraction in parse-prd.js to match actual service response

* remove check - doesn't belong here

* update mocks

* remove streaming test that wasn't really doing anything

* add comment

* make parsing logic more DRY

* fix formatting

* Fix textStream extraction to match actual service response

* fix mock

* Add a cleanup method to ensure proper resource disposal and prevent memory leaks

* debounce progress updates to reduce UI flicker during rapid updates

* Implement timeout protection for streaming operations (60-second timeout) with automatic fallback to non-streaming mode.

* clear timeout properly

* Add a maximum buffer size limit (1MB) to prevent unbounded memory growth with very large streaming responses.

* fix formatting

* remove duplicate mock

* better docs

* fix formatting

* sanitize the dynamic property name

* Fix incorrect remaining progress calculation

* Use onError callback instead of console.warn

* Remove unused chalk import

* Add missing custom validator in fallback parsing configuration

* add custom validator parameter in fallback parsing

* chore: fix package-lock.json

* chore: large code refactor

* chore: increase timeout from 1 minute to 3 minutes

* fix: refactor and fix streaming

* Merge remote-tracking branch 'origin/next' into joedanz/parse-prd-progress

* fix: cleanup and fix unit tests

* chore: fix unit tests

* chore: fix format

* chore: run format

* chore: fix weird CI unit test error

* chore: fix format

---------

Co-authored-by: Ralph Khreish <35776126+Crunchyman-ralph@users.noreply.github.com>
Author: Joe Danziger
Date: 2025-08-12 16:37:07 -04:00
Committed by: GitHub
Parent: fc47714340
Commit: e3ed4d7c14
39 changed files with 6993 additions and 1137 deletions


@@ -0,0 +1,97 @@
# Task Master Progress Testing Guide
Quick reference for testing streaming/non-streaming functionality with token tracking.
## 🎯 Test Modes
1. **MCP Streaming** - Has `reportProgress` + `mcpLog`, shows emoji indicators (🔴🟠🟢)
2. **CLI Streaming** - No `reportProgress`, shows terminal progress bars
3. **Non-Streaming** - No progress reporting, single response (mode selection sketched below)
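How a command might choose among these modes, as a minimal sketch: `reportProgress` and `mcpLog` are the option names this PR threads through, while the helper name and the TTY check are assumptions:
```javascript
// Hypothetical helper, not the PR's implementation: infer the progress mode
// from what the caller provides. Only `reportProgress`/`mcpLog` are real
// option names from this PR; the TTY heuristic is an assumption.
function resolveProgressMode({ reportProgress, mcpLog } = {}) {
	if (typeof reportProgress === 'function' && mcpLog) {
		return 'mcp-streaming'; // emoji indicators reported via reportProgress
	}
	if (process.stdout.isTTY) {
		return 'cli-streaming'; // terminal progress bars
	}
	return 'non-streaming'; // single response, no progress reporting
}
```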
## 🚀 Quick Commands
```bash
# Test Scripts (accept: mcp-streaming, cli-streaming, non-streaming, both, all)
node test-parse-prd.js [mode]
node test-analyze-complexity.js [mode]
node test-expand.js [mode] [num_subtasks]
node test-expand-all.js [mode] [num_subtasks]
node parse-prd-analysis.js [accuracy|complexity|all]
# CLI Commands
node scripts/dev.js parse-prd test.txt # Local dev (streaming)
node scripts/dev.js analyze-complexity --research
node scripts/dev.js expand --id=1 --force
node scripts/dev.js expand --all --force
task-master [command] # Global CLI (non-streaming)
```
## ✅ Success Indicators
### Priority & Complexity Indicators
- **Priority**: 🔴🔴🔴 (high), 🟠🟠⚪ (medium), 🟢⚪⚪ (low)
- **Complexity**: ●●● (7-10), ●●○ (4-6), ●○○ (1-3) (mapping sketched below)
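A sketch of the glyph mapping (the glyphs and score ranges are from this guide; the lookup code is illustrative):
```javascript
// Priority -> emoji triplet; complexity score (1-10) -> filled-dot triplet.
const PRIORITY_GLYPHS = { high: '🔴🔴🔴', medium: '🟠🟠⚪', low: '🟢⚪⚪' };
const complexityGlyphs = (score) =>
	score >= 7 ? '●●●' : score >= 4 ? '●●○' : '●○○';
```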
### Token Format
`Tokens (I/O): 2,150/1,847 ($0.0423)` (~4 chars per token)
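A sketch of how that line could be produced. The ~4 chars/token heuristic is the one quoted above; the function names are illustrative, not from the PR:
```javascript
// estimateTokens() gives the rough counts; formatTokenLine() builds the summary.
const estimateTokens = (text) => Math.ceil(text.length / 4);

function formatTokenLine(inputTokens, outputTokens, cost) {
	const fmt = (n) => n.toLocaleString('en-US');
	return `Tokens (I/O): ${fmt(inputTokens)}/${fmt(outputTokens)} ($${cost.toFixed(4)})`;
}

// formatTokenLine(2150, 1847, 0.0423) -> 'Tokens (I/O): 2,150/1,847 ($0.0423)'
```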
### Progress Bars
```
Single: Generating subtasks... |████████░░| 80% (4/5)
Dual: Expanding 3 tasks | Task 2/3 |████████░░| 66%
Generating 5 subtasks... |██████░░░░| 60%
```
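A minimal renderer that reproduces the single-bar line above; the glyphs match the output shown, but the function itself is illustrative rather than the PR's code:
```javascript
// Clamp the ratio so rounding can never overfill the bar, and guard the
// division so total = 0 renders an empty bar instead of NaN.
function renderBar(current, total, width = 10) {
	const ratio = total > 0 ? Math.min(current / total, 1) : 0;
	const filled = Math.round(ratio * width);
	return `|${'█'.repeat(filled)}${'░'.repeat(width - filled)}| ${Math.floor(ratio * 100)}% (${current}/${total})`;
}

// renderBar(4, 5) -> '|████████░░| 80% (4/5)'
```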
### Fractional Progress
`(completedTasks + currentSubtask/totalSubtasks) / totalTasks`
Example: 33% → 46% → 60% → 66% → 80% → 93% → 100%
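The quoted sequence falls out of this formula for 3 tasks of 5 subtasks each, with reports after subtasks 2 and 4 plus each task boundary. A worked sketch (the division is reordered into integer arithmetic so the exact boundaries 60% and 80% cannot floor to 59/79 through float rounding):
```javascript
// pct() is the guide's formula with the division reordered:
// (done + sub/subTotal) / taskTotal == (done*subTotal + sub) / (subTotal*taskTotal)
const pct = (done, sub, subTotal, taskTotal) =>
	Math.floor(((done * subTotal + sub) * 100) / (subTotal * taskTotal));

// [completedTasks, currentSubtask] at each report point
const steps = [[1, 0], [1, 2], [1, 4], [2, 0], [2, 2], [2, 4], [3, 0]];
console.log(steps.map(([done, sub]) => pct(done, sub, 5, 3)).join('% → ') + '%');
// -> 33% → 46% → 60% → 66% → 80% → 93% → 100%
```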
## 🐛 Quick Fixes
| Issue | Fix |
|-------|-----|
| No streaming | Check `reportProgress` is passed |
| NaN% progress | Filter duplicate `subtask_progress` events (see sketch below) |
| Missing tokens | Check `.env` has API keys |
| Broken bars | Ensure terminal width is greater than 80 columns |
| `projectRoot.split` error | Pass `projectRoot`, not `session` |
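One way to drop the duplicate `subtask_progress` events behind the NaN% symptom. A sketch that assumes events carry `type` and `progress` fields (not confirmed by the PR):
```javascript
// Stateful filter: reject a subtask_progress event that repeats the previous
// event's type and progress value; pass everything else through.
function makeProgressFilter() {
	let last = null;
	return (event) => {
		const duplicate =
			last !== null &&
			event.type === 'subtask_progress' &&
			last.type === event.type &&
			last.progress === event.progress;
		last = event;
		return !duplicate;
	};
}

// const shouldReport = makeProgressFilter();
// if (shouldReport(event)) await reportProgress(event);
```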
```bash
# Debug
TASKMASTER_DEBUG=true node test-expand.js
npm run lint
```
## 📊 Benchmarks
- Single task: 10-20s (5 subtasks)
- Expand all: 30-45s (3 tasks)
- Streaming: ~10-20% faster
- Updates: Every 2-5s
## 🔄 Test Workflow
```bash
# Quick check
node test-parse-prd.js both && npm test
# Full suite (before release)
for test in parse-prd analyze-complexity expand expand-all; do
node test-$test.js all
done
node parse-prd-analysis.js all
npm test
```
## 🎯 MCP Tool Example
```javascript
{
"tool": "parse_prd",
"args": {
"input": "prd.txt",
"numTasks": "8",
"force": true,
"projectRoot": "/path/to/project"
}
}
```


@@ -0,0 +1,334 @@
#!/usr/bin/env node
/**
* parse-prd-analysis.js
*
* Detailed timing and accuracy analysis for parse-prd progress reporting.
* Tests different task generation complexities using the sample PRD from fixtures.
* Validates real-time characteristics and focuses on progress behavior and performance metrics.
* Uses tests/fixtures/sample-prd.txt for consistent testing across all scenarios.
*/
import fs from 'fs';
import path from 'path';
import chalk from 'chalk';
import { fileURLToPath } from 'url';
const __filename = fileURLToPath(import.meta.url);
const __dirname = path.dirname(__filename);
import parsePRD from '../../../scripts/modules/task-manager/parse-prd/index.js';
// Use the same project root as the main test file
const PROJECT_ROOT = path.resolve(__dirname, '..', '..', '..');
/**
* Get the path to the sample PRD file
*/
function getSamplePRDPath() {
return path.resolve(PROJECT_ROOT, 'tests', 'fixtures', 'sample-prd.txt');
}
/**
* Detailed Progress Reporter for timing analysis
*/
class DetailedProgressReporter {
constructor() {
this.progressHistory = [];
this.startTime = Date.now();
this.lastProgress = 0;
}
async reportProgress(data) {
const timestamp = Date.now() - this.startTime;
const timeSinceLastProgress =
this.progressHistory.length > 0
? timestamp -
this.progressHistory[this.progressHistory.length - 1].timestamp
: timestamp;
const entry = {
timestamp,
timeSinceLastProgress,
...data
};
this.progressHistory.push(entry);
const percentage = data.total
? Math.round((data.progress / data.total) * 100)
: 0;
console.log(
chalk.blue(`[${timestamp}ms] (+${timeSinceLastProgress}ms)`),
chalk.green(`${percentage}%`),
`(${data.progress}/${data.total})`,
chalk.yellow(data.message)
);
}
getAnalysis() {
if (this.progressHistory.length === 0) return null;
const totalDuration =
this.progressHistory[this.progressHistory.length - 1].timestamp;
const intervals = this.progressHistory
.slice(1)
.map((entry) => entry.timeSinceLastProgress);
const avgInterval =
intervals.length > 0
? intervals.reduce((a, b) => a + b, 0) / intervals.length
: 0;
const minInterval = intervals.length > 0 ? Math.min(...intervals) : 0;
const maxInterval = intervals.length > 0 ? Math.max(...intervals) : 0;
return {
totalReports: this.progressHistory.length,
totalDuration,
avgInterval: Math.round(avgInterval),
minInterval,
maxInterval,
intervals
};
}
printDetailedAnalysis() {
const analysis = this.getAnalysis();
if (!analysis) {
console.log(chalk.red('No progress data to analyze'));
return;
}
console.log(chalk.cyan('\n=== Detailed Progress Analysis ==='));
console.log(`Total Progress Reports: ${analysis.totalReports}`);
console.log(`Total Duration: ${analysis.totalDuration}ms`);
console.log(`Average Interval: ${analysis.avgInterval}ms`);
console.log(`Min Interval: ${analysis.minInterval}ms`);
console.log(`Max Interval: ${analysis.maxInterval}ms`);
console.log(chalk.cyan('\n=== Progress Timeline ==='));
this.progressHistory.forEach((entry, index) => {
const percentage = entry.total
? Math.round((entry.progress / entry.total) * 100)
: 0;
const intervalText =
index > 0 ? ` (+${entry.timeSinceLastProgress}ms)` : '';
console.log(
`${index + 1}. [${entry.timestamp}ms]${intervalText} ${percentage}% - ${entry.message}`
);
});
// Check for real-time characteristics
console.log(chalk.cyan('\n=== Real-time Characteristics ==='));
const hasRealTimeUpdates = analysis.intervals.some(
(interval) => interval < 10000
); // Less than 10s
const hasConsistentUpdates = analysis.intervals.length > 3;
const hasProgressiveUpdates = this.progressHistory.every(
(entry, index) =>
index === 0 ||
entry.progress >= this.progressHistory[index - 1].progress
);
console.log(`✅ Real-time updates: ${hasRealTimeUpdates ? 'YES' : 'NO'}`);
console.log(
`✅ Consistent updates: ${hasConsistentUpdates ? 'YES' : 'NO'}`
);
console.log(
`✅ Progressive updates: ${hasProgressiveUpdates ? 'YES' : 'NO'}`
);
}
}
/**
* Get PRD path for complexity testing
* For complexity testing, we'll use the same sample PRD but request different numbers of tasks
* This provides more realistic testing since the AI will generate different complexity based on task count
*/
function getPRDPathForComplexity(complexity = 'medium') {
// Always use the same sample PRD file - complexity will be controlled by task count
return getSamplePRDPath();
}
/**
* Test streaming with different task generation complexities
* Uses the same sample PRD but requests different numbers of tasks to test complexity scaling
*/
async function testStreamingComplexity() {
console.log(
chalk.cyan(
'🧪 Testing Streaming with Different Task Generation Complexities\n'
)
);
const complexities = ['simple', 'medium', 'complex'];
const results = [];
for (const complexity of complexities) {
console.log(
chalk.yellow(`\n--- Testing ${complexity.toUpperCase()} Complexity ---`)
);
const testPRDPath = getPRDPathForComplexity(complexity);
const testTasksPath = path.join(__dirname, `test-tasks-${complexity}.json`);
// Clean up existing file
if (fs.existsSync(testTasksPath)) {
fs.unlinkSync(testTasksPath);
}
const progressReporter = new DetailedProgressReporter();
const expectedTasks =
complexity === 'simple' ? 3 : complexity === 'medium' ? 6 : 10;
try {
const startTime = Date.now();
await parsePRD(testPRDPath, testTasksPath, expectedTasks, {
force: true,
append: false,
research: false,
reportProgress: progressReporter.reportProgress.bind(progressReporter),
projectRoot: PROJECT_ROOT
});
const endTime = Date.now();
const duration = endTime - startTime;
console.log(
chalk.green(`${complexity} complexity completed in ${duration}ms`)
);
progressReporter.printDetailedAnalysis();
results.push({
complexity,
duration,
analysis: progressReporter.getAnalysis()
});
} catch (error) {
console.error(
chalk.red(`${complexity} complexity failed: ${error.message}`)
);
results.push({
complexity,
error: error.message
});
} finally {
// Clean up (only the tasks file, not the PRD since we're using the fixture)
if (fs.existsSync(testTasksPath)) fs.unlinkSync(testTasksPath);
}
}
// Summary
console.log(chalk.cyan('\n=== Complexity Test Summary ==='));
results.forEach((result) => {
if (result.error) {
console.log(`${result.complexity}: ❌ FAILED - ${result.error}`);
} else {
console.log(
`${result.complexity}: ✅ ${result.duration}ms (${result.analysis.totalReports} reports)`
);
}
});
return results;
}
/**
* Test progress accuracy
*/
async function testProgressAccuracy() {
console.log(chalk.cyan('🧪 Testing Progress Accuracy\n'));
const testPRDPath = getSamplePRDPath();
const testTasksPath = path.join(__dirname, 'test-accuracy-tasks.json');
// Clean up existing file
if (fs.existsSync(testTasksPath)) {
fs.unlinkSync(testTasksPath);
}
const progressReporter = new DetailedProgressReporter();
try {
await parsePRD(testPRDPath, testTasksPath, 8, {
force: true,
append: false,
research: false,
reportProgress: progressReporter.reportProgress.bind(progressReporter),
projectRoot: PROJECT_ROOT
});
console.log(chalk.green('✅ Progress accuracy test completed'));
progressReporter.printDetailedAnalysis();
// Additional accuracy checks
const analysis = progressReporter.getAnalysis();
console.log(chalk.cyan('\n=== Accuracy Metrics ==='));
console.log(
`Progress consistency: ${analysis.intervals.every((i) => i > 0) ? 'PASS' : 'FAIL'}`
);
console.log(
`Reasonable intervals: ${analysis.intervals.every((i) => i < 30000) ? 'PASS' : 'FAIL'}`
);
console.log(
`Expected report count: ${analysis.totalReports >= 8 ? 'PASS' : 'FAIL'}`
);
} catch (error) {
console.error(
chalk.red(`❌ Progress accuracy test failed: ${error.message}`)
);
} finally {
// Clean up (only the tasks file, not the PRD since we're using the fixture)
if (fs.existsSync(testTasksPath)) fs.unlinkSync(testTasksPath);
}
}
/**
* Main test runner
*/
async function main() {
const args = process.argv.slice(2);
const testType = args[0] || 'accuracy';
console.log(chalk.bold.cyan('🚀 Task Master Detailed Progress Tests\n'));
console.log(chalk.blue(`Test type: ${testType}\n`));
try {
switch (testType.toLowerCase()) {
case 'accuracy':
await testProgressAccuracy();
break;
case 'complexity':
await testStreamingComplexity();
break;
case 'all':
console.log(chalk.yellow('Running all detailed tests...\n'));
await testProgressAccuracy();
console.log('\n' + '='.repeat(60) + '\n');
await testStreamingComplexity();
break;
default:
console.log(chalk.red(`Unknown test type: ${testType}`));
console.log(
chalk.yellow('Available options: accuracy, complexity, all')
);
process.exit(1);
}
console.log(chalk.green('\n🎉 Detailed tests completed successfully!'));
} catch (error) {
console.error(chalk.red(`\n❌ Test failed: ${error.message}`));
console.error(chalk.red(error.stack));
process.exit(1);
}
}
// Run if called directly
if (import.meta.url === `file://${process.argv[1]}`) {
// Top-level await is valid here: this file is an ES module (Node ≥ 14.8)
await main();
}


@@ -0,0 +1,577 @@
#!/usr/bin/env node
/**
* test-parse-prd.js
*
* Comprehensive integration test for parse-prd functionality.
* Tests MCP streaming, CLI streaming, and non-streaming modes.
* Validates token tracking, message formats, and priority indicators across all contexts.
*/
import fs from 'fs';
import path from 'path';
import chalk from 'chalk';
import { fileURLToPath } from 'url';
// Get current directory
const __filename = fileURLToPath(import.meta.url);
const __dirname = path.dirname(__filename);
// Get project root (three levels up from tests/manual/progress/)
const PROJECT_ROOT = path.resolve(__dirname, '..', '..', '..');
// Import the parse-prd function
import parsePRD from '../../../scripts/modules/task-manager/parse-prd/index.js';
/**
* Mock Progress Reporter for testing
*/
class MockProgressReporter {
constructor(enableDebug = true) {
this.enableDebug = enableDebug;
this.progressHistory = [];
this.startTime = Date.now();
}
async reportProgress(data) {
const timestamp = Date.now() - this.startTime;
const entry = {
timestamp,
...data
};
this.progressHistory.push(entry);
if (this.enableDebug) {
const percentage = data.total
? Math.round((data.progress / data.total) * 100)
: 0;
console.log(
chalk.blue(`[${timestamp}ms]`),
chalk.green(`${percentage}%`),
chalk.yellow(data.message)
);
}
}
getProgressHistory() {
return this.progressHistory;
}
printSummary() {
console.log(chalk.green('\n=== Progress Summary ==='));
console.log(`Total progress reports: ${this.progressHistory.length}`);
console.log(
`Duration: ${this.progressHistory[this.progressHistory.length - 1]?.timestamp || 0}ms`
);
this.progressHistory.forEach((entry, index) => {
const percentage = entry.total
? Math.round((entry.progress / entry.total) * 100)
: 0;
console.log(
`${index + 1}. [${entry.timestamp}ms] ${percentage}% - ${entry.message}`
);
});
// Check for expected message formats
const hasInitialMessage = this.progressHistory.some(
(entry) =>
entry.message.includes('Starting PRD analysis') &&
entry.message.includes('Input:') &&
entry.message.includes('tokens')
);
// Make regex more flexible to handle potential whitespace variations
const hasTaskMessages = this.progressHistory.some((entry) =>
/^[🔴🟠🟢]{3} Task \d+\/\d+ - .+ \| ~Output: \d+ tokens/u.test(
entry.message.trim()
)
);
const hasCompletionMessage = this.progressHistory.some(
(entry) =>
entry.message.includes('✅ Task Generation Completed') &&
entry.message.includes('Tokens (I/O):')
);
console.log(chalk.cyan('\n=== Message Format Validation ==='));
console.log(
`✅ Initial message format: ${hasInitialMessage ? 'PASS' : 'FAIL'}`
);
console.log(`✅ Task message format: ${hasTaskMessages ? 'PASS' : 'FAIL'}`);
console.log(
`✅ Completion message format: ${hasCompletionMessage ? 'PASS' : 'FAIL'}`
);
}
}
/**
* Mock MCP Logger for testing
*/
class MockMCPLogger {
constructor(enableDebug = true) {
this.enableDebug = enableDebug;
this.logs = [];
}
_log(level, ...args) {
const entry = {
level,
timestamp: Date.now(),
message: args.join(' ')
};
this.logs.push(entry);
if (this.enableDebug) {
const color =
{
info: chalk.blue,
warn: chalk.yellow,
error: chalk.red,
debug: chalk.gray,
success: chalk.green
}[level] || chalk.white;
console.log(color(`[${level.toUpperCase()}]`), ...args);
}
}
info(...args) {
this._log('info', ...args);
}
warn(...args) {
this._log('warn', ...args);
}
error(...args) {
this._log('error', ...args);
}
debug(...args) {
this._log('debug', ...args);
}
success(...args) {
this._log('success', ...args);
}
getLogs() {
return this.logs;
}
}
/**
* Get the path to the sample PRD file
*/
function getSamplePRDPath() {
return path.resolve(PROJECT_ROOT, 'tests', 'fixtures', 'sample-prd.txt');
}
/**
* Create a basic test config file
*/
function createTestConfig() {
const testConfig = {
models: {
main: {
provider: 'anthropic',
modelId: 'claude-3-5-sonnet',
maxTokens: 64000,
temperature: 0.2
},
research: {
provider: 'perplexity',
modelId: 'sonar-pro',
maxTokens: 8700,
temperature: 0.1
},
fallback: {
provider: 'anthropic',
modelId: 'claude-3-5-sonnet',
maxTokens: 64000,
temperature: 0.2
}
},
global: {
logLevel: 'info',
debug: false,
defaultSubtasks: 5,
defaultPriority: 'medium',
projectName: 'Task Master Test',
ollamaBaseURL: 'http://localhost:11434/api',
bedrockBaseURL: 'https://bedrock.us-east-1.amazonaws.com'
}
};
const taskmasterDir = path.join(__dirname, '.taskmaster');
const configPath = path.join(taskmasterDir, 'config.json');
// Create .taskmaster directory if it doesn't exist
if (!fs.existsSync(taskmasterDir)) {
fs.mkdirSync(taskmasterDir, { recursive: true });
}
fs.writeFileSync(configPath, JSON.stringify(testConfig, null, 2));
return configPath;
}
/**
* Setup test files and configuration
*/
function setupTestFiles(testName) {
const testPRDPath = getSamplePRDPath();
const testTasksPath = path.join(__dirname, `test-${testName}-tasks.json`);
const configPath = createTestConfig();
// Clean up existing files
if (fs.existsSync(testTasksPath)) {
fs.unlinkSync(testTasksPath);
}
return { testPRDPath, testTasksPath, configPath };
}
/**
* Clean up test files
*/
function cleanupTestFiles(testTasksPath, configPath) {
if (fs.existsSync(testTasksPath)) fs.unlinkSync(testTasksPath);
if (fs.existsSync(configPath)) fs.unlinkSync(configPath);
}
/**
* Run parsePRD with configurable options
*/
async function runParsePRD(testPRDPath, testTasksPath, numTasks, options = {}) {
const startTime = Date.now();
const result = await parsePRD(testPRDPath, testTasksPath, numTasks, {
force: true,
append: false,
research: false,
projectRoot: PROJECT_ROOT,
...options
});
const endTime = Date.now();
const duration = endTime - startTime;
return { result, duration };
}
/**
* Verify task file existence and structure
*/
function verifyTaskResults(testTasksPath) {
if (fs.existsSync(testTasksPath)) {
const tasksData = JSON.parse(fs.readFileSync(testTasksPath, 'utf8'));
console.log(
chalk.green(
`\n✅ Tasks file created with ${tasksData.tasks.length} tasks`
)
);
// Verify task structure
const firstTask = tasksData.tasks[0];
if (firstTask && firstTask.id && firstTask.title && firstTask.description) {
console.log(chalk.green('✅ Task structure is valid'));
return true;
} else {
console.log(chalk.red('❌ Task structure is invalid'));
return false;
}
} else {
console.log(chalk.red('❌ Tasks file was not created'));
return false;
}
}
/**
* Print MCP-specific logs and validation
*/
function printMCPResults(mcpLogger, progressReporter) {
// Print progress summary
progressReporter.printSummary();
// Print MCP logs
console.log(chalk.cyan('\n=== MCP Logs ==='));
const logs = mcpLogger.getLogs();
logs.forEach((log, index) => {
const color =
{
info: chalk.blue,
warn: chalk.yellow,
error: chalk.red,
debug: chalk.gray,
success: chalk.green
}[log.level] || chalk.white;
console.log(
`${index + 1}. ${color(`[${log.level.toUpperCase()}]`)} ${log.message}`
);
});
// Verify MCP-specific message formats (should use emoji indicators)
const hasEmojiIndicators = progressReporter
.getProgressHistory()
.some((entry) => /[🔴🟠🟢]/u.test(entry.message));
console.log(chalk.cyan('\n=== MCP-Specific Validation ==='));
console.log(
`✅ Emoji priority indicators: ${hasEmojiIndicators ? 'PASS' : 'FAIL'}`
);
return { hasEmojiIndicators, logs };
}
/**
* Test MCP streaming with proper MCP context
*/
async function testMCPStreaming(numTasks = 10) {
console.log(chalk.cyan('🧪 Testing MCP Streaming Functionality\n'));
const { testPRDPath, testTasksPath, configPath } = setupTestFiles('mcp');
const progressReporter = new MockProgressReporter(true);
const mcpLogger = new MockMCPLogger(true); // Enable debug for MCP context
try {
console.log(chalk.yellow('Starting MCP streaming test...'));
const { result, duration } = await runParsePRD(
testPRDPath,
testTasksPath,
numTasks,
{
reportProgress: progressReporter.reportProgress.bind(progressReporter),
mcpLog: mcpLogger // Add MCP context - this is the key difference
}
);
console.log(
chalk.green(`\n✅ MCP streaming test completed in ${duration}ms`)
);
const { hasEmojiIndicators, logs } = printMCPResults(
mcpLogger,
progressReporter
);
const isValidStructure = verifyTaskResults(testTasksPath);
return {
success: true,
duration,
progressHistory: progressReporter.getProgressHistory(),
mcpLogs: logs,
hasEmojiIndicators,
isValidStructure,
result
};
} catch (error) {
console.error(chalk.red(`❌ MCP streaming test failed: ${error.message}`));
return {
success: false,
error: error.message
};
} finally {
cleanupTestFiles(testTasksPath, configPath);
}
}
/**
* Test CLI streaming (no reportProgress)
*/
async function testCLIStreaming(numTasks = 10) {
console.log(chalk.cyan('🧪 Testing CLI Streaming (No Progress Reporter)\n'));
const { testPRDPath, testTasksPath, configPath } = setupTestFiles('cli');
try {
console.log(chalk.yellow('Starting CLI streaming test...'));
// No reportProgress provided; CLI text mode uses the default streaming reporter
const { result, duration } = await runParsePRD(
testPRDPath,
testTasksPath,
numTasks
);
console.log(
chalk.green(`\n✅ CLI streaming test completed in ${duration}ms`)
);
const isValidStructure = verifyTaskResults(testTasksPath);
return {
success: true,
duration,
isValidStructure,
result
};
} catch (error) {
console.error(chalk.red(`❌ CLI streaming test failed: ${error.message}`));
return {
success: false,
error: error.message
};
} finally {
cleanupTestFiles(testTasksPath, configPath);
}
}
/**
* Test non-streaming functionality
*/
async function testNonStreaming(numTasks = 10) {
console.log(chalk.cyan('🧪 Testing Non-Streaming Functionality\n'));
const { testPRDPath, testTasksPath, configPath } =
setupTestFiles('non-streaming');
try {
console.log(chalk.yellow('Starting non-streaming test...'));
// Force non-streaming by not providing reportProgress
const { result, duration } = await runParsePRD(
testPRDPath,
testTasksPath,
numTasks
);
console.log(
chalk.green(`\n✅ Non-streaming test completed in ${duration}ms`)
);
const isValidStructure = verifyTaskResults(testTasksPath);
return {
success: true,
duration,
isValidStructure,
result
};
} catch (error) {
console.error(chalk.red(`❌ Non-streaming test failed: ${error.message}`));
return {
success: false,
error: error.message
};
} finally {
cleanupTestFiles(testTasksPath, configPath);
}
}
/**
* Compare results between streaming and non-streaming
*/
function compareResults(streamingResult, nonStreamingResult) {
console.log(chalk.cyan('\n=== Results Comparison ==='));
if (!streamingResult.success || !nonStreamingResult.success) {
console.log(chalk.red('❌ Cannot compare - one or both tests failed'));
return;
}
console.log(`Streaming duration: ${streamingResult.duration}ms`);
console.log(`Non-streaming duration: ${nonStreamingResult.duration}ms`);
const durationDiff = Math.abs(
streamingResult.duration - nonStreamingResult.duration
);
const durationDiffPercent = Math.round(
(durationDiff /
Math.max(streamingResult.duration, nonStreamingResult.duration)) *
100
);
console.log(
`Duration difference: ${durationDiff}ms (${durationDiffPercent}%)`
);
if (streamingResult.progressHistory) {
console.log(
`Streaming progress reports: ${streamingResult.progressHistory.length}`
);
}
console.log(chalk.green('✅ Both methods completed successfully'));
}
/**
* Main test runner
*/
async function main() {
const args = process.argv.slice(2);
const testType = args[0] || 'streaming';
const numTasks = parseInt(args[1], 10) || 8;
console.log(chalk.bold.cyan('🚀 Task Master PRD Streaming Tests\n'));
console.log(chalk.blue(`Test type: ${testType}`));
console.log(chalk.blue(`Number of tasks: ${numTasks}\n`));
try {
switch (testType.toLowerCase()) {
case 'mcp':
case 'mcp-streaming':
await testMCPStreaming(numTasks);
break;
case 'cli':
case 'cli-streaming':
await testCLIStreaming(numTasks);
break;
case 'non-streaming':
case 'non':
await testNonStreaming(numTasks);
break;
case 'both': {
console.log(
chalk.yellow(
'Running both MCP streaming and non-streaming tests...\n'
)
);
const mcpStreamingResult = await testMCPStreaming(numTasks);
console.log('\n' + '='.repeat(60) + '\n');
const nonStreamingResult = await testNonStreaming(numTasks);
compareResults(mcpStreamingResult, nonStreamingResult);
break;
}
case 'all': {
console.log(chalk.yellow('Running all test types...\n'));
const mcpResult = await testMCPStreaming(numTasks);
console.log('\n' + '='.repeat(60) + '\n');
const cliResult = await testCLIStreaming(numTasks);
console.log('\n' + '='.repeat(60) + '\n');
const nonStreamResult = await testNonStreaming(numTasks);
console.log(chalk.cyan('\n=== All Tests Summary ==='));
console.log(
`MCP Streaming: ${mcpResult.success ? '✅ PASS' : '❌ FAIL'} ${mcpResult.hasEmojiIndicators ? '(✅ Emojis)' : '(❌ No Emojis)'}`
);
console.log(
`CLI Streaming: ${cliResult.success ? '✅ PASS' : '❌ FAIL'}`
);
console.log(
`Non-streaming: ${nonStreamResult.success ? '✅ PASS' : '❌ FAIL'}`
);
break;
}
default:
console.log(chalk.red(`Unknown test type: ${testType}`));
console.log(
chalk.yellow(
'Available options: mcp-streaming, cli-streaming, non-streaming, both, all'
)
);
process.exit(1);
}
console.log(chalk.green('\n🎉 Tests completed successfully!'));
} catch (error) {
console.error(chalk.red(`\n❌ Test failed: ${error.message}`));
console.error(chalk.red(error.stack));
process.exit(1);
}
}
// Run if called directly
if (import.meta.url === `file://${process.argv[1]}`) {
await main();
}