feat: CLI & MCP progress tracking for parse-prd command (#1048)
* initial cutover * update log to debug * update tracker to pass units * update test to match new base tracker format * add streamTextService mocks * remove unused imports * Ensure the CLI waits for async main() completion * refactor to reduce code duplication * update comment * reuse function * ensure targetTag is defined in streaming mode * avoid throwing inside process.exit spy * check for null * remove reference to generate * fix formatting * fix textStream assignment * ensure no division by 0 * fix jest chalk mocks * refactor for maintainability * Improve bar chart calculation logic for consistent visual representation * use custom streaming error types; fix mocks * Update streamText extraction in parse-prd.js to match actual service response * remove check - doesn't belong here * update mocks * remove streaming test that wasn't really doing anything * add comment * make parsing logic more DRY * fix formatting * Fix textStream extraction to match actual service response * fix mock * Add a cleanup method to ensure proper resource disposal and prevent memory leaks * debounce progress updates to reduce UI flicker during rapid updates * Implement timeout protection for streaming operations (60-second timeout) with automatic fallback to non-streaming mode. * clear timeout properly * Add a maximum buffer size limit (1MB) to prevent unbounded memory growth with very large streaming responses. 
* fix formatting * remove duplicate mock * better docs * fix formatting * sanitize the dynamic property name * Fix incorrect remaining progress calculation * Use onError callback instead of console.warn * Remove unused chalk import * Add missing custom validator in fallback parsing configuration * add custom validator parameter in fallback parsing * chore: fix package-lock.json * chore: large code refactor * chore: increase timeout from 1 minute to 3 minutes * fix: refactor and fix streaming * Merge remote-tracking branch 'origin/next' into joedanz/parse-prd-progress * fix: cleanup and fix unit tests * chore: fix unit tests * chore: fix format * chore: run format * chore: fix weird CI unit test error * chore: fix format --------- Co-authored-by: Ralph Khreish <35776126+Crunchyman-ralph@users.noreply.github.com>
This commit is contained in:
@@ -1,395 +0,0 @@
|
||||
import fs from 'fs';
|
||||
import path from 'path';
|
||||
import chalk from 'chalk';
|
||||
import boxen from 'boxen';
|
||||
import { z } from 'zod';
|
||||
|
||||
import {
|
||||
log,
|
||||
writeJSON,
|
||||
enableSilentMode,
|
||||
disableSilentMode,
|
||||
isSilentMode,
|
||||
readJSON,
|
||||
findTaskById,
|
||||
ensureTagMetadata,
|
||||
getCurrentTag
|
||||
} from '../utils.js';
|
||||
|
||||
import { generateObjectService } from '../ai-services-unified.js';
|
||||
import {
|
||||
getDebugFlag,
|
||||
getMainProvider,
|
||||
getResearchProvider,
|
||||
getDefaultPriority
|
||||
} from '../config-manager.js';
|
||||
import { getPromptManager } from '../prompt-manager.js';
|
||||
import { displayAiUsageSummary } from '../ui.js';
|
||||
import { CUSTOM_PROVIDERS } from '../../../src/constants/providers.js';
|
||||
|
||||
// Zod schema describing a SINGLE task object produced by the AI.
// The shape is declared separately so the field list is easy to scan.
const singleTaskShape = {
	id: z.number(),
	title: z.string().min(1),
	description: z.string().min(1),
	details: z.string(),
	testStrategy: z.string(),
	priority: z.enum(['high', 'medium', 'low']),
	dependencies: z.array(z.number()),
	status: z.string()
};

const prdSingleTaskSchema = z.object(singleTaskShape);
|
||||
|
||||
// Metadata describing one generation run (who/what/when of the parse).
const generationMetadataSchema = z.object({
	projectName: z.string(),
	totalTasks: z.number(),
	sourceFile: z.string(),
	generatedAt: z.string()
});

// Zod schema for the ENTIRE expected AI response object:
// the generated task list plus run metadata.
const prdResponseSchema = z.object({
	tasks: z.array(prdSingleTaskSchema),
	metadata: generationMetadataSchema
});
|
||||
|
||||
/**
 * Parse a PRD file and generate tasks via the unified AI service.
 * @param {string} prdPath - Path to the PRD file
 * @param {string} tasksPath - Path to the tasks.json file
 * @param {number} numTasks - Number of tasks to generate
 * @param {Object} options - Additional options
 * @param {boolean} [options.force=false] - Whether to overwrite existing tasks in the target tag.
 * @param {boolean} [options.append=false] - Append to existing tasks in the target tag.
 * @param {boolean} [options.research=false] - Use research model for enhanced PRD analysis.
 * @param {Function} [options.reportProgress] - Function to report progress (optional, likely unused).
 * @param {Object} [options.mcpLog] - MCP logger object (optional); its presence selects JSON output.
 * @param {Object} [options.session] - Session object from MCP server (optional).
 * @param {string} [options.projectRoot] - Project root path (for MCP/env fallback).
 * @param {string} [options.tag] - Target tag for task generation; falls back to the
 *   current active tag, then 'master'.
 * @returns {Promise<{success: true, tasksPath: string, telemetryData: Object|undefined, tagInfo: Object|undefined}>}
 * @throws {Error} If the PRD is empty/unreadable, the tag already contains tasks
 *   and neither force nor append is set, or the AI response has an unexpected shape.
 */
async function parsePRD(prdPath, tasksPath, numTasks, options = {}) {
	const {
		reportProgress,
		mcpLog,
		session,
		projectRoot,
		force = false,
		append = false,
		research = false,
		tag
	} = options;
	const isMCP = !!mcpLog;
	const outputFormat = isMCP ? 'json' : 'text';

	// Use the provided tag, or the current active tag, or default to 'master'.
	// FIX: previously `targetTag` was assigned `tag` directly, so an omitted tag
	// made every `allData[targetTag]` lookup/write use the key "undefined".
	const targetTag = tag || getCurrentTag(projectRoot) || 'master';

	const logFn = mcpLog
		? mcpLog
		: {
				// Wrapper for CLI: route every level through the shared log helper
				info: (...args) => log('info', ...args),
				warn: (...args) => log('warn', ...args),
				error: (...args) => log('error', ...args),
				debug: (...args) => log('debug', ...args),
				success: (...args) => log('success', ...args)
			};

	// Custom reporter: prefer the structured logger, fall back to raw log
	// only in CLI text mode when silent mode is off.
	const report = (message, level = 'info') => {
		if (logFn && typeof logFn[level] === 'function') {
			logFn[level](message);
		} else if (!isSilentMode() && outputFormat === 'text') {
			log(level, message);
		}
	};

	report(
		`Parsing PRD file: ${prdPath}, Force: ${force}, Append: ${append}, Research: ${research}`
	);

	let existingTasks = [];
	let nextId = 1;
	let aiServiceResponse = null;

	try {
		// Check if there are existing tasks in the target tag
		let hasExistingTasksInTag = false;
		if (fs.existsSync(tasksPath)) {
			try {
				// Read the entire file to check if the tag exists
				const existingFileContent = fs.readFileSync(tasksPath, 'utf8');
				const allData = JSON.parse(existingFileContent);

				// Check if the target tag exists and has tasks
				if (
					allData[targetTag] &&
					Array.isArray(allData[targetTag].tasks) &&
					allData[targetTag].tasks.length > 0
				) {
					hasExistingTasksInTag = true;
					existingTasks = allData[targetTag].tasks;
					nextId = Math.max(...existingTasks.map((t) => t.id || 0)) + 1;
				}
			} catch (error) {
				// If we can't read the file or parse it, assume no existing tasks in this tag
				hasExistingTasksInTag = false;
			}
		}

		// Handle file existence and overwrite/append logic based on target tag
		if (hasExistingTasksInTag) {
			if (append) {
				report(
					`Append mode enabled. Found ${existingTasks.length} existing tasks in tag '${targetTag}'. Next ID will be ${nextId}.`,
					'info'
				);
			} else if (!force) {
				// Not appending and not forcing overwrite, and there are existing tasks in the target tag
				const overwriteError = new Error(
					`Tag '${targetTag}' already contains ${existingTasks.length} tasks. Use --force to overwrite or --append to add to existing tasks.`
				);
				report(overwriteError.message, 'error');
				if (outputFormat === 'text') {
					console.error(chalk.red(overwriteError.message));
				}
				throw overwriteError;
			} else {
				// Force overwrite is true
				report(
					`Force flag enabled. Overwriting existing tasks in tag '${targetTag}'.`,
					'info'
				);
			}
		} else {
			// No existing tasks in target tag, proceed without confirmation
			report(
				`Tag '${targetTag}' is empty or doesn't exist. Creating/updating tag with new tasks.`,
				'info'
			);
		}

		report(`Reading PRD content from ${prdPath}`, 'info');
		const prdContent = fs.readFileSync(prdPath, 'utf8');
		if (!prdContent) {
			throw new Error(`Input file ${prdPath} is empty or could not be read.`);
		}

		// Load prompts using PromptManager
		const promptManager = getPromptManager();

		// Get defaultTaskPriority from config
		const defaultTaskPriority = getDefaultPriority(projectRoot) || 'medium';

		// Check if Claude Code is being used as the provider
		const currentProvider = research
			? getResearchProvider(projectRoot)
			: getMainProvider(projectRoot);
		const isClaudeCode = currentProvider === CUSTOM_PROVIDERS.CLAUDE_CODE;

		const { systemPrompt, userPrompt } = await promptManager.loadPrompt(
			'parse-prd',
			{
				research,
				numTasks,
				nextId,
				prdContent,
				prdPath,
				defaultTaskPriority,
				isClaudeCode,
				projectRoot: projectRoot || ''
			}
		);

		report(
			`Calling AI service to generate tasks from PRD${research ? ' with research-backed analysis' : ''}...`,
			'info'
		);

		// Call generateObjectService with the response schema and telemetry params
		aiServiceResponse = await generateObjectService({
			role: research ? 'research' : 'main', // Use research role if flag is set
			session: session,
			projectRoot: projectRoot,
			schema: prdResponseSchema,
			objectName: 'tasks_data',
			systemPrompt: systemPrompt,
			prompt: userPrompt,
			commandName: 'parse-prd',
			outputType: isMCP ? 'mcp' : 'cli'
		});

		// Create the directory if it doesn't exist
		const tasksDir = path.dirname(tasksPath);
		if (!fs.existsSync(tasksDir)) {
			fs.mkdirSync(tasksDir, { recursive: true });
		}
		logFn.success(
			`Successfully parsed PRD via AI service${research ? ' with research-backed analysis' : ''}.`
		);

		// Robustly extract the AI-generated object: depending on the provider,
		// mainResult may be the payload itself or wrap it under `.object`.
		let generatedData = null;
		if (aiServiceResponse?.mainResult) {
			if (
				typeof aiServiceResponse.mainResult === 'object' &&
				aiServiceResponse.mainResult !== null &&
				'tasks' in aiServiceResponse.mainResult
			) {
				// mainResult itself is the object with a 'tasks' property
				generatedData = aiServiceResponse.mainResult;
			} else if (
				typeof aiServiceResponse.mainResult.object === 'object' &&
				aiServiceResponse.mainResult.object !== null &&
				'tasks' in aiServiceResponse.mainResult.object
			) {
				// mainResult.object is the object with a 'tasks' property
				generatedData = aiServiceResponse.mainResult.object;
			}
		}

		if (!generatedData || !Array.isArray(generatedData.tasks)) {
			logFn.error(
				`Internal Error: generateObjectService returned unexpected data structure: ${JSON.stringify(generatedData)}`
			);
			throw new Error(
				'AI service returned unexpected data structure after validation.'
			);
		}

		// Assign sequential IDs starting at nextId, remembering old->new mapping
		let currentId = nextId;
		const taskMap = new Map();
		const processedNewTasks = generatedData.tasks.map((task) => {
			const newId = currentId++;
			taskMap.set(task.id, newId);
			return {
				...task,
				id: newId,
				status: task.status || 'pending',
				priority: task.priority || 'medium',
				dependencies: Array.isArray(task.dependencies) ? task.dependencies : [],
				subtasks: [],
				// Ensure all required fields have values (even if empty strings)
				title: task.title || '',
				description: task.description || '',
				details: task.details || '',
				testStrategy: task.testStrategy || ''
			};
		});

		// Remap dependencies for the NEWLY processed tasks
		processedNewTasks.forEach((task) => {
			task.dependencies = task.dependencies
				.map((depId) => taskMap.get(depId)) // Map old AI ID to new sequential ID
				.filter(
					(newDepId) =>
						newDepId != null && // Must exist
						newDepId < task.id && // Must be a lower ID (could be existing or newly generated)
						(findTaskById(existingTasks, newDepId) || // Check if it exists in old tasks OR
							processedNewTasks.some((t) => t.id === newDepId)) // check if it exists in new tasks
				);
		});

		const finalTasks = append
			? [...existingTasks, ...processedNewTasks]
			: processedNewTasks;

		// Read the existing file to preserve other tags
		let outputData = {};
		if (fs.existsSync(tasksPath)) {
			try {
				const existingFileContent = fs.readFileSync(tasksPath, 'utf8');
				outputData = JSON.parse(existingFileContent);
			} catch (error) {
				// If we can't read the existing file, start with empty object
				outputData = {};
			}
		}

		// Update only the target tag, preserving other tags
		outputData[targetTag] = {
			tasks: finalTasks,
			metadata: {
				created:
					outputData[targetTag]?.metadata?.created || new Date().toISOString(),
				updated: new Date().toISOString(),
				description: `Tasks for ${targetTag} context`
			}
		};

		// Ensure the target tag has proper metadata
		ensureTagMetadata(outputData[targetTag], {
			description: `Tasks for ${targetTag} context`
		});

		// Write the complete data structure back to the file
		fs.writeFileSync(tasksPath, JSON.stringify(outputData, null, 2));
		report(
			`Successfully ${append ? 'appended' : 'generated'} ${processedNewTasks.length} tasks in ${tasksPath}${research ? ' with research-backed analysis' : ''}`,
			'success'
		);

		// Handle CLI output (e.g., success message)
		if (outputFormat === 'text') {
			console.log(
				boxen(
					chalk.green(
						`Successfully generated ${processedNewTasks.length} new tasks${research ? ' with research-backed analysis' : ''}. Total tasks in ${tasksPath}: ${finalTasks.length}`
					),
					{ padding: 1, borderColor: 'green', borderStyle: 'round' }
				)
			);

			console.log(
				boxen(
					chalk.white.bold('Next Steps:') +
						'\n\n' +
						`${chalk.cyan('1.')} Run ${chalk.yellow('task-master list')} to view all tasks\n` +
						`${chalk.cyan('2.')} Run ${chalk.yellow('task-master expand --id=<id>')} to break down a task into subtasks`,
					{
						padding: 1,
						borderColor: 'cyan',
						borderStyle: 'round',
						margin: { top: 1 }
					}
				)
			);

			if (aiServiceResponse && aiServiceResponse.telemetryData) {
				displayAiUsageSummary(aiServiceResponse.telemetryData, 'cli');
			}
		}

		// Return telemetry data
		return {
			success: true,
			tasksPath,
			telemetryData: aiServiceResponse?.telemetryData,
			tagInfo: aiServiceResponse?.tagInfo
		};
	} catch (error) {
		report(`Error parsing PRD: ${error.message}`, 'error');

		// Only show error UI for text output (CLI)
		if (outputFormat === 'text') {
			console.error(chalk.red(`Error: ${error.message}`));

			if (getDebugFlag(projectRoot)) {
				// Use projectRoot for debug flag check
				console.error(error);
			}
		}

		throw error; // Always re-throw for proper error handling
	}
}

export default parsePRD;
|
||||
3
scripts/modules/task-manager/parse-prd/index.js
Normal file
3
scripts/modules/task-manager/parse-prd/index.js
Normal file
@@ -0,0 +1,3 @@
|
||||
// Main entry point for parse-prd module
|
||||
export { default } from './parse-prd.js';
|
||||
export { default as parsePRD } from './parse-prd.js';
|
||||
105
scripts/modules/task-manager/parse-prd/parse-prd-config.js
Normal file
105
scripts/modules/task-manager/parse-prd/parse-prd-config.js
Normal file
@@ -0,0 +1,105 @@
|
||||
/**
|
||||
* Configuration classes and schemas for PRD parsing
|
||||
*/
|
||||
|
||||
import { z } from 'zod';
|
||||
import { TASK_PRIORITY_OPTIONS } from '../../../../src/constants/task-priority.js';
|
||||
import { getCurrentTag, isSilentMode, log } from '../../utils.js';
|
||||
import { Duration } from '../../../../src/utils/timeout-manager.js';
|
||||
import { CUSTOM_PROVIDERS } from '../../../../src/constants/providers.js';
|
||||
import { getMainProvider, getResearchProvider } from '../../config-manager.js';
|
||||
|
||||
// ============================================================================
|
||||
// SCHEMAS
|
||||
// ============================================================================
|
||||
|
||||
// Zod schema describing a SINGLE task object produced by the AI.
// Priority values come from the shared TASK_PRIORITY_OPTIONS constant.
const taskShape = {
	id: z.number(),
	title: z.string().min(1),
	description: z.string().min(1),
	details: z.string(),
	testStrategy: z.string(),
	priority: z.enum(TASK_PRIORITY_OPTIONS),
	dependencies: z.array(z.number()),
	status: z.string()
};

export const prdSingleTaskSchema = z.object(taskShape);
|
||||
|
||||
// Metadata describing one generation run (project, task count, source, time).
const responseMetadataSchema = z.object({
	projectName: z.string(),
	totalTasks: z.number(),
	sourceFile: z.string(),
	generatedAt: z.string()
});

// Zod schema for the ENTIRE expected AI response object:
// the generated task list plus run metadata.
export const prdResponseSchema = z.object({
	tasks: z.array(prdSingleTaskSchema),
	metadata: responseMetadataSchema
});
|
||||
|
||||
// ============================================================================
|
||||
// CONFIGURATION CLASSES
|
||||
// ============================================================================
|
||||
|
||||
/**
 * Configuration object for PRD parsing.
 * Captures the raw CLI/MCP options and derives the values the pipeline
 * needs: target tag, output format, and whether streaming is used.
 */
export class PrdParseConfig {
	constructor(prdPath, tasksPath, numTasks, options = {}) {
		const { reportProgress, mcpLog, session, projectRoot, tag } = options;

		// Raw inputs
		this.prdPath = prdPath;
		this.tasksPath = tasksPath;
		this.numTasks = numTasks;

		// Flags: any falsy value normalizes to false (matches CLI semantics)
		this.force = options.force || false;
		this.append = options.append || false;
		this.research = options.research || false;

		// Pass-through context
		this.reportProgress = reportProgress;
		this.mcpLog = mcpLog;
		this.session = session;
		this.projectRoot = projectRoot;
		this.tag = tag;

		// Streaming timeout defaults to 3 minutes
		this.streamingTimeout =
			options.streamingTimeout || Duration.seconds(180).milliseconds;

		// Derived values
		this.targetTag = this.tag || getCurrentTag(this.projectRoot) || 'master';
		this.isMCP = Boolean(this.mcpLog);
		const wantsJson = this.isMCP && !this.reportProgress;
		this.outputFormat = wantsJson ? 'json' : 'text';
		const hasProgressCallback = typeof this.reportProgress === 'function';
		this.useStreaming = hasProgressCallback || this.outputFormat === 'text';
	}

	/**
	 * Check if Claude Code is being used as the active provider
	 * (research provider when research mode is on, main provider otherwise).
	 * @returns {boolean}
	 */
	isClaudeCode() {
		let provider;
		if (this.research) {
			provider = getResearchProvider(this.projectRoot);
		} else {
			provider = getMainProvider(this.projectRoot);
		}
		return provider === CUSTOM_PROVIDERS.CLAUDE_CODE;
	}
}
|
||||
|
||||
/**
 * Logging configuration shared by CLI and MCP code paths.
 * Wraps either the MCP-provided logger or the module-level `log` helper
 * behind a uniform { info, warn, error, debug, success } interface.
 */
export class LoggingConfig {
	constructor(mcpLog, reportProgress) {
		this.isMCP = Boolean(mcpLog);
		this.outputFormat = this.isMCP && !reportProgress ? 'json' : 'text';

		// CLI wrapper exposing the same method names an MCP logger has.
		const levels = ['info', 'warn', 'error', 'debug', 'success'];
		const cliLogger = Object.fromEntries(
			levels.map((level) => [level, (...args) => log(level, ...args)])
		);
		this.logFn = mcpLog || cliLogger;
	}

	/**
	 * Route a message to the appropriate logger method.
	 * Falls back to the raw `log` helper for CLI text output when the
	 * wrapped logger does not implement the requested level.
	 * @param {string} message - Message to emit
	 * @param {string} [level='info'] - Log level name
	 */
	report(message, level = 'info') {
		const canHandle = this.logFn && typeof this.logFn[level] === 'function';
		if (canHandle) {
			this.logFn[level](message);
		} else if (!isSilentMode() && this.outputFormat === 'text') {
			log(level, message);
		}
	}
}
|
||||
384
scripts/modules/task-manager/parse-prd/parse-prd-helpers.js
Normal file
384
scripts/modules/task-manager/parse-prd/parse-prd-helpers.js
Normal file
@@ -0,0 +1,384 @@
|
||||
/**
|
||||
* Helper functions for PRD parsing
|
||||
*/
|
||||
|
||||
import fs from 'fs';
|
||||
import path from 'path';
|
||||
import boxen from 'boxen';
|
||||
import chalk from 'chalk';
|
||||
import { ensureTagMetadata, findTaskById } from '../../utils.js';
|
||||
import { getPriorityIndicators } from '../../../../src/ui/indicators.js';
|
||||
import { displayParsePrdSummary } from '../../../../src/ui/parse-prd.js';
|
||||
import { TimeoutManager } from '../../../../src/utils/timeout-manager.js';
|
||||
import { displayAiUsageSummary } from '../../ui.js';
|
||||
import { getPromptManager } from '../../prompt-manager.js';
|
||||
import { getDefaultPriority } from '../../config-manager.js';
|
||||
|
||||
/**
 * Estimate the token count of a piece of text.
 * Uses the common heuristic of ~4 characters per token for English prose.
 * @param {string} text - Text to estimate tokens for
 * @returns {number} Estimated token count (rounded up)
 */
export function estimateTokens(text) {
	const APPROX_CHARS_PER_TOKEN = 4;
	return Math.ceil(text.length / APPROX_CHARS_PER_TOKEN);
}
|
||||
|
||||
/**
 * Read PRD content from disk and ensure it is non-empty.
 * @param {string} prdPath - Path to PRD file
 * @returns {string} PRD content
 * @throws {Error} If the file is empty or cannot be read
 */
export function readPrdContent(prdPath) {
	const content = fs.readFileSync(prdPath, 'utf8');
	if (content) {
		return content;
	}
	throw new Error(`Input file ${prdPath} is empty or could not be read.`);
}
|
||||
|
||||
/**
 * Load existing tasks for a tag from the tasks file.
 * A missing, unreadable, or corrupt file behaves as "no existing tasks".
 * @param {string} tasksPath - Path to tasks file
 * @param {string} targetTag - Target tag to load from
 * @returns {{existingTasks: Array, nextId: number}} Existing tasks and next ID
 */
export function loadExistingTasks(tasksPath, targetTag) {
	if (!fs.existsSync(tasksPath)) {
		return { existingTasks: [], nextId: 1 };
	}

	try {
		const raw = fs.readFileSync(tasksPath, 'utf8');
		const allData = JSON.parse(raw);
		const tagTasks = allData[targetTag]?.tasks;

		if (Array.isArray(tagTasks)) {
			// Next ID is one past the highest existing ID (1 when the tag is empty).
			const nextId =
				tagTasks.length > 0
					? Math.max(...tagTasks.map((t) => t.id || 0)) + 1
					: 1;
			return { existingTasks: tagTasks, nextId };
		}
		return { existingTasks: [], nextId: 1 };
	} catch (error) {
		// Unreadable or corrupt file: treat the tag as having no tasks yet.
		return { existingTasks: [], nextId: 1 };
	}
}
|
||||
|
||||
/**
 * Validate overwrite/append operations against existing tasks in the tag.
 *
 * Behavior matrix:
 * - no existing tasks: log and proceed
 * - append set: log and proceed
 * - force set: log (debug) and proceed with overwrite
 * - otherwise: report an error — throw for MCP, print and exit(1) for CLI
 *
 * @param {Object} params
 * @param {Array} params.existingTasks - Tasks already present in the tag
 * @param {string} params.targetTag - Tag being written to
 * @param {boolean} params.append - Whether append mode is enabled
 * @param {boolean} params.force - Whether force overwrite is enabled
 * @param {boolean} params.isMCP - Whether running under MCP (throw vs exit)
 * @param {Object} params.logger - Logger with a report(message, level) method
 * @throws {Error} In MCP mode when the tag has tasks and neither force nor append is set
 */
export function validateFileOperations({
	existingTasks,
	targetTag,
	append,
	force,
	isMCP,
	logger
}) {
	if (existingTasks.length === 0) {
		logger.report(
			`Tag '${targetTag}' is empty or doesn't exist. Creating/updating tag with new tasks.`,
			'info'
		);
		return;
	}

	if (append) {
		logger.report(
			`Append mode enabled. Found ${existingTasks.length} existing tasks in tag '${targetTag}'.`,
			'info'
		);
		return;
	}

	if (force) {
		logger.report(
			`Force flag enabled. Overwriting existing tasks in tag '${targetTag}'.`,
			'debug'
		);
		return;
	}

	// Neither append nor force: refuse to clobber existing tasks.
	const errorMessage = `Tag '${targetTag}' already contains ${existingTasks.length} tasks. Use --force to overwrite or --append to add to existing tasks.`;
	logger.report(errorMessage, 'error');

	if (isMCP) {
		throw new Error(errorMessage);
	}
	console.error(chalk.red(errorMessage));
	process.exit(1);
}
|
||||
|
||||
/**
 * Process and transform raw AI tasks: assign sequential IDs starting at
 * startId, fill in defaults for missing fields, and remap dependency IDs
 * onto the new numbering.
 * @param {Array} rawTasks - Raw tasks from AI
 * @param {number} startId - Starting ID for new tasks
 * @param {Array} existingTasks - Existing tasks for dependency validation
 * @param {string} defaultPriority - Default priority for tasks
 * @returns {Array} Processed tasks with remapped IDs
 */
export function processTasks(rawTasks, startId, existingTasks, defaultPriority) {
	const idRemap = new Map();
	let nextAssignedId = startId;

	// Pass 1: assign sequential IDs, record old->new mapping, fill defaults.
	const processed = [];
	for (const task of rawTasks) {
		const newId = nextAssignedId;
		nextAssignedId += 1;
		idRemap.set(task.id, newId);

		processed.push({
			...task,
			id: newId,
			status: task.status || 'pending',
			priority: task.priority || defaultPriority,
			dependencies: Array.isArray(task.dependencies) ? task.dependencies : [],
			subtasks: task.subtasks || [],
			// Ensure all required fields have values
			title: task.title || '',
			description: task.description || '',
			details: task.details || '',
			testStrategy: task.testStrategy || ''
		});
	}

	// Pass 2: translate dependency IDs; keep only deps that resolve to a
	// lower new ID and actually exist among old or newly processed tasks.
	for (const task of processed) {
		const remapped = [];
		for (const depId of task.dependencies) {
			const newDepId = idRemap.get(depId);
			const isValid =
				newDepId != null &&
				newDepId < task.id &&
				(findTaskById(existingTasks, newDepId) ||
					processed.some((t) => t.id === newDepId));
			if (isValid) {
				remapped.push(newDepId);
			}
		}
		task.dependencies = remapped;
	}

	return processed;
}
|
||||
|
||||
/**
 * Persist tasks for one tag, preserving all other tags already in the file.
 * Creates the parent directory if needed and stamps created/updated metadata.
 * @param {string} tasksPath - Path to save tasks
 * @param {Array} tasks - Tasks to save
 * @param {string} targetTag - Target tag
 * @param {Object} logger - Logger with a report(message, level) method
 */
export function saveTasksToFile(tasksPath, tasks, targetTag, logger) {
	// Make sure the parent directory exists before writing.
	const tasksDir = path.dirname(tasksPath);
	if (!fs.existsSync(tasksDir)) {
		fs.mkdirSync(tasksDir, { recursive: true });
	}

	// Load whatever is already on disk so sibling tags survive the write.
	let outputData = {};
	if (fs.existsSync(tasksPath)) {
		try {
			outputData = JSON.parse(fs.readFileSync(tasksPath, 'utf8'));
		} catch (error) {
			// Corrupt/unreadable file: start fresh rather than failing the save.
			outputData = {};
		}
	}

	// Update only the target tag; keep the original creation timestamp.
	const previousCreated = outputData[targetTag]?.metadata?.created;
	outputData[targetTag] = {
		tasks: tasks,
		metadata: {
			created: previousCreated || new Date().toISOString(),
			updated: new Date().toISOString(),
			description: `Tasks for ${targetTag} context`
		}
	};

	// Ensure proper metadata
	ensureTagMetadata(outputData[targetTag], {
		description: `Tasks for ${targetTag} context`
	});

	// Write back to file
	fs.writeFileSync(tasksPath, JSON.stringify(outputData, null, 2));

	logger.report(
		`Successfully saved ${tasks.length} tasks to ${tasksPath}`,
		'debug'
	);
}
|
||||
|
||||
/**
 * Build system and user prompts for the AI service from the parse config.
 * @param {Object} config - Configuration object (PrdParseConfig)
 * @param {string} prdContent - PRD content
 * @param {number} nextId - Next task ID
 * @returns {Promise<{systemPrompt: string, userPrompt: string}>}
 */
export async function buildPrompts(config, prdContent, nextId) {
	const promptManager = getPromptManager();
	// Default priority falls back to 'medium' when unset in config.
	const priority = getDefaultPriority(config.projectRoot) || 'medium';

	const promptParams = {
		research: config.research,
		numTasks: config.numTasks,
		nextId,
		prdContent,
		prdPath: config.prdPath,
		defaultTaskPriority: priority,
		isClaudeCode: config.isClaudeCode(),
		projectRoot: config.projectRoot || ''
	};
	return promptManager.loadPrompt('parse-prd', promptParams);
}
|
||||
|
||||
/**
 * Handle progress reporting for both CLI (progress tracker) and MCP
 * (reportProgress callback) paths. Progress reporting is best-effort:
 * MCP reporting errors are swallowed.
 * @param {Object} params - Task, counters, token estimates, and sinks
 */
export async function reportTaskProgress({
	task,
	currentCount,
	totalTasks,
	estimatedTokens,
	progressTracker,
	reportProgress,
	priorityMap,
	defaultPriority,
	estimatedInputTokens
}) {
	const priority = task.priority || defaultPriority;
	const priorityIndicator = priorityMap[priority] || priorityMap.medium;

	// CLI path: update the in-terminal progress tracker.
	if (progressTracker) {
		progressTracker.addTaskLine(currentCount, task.title, priority);
		if (estimatedTokens) {
			progressTracker.updateTokens(estimatedInputTokens, estimatedTokens);
		}
	}

	// MCP path: push a structured progress event to the client.
	if (!reportProgress) {
		return;
	}
	try {
		let outputTokens = 0;
		if (estimatedTokens) {
			outputTokens = Math.floor(estimatedTokens / totalTasks);
		}

		await reportProgress({
			progress: currentCount,
			total: totalTasks,
			message: `${priorityIndicator} Task ${currentCount}/${totalTasks} - ${task.title} | ~Output: ${outputTokens} tokens`
		});
	} catch (error) {
		// Ignore progress reporting errors
	}
}
|
||||
|
||||
/**
 * Display completion summary for CLI runs, followed by AI usage telemetry
 * when available.
 * @param {Object} params - Processed tasks, summary info, paths, AI response
 */
export async function displayCliSummary({
	processedTasks,
	nextId,
	summary,
	prdPath,
	tasksPath,
	usedFallback,
	aiServiceResponse
}) {
	// Describe which task files this run corresponds to,
	// e.g. "task_005.txt" or "task_005.txt -> task_008.txt".
	const fileNameFor = (id) => `task_${String(id).padStart(3, '0')}.txt`;
	let taskFilesGenerated;
	if (!Array.isArray(processedTasks) || processedTasks.length === 0) {
		taskFilesGenerated = fileNameFor(nextId);
	} else if (processedTasks.length === 1) {
		taskFilesGenerated = fileNameFor(processedTasks[0].id);
	} else {
		const firstId = processedTasks[0].id;
		const lastId = processedTasks[processedTasks.length - 1].id;
		taskFilesGenerated = `${fileNameFor(firstId)} -> ${fileNameFor(lastId)}`;
	}

	displayParsePrdSummary({
		totalTasks: processedTasks.length,
		taskPriorities: summary.taskPriorities,
		prdFilePath: prdPath,
		outputPath: tasksPath,
		elapsedTime: summary.elapsedTime,
		usedFallback,
		taskFilesGenerated,
		actionVerb: summary.actionVerb
	});

	// Display telemetry
	if (aiServiceResponse?.telemetryData) {
		// For streaming, give the usage promise a short window to resolve so
		// token counts are populated before printing.
		if (aiServiceResponse.mainResult?.usage) {
			await TimeoutManager.withSoftTimeout(
				aiServiceResponse.mainResult.usage,
				1000,
				undefined
			);
		}
		displayAiUsageSummary(aiServiceResponse.telemetryData, 'cli');
	}
}
|
||||
|
||||
/**
 * Display non-streaming CLI output
 * Prints a success box, a "Next Steps" box, and (when available) the AI
 * usage summary after a non-streaming parse completes.
 * @param {Object} params
 * @param {Array<Object>} params.processedTasks - Newly generated tasks
 * @param {boolean} params.research - Whether research mode was used
 * @param {Array<Object>} params.finalTasks - All tasks after processing
 * @param {string} params.tasksPath - Output tasks file path
 * @param {Object} params.aiServiceResponse - AI service response (telemetry)
 */
export function displayNonStreamingCliOutput({
	processedTasks,
	research,
	finalTasks,
	tasksPath,
	aiServiceResponse
}) {
	const successMessage = `Successfully generated ${processedTasks.length} new tasks${research ? ' with research-backed analysis' : ''}. Total tasks in ${tasksPath}: ${finalTasks.length}`;

	console.log(
		boxen(chalk.green(successMessage), {
			padding: 1,
			borderColor: 'green',
			borderStyle: 'round'
		})
	);

	const nextSteps =
		chalk.white.bold('Next Steps:') +
		'\n\n' +
		`${chalk.cyan('1.')} Run ${chalk.yellow('task-master list')} to view all tasks\n` +
		`${chalk.cyan('2.')} Run ${chalk.yellow('task-master expand --id=<id>')} to break down a task into subtasks`;

	console.log(
		boxen(nextSteps, {
			padding: 1,
			borderColor: 'cyan',
			borderStyle: 'round',
			margin: { top: 1 }
		})
	);

	if (aiServiceResponse?.telemetryData) {
		displayAiUsageSummary(aiServiceResponse.telemetryData, 'cli');
	}
}
|
||||
@@ -0,0 +1,85 @@
|
||||
/**
|
||||
* Non-streaming handler for PRD parsing
|
||||
*/
|
||||
|
||||
import ora from 'ora';
|
||||
import { generateObjectService } from '../../ai-services-unified.js';
|
||||
import { LoggingConfig, prdResponseSchema } from './parse-prd-config.js';
|
||||
import { estimateTokens } from './parse-prd-helpers.js';
|
||||
|
||||
/**
 * Handle non-streaming AI service call
 * Performs a single blocking generateObjectService call, extracts the
 * validated tasks object from the response, and manages a CLI spinner.
 * @param {Object} config - Configuration object
 * @param {Object} prompts - System and user prompts
 * @returns {Promise<Object>} Generated tasks and telemetry
 * @throws {Error} When the service response has no usable tasks array
 */
export async function handleNonStreamingService(config, prompts) {
	const logger = new LoggingConfig(config.mcpLog, config.reportProgress);
	const { systemPrompt, userPrompt } = prompts;
	const estimatedInputTokens = estimateTokens(systemPrompt + userPrompt);

	// Spinner only makes sense for interactive CLI text output
	const useSpinner = config.outputFormat === 'text' && !config.isMCP;
	const spinner = useSpinner
		? ora('Parsing PRD and generating tasks...\n').start()
		: null;

	try {
		logger.report(
			`Calling AI service to generate tasks from PRD${config.research ? ' with research-backed analysis' : ''}...`,
			'info'
		);

		const aiServiceResponse = await generateObjectService({
			role: config.research ? 'research' : 'main',
			session: config.session,
			projectRoot: config.projectRoot,
			schema: prdResponseSchema,
			objectName: 'tasks_data',
			systemPrompt,
			prompt: userPrompt,
			commandName: 'parse-prd',
			outputType: config.isMCP ? 'mcp' : 'cli'
		});

		// The tasks payload may sit directly on mainResult or one level down
		// under mainResult.object, depending on the provider wrapper.
		const looksLikeTasks = (value) =>
			typeof value === 'object' && value !== null && 'tasks' in value;

		let generatedData = null;
		const mainResult = aiServiceResponse?.mainResult;
		if (mainResult) {
			if (looksLikeTasks(mainResult)) {
				generatedData = mainResult;
			} else if (looksLikeTasks(mainResult.object)) {
				generatedData = mainResult.object;
			}
		}

		if (!generatedData || !Array.isArray(generatedData.tasks)) {
			throw new Error(
				'AI service returned unexpected data structure after validation.'
			);
		}

		spinner?.succeed('Tasks generated successfully!');

		return {
			parsedTasks: generatedData.tasks,
			aiServiceResponse,
			estimatedInputTokens
		};
	} catch (error) {
		spinner?.fail(`Error parsing PRD: ${error.message}`);
		throw error;
	}
}
|
||||
653
scripts/modules/task-manager/parse-prd/parse-prd-streaming.js
Normal file
653
scripts/modules/task-manager/parse-prd/parse-prd-streaming.js
Normal file
@@ -0,0 +1,653 @@
|
||||
/**
 * Streaming handler for PRD parsing
 */

import { createParsePrdTracker } from '../../../../src/progress/parse-prd-tracker.js';
import { displayParsePrdStart } from '../../../../src/ui/parse-prd.js';
import { getPriorityIndicators } from '../../../../src/ui/indicators.js';
import {
	StreamingError,
	STREAMING_ERROR_CODES
} from '../../../../src/utils/stream-parser.js';
import { TimeoutManager } from '../../../../src/utils/timeout-manager.js';
import {
	streamObjectService,
	generateObjectService
} from '../../ai-services-unified.js';
import {
	getMainModelId,
	getParametersForRole,
	getResearchModelId,
	getDefaultPriority
} from '../../config-manager.js';
import { LoggingConfig, prdResponseSchema } from './parse-prd-config.js';
import { estimateTokens, reportTaskProgress } from './parse-prd-helpers.js';
|
||||
|
||||
/**
 * Extract a readable stream from various stream result formats
 * @param {any} streamResult - The stream result object from AI service
 * @returns {AsyncIterable|ReadableStream} The extracted stream
 * @throws {StreamingError} If no valid stream can be extracted
 */
// NOTE(review): StreamingError / STREAMING_ERROR_CODES are defined in
// src/utils/stream-parser.js (parse-prd.js imports them from there) — verify
// this module imports them too, otherwise these throws become ReferenceErrors.
function extractStreamFromResult(streamResult) {
	// A missing result can never yield a stream — fail with a typed error
	if (!streamResult) {
		throw new StreamingError(
			'Stream result is null or undefined',
			STREAMING_ERROR_CODES.NOT_ASYNC_ITERABLE
		);
	}

	// Try extraction strategies in priority order
	const stream = tryExtractStream(streamResult);

	if (!stream) {
		throw new StreamingError(
			'Stream object is not async iterable or readable',
			STREAMING_ERROR_CODES.NOT_ASYNC_ITERABLE
		);
	}

	return stream;
}
|
||||
|
||||
/**
 * Try to extract a usable stream from a service result, checking known
 * stream-bearing properties in priority order.
 * @param {any} streamResult - Raw result object from the AI service
 * @returns {AsyncIterable|ReadableStream|null} First streamable value found, or null
 */
function tryExtractStream(streamResult) {
	// Known stream locations, in priority order. Some are getter functions
	// rather than direct values, hence extractCallable. (The original table
	// carried an unused `key` label per entry; plain extractors suffice.)
	const extractors = [
		(obj) => obj.partialObjectStream,
		(obj) => extractCallable(obj.textStream),
		(obj) => extractCallable(obj.stream),
		(obj) => obj.baseStream
	];

	for (const extract of extractors) {
		const candidate = extract(streamResult);
		if (candidate && isStreamable(candidate)) {
			return candidate;
		}
	}

	// Check if the result itself is already streamable
	return isStreamable(streamResult) ? streamResult : null;
}
|
||||
|
||||
/**
 * Resolve a property that may be either a zero-argument getter function or a
 * direct value.
 * @param {any} property - Candidate value or getter
 * @returns {any} The resolved value, or null when the input is falsy
 */
function extractCallable(property) {
	if (!property) {
		return null;
	}
	if (typeof property === 'function') {
		return property();
	}
	return property;
}
|
||||
|
||||
/**
 * Check if object is streamable: an async iterable or a Web ReadableStream
 * (detected via a getReader method).
 * @param {any} obj - Candidate object
 * @returns {any} Truthy when streamable; preserves the original's falsy
 *   return values (null/undefined/false) for non-streamable input
 */
function isStreamable(obj) {
	if (!obj) {
		// Mirror the original short-circuit: return the falsy input itself
		return obj;
	}
	if (typeof obj[Symbol.asyncIterator] === 'function') {
		return true;
	}
	return obj.getReader && typeof obj.getReader === 'function';
}
|
||||
|
||||
/**
 * Handle streaming AI service call and parsing
 * Orchestrates the full streaming flow: initial progress report, the timed
 * streamObject call, CLI progress-tracker setup, partial-object processing
 * (with generateObject fallback inside processStreamResponse), post-hoc
 * telemetry logging from streaming usage, and final result assembly.
 * @param {Object} config - Configuration object
 * @param {Object} prompts - System and user prompts
 * @param {number} numTasks - Number of tasks to generate
 * @returns {Promise<Object>} Parsed tasks and telemetry
 */
export async function handleStreamingService(config, prompts, numTasks) {
	const context = createStreamingContext(config, prompts, numTasks);

	// Emit the 0/N progress message before any AI work starts
	await initializeProgress(config, numTasks, context.estimatedInputTokens);

	const aiServiceResponse = await callAIServiceWithTimeout(
		config,
		prompts,
		config.streamingTimeout
	);

	// progressTracker is null outside interactive CLI text mode
	const { progressTracker, priorityMap } = await setupProgressTracking(
		config,
		numTasks
	);

	const streamingResult = await processStreamResponse(
		aiServiceResponse.mainResult,
		config,
		prompts,
		numTasks,
		progressTracker,
		priorityMap,
		context.defaultPriority,
		context.estimatedInputTokens,
		context.logger
	);

	validateStreamingResult(streamingResult);

	// If we have usage data from streaming, log telemetry now
	// (lazy imports avoid a static import cycle with these modules)
	if (streamingResult.usage && config.projectRoot) {
		const { logAiUsage } = await import('../../ai-services-unified.js');
		const { getUserId } = await import('../../config-manager.js');
		const userId = getUserId(config.projectRoot);

		if (userId && aiServiceResponse.providerName && aiServiceResponse.modelId) {
			try {
				const telemetryData = await logAiUsage({
					userId,
					commandName: 'parse-prd',
					providerName: aiServiceResponse.providerName,
					modelId: aiServiceResponse.modelId,
					inputTokens: streamingResult.usage.promptTokens || 0,
					outputTokens: streamingResult.usage.completionTokens || 0,
					outputType: config.isMCP ? 'mcp' : 'cli'
				});

				// Add telemetry to the response
				if (telemetryData) {
					aiServiceResponse.telemetryData = telemetryData;
				}
			} catch (telemetryError) {
				// Telemetry is best-effort — log at debug and carry on
				context.logger.report(
					`Failed to log telemetry: ${telemetryError.message}`,
					'debug'
				);
			}
		}
	}

	return prepareFinalResult(
		streamingResult,
		aiServiceResponse,
		context.estimatedInputTokens,
		progressTracker
	);
}
|
||||
|
||||
/**
 * Create streaming context with common values shared across the streaming
 * flow: a logger, the estimated input-token count, and the default priority.
 * @param {Object} config - Configuration object
 * @param {Object} prompts - System and user prompts
 * @param {number} numTasks - Number of tasks to generate (unused here; kept for signature parity)
 * @returns {{logger: LoggingConfig, estimatedInputTokens: number, defaultPriority: string}}
 */
function createStreamingContext(config, prompts, numTasks) {
	const combinedPrompt = prompts.systemPrompt + prompts.userPrompt;
	const logger = new LoggingConfig(config.mcpLog, config.reportProgress);
	const defaultPriority = getDefaultPriority(config.projectRoot) || 'medium';

	return {
		logger,
		estimatedInputTokens: estimateTokens(combinedPrompt),
		defaultPriority
	};
}
|
||||
|
||||
/**
 * Validate that a streaming run produced at least one task.
 * @param {Object} streamingResult - Result from stream processing
 * @param {Array} [streamingResult.parsedTasks] - Tasks parsed from the stream
 * @throws {Error} When no tasks were generated — including when parsedTasks
 *   is missing entirely (previously that raised an opaque TypeError)
 */
function validateStreamingResult(streamingResult) {
	// Optional chaining: a missing parsedTasks array gets the same clear
	// error as an empty one instead of "Cannot read properties of undefined".
	if (!streamingResult?.parsedTasks?.length) {
		throw new Error('No tasks were generated from the PRD');
	}
}
|
||||
|
||||
/**
 * Initialize progress reporting: send the 0/N "starting" message through the
 * reportProgress callback. No-op when no callback is configured (CLI mode).
 * @param {Object} config - Configuration (reportProgress, research)
 * @param {number} numTasks - Total task count for the progress denominator
 * @param {number} estimatedInputTokens - Estimated prompt token count
 */
async function initializeProgress(config, numTasks, estimatedInputTokens) {
	if (!config.reportProgress) {
		return;
	}

	const researchSuffix = config.research ? ' with research' : '';
	await config.reportProgress({
		progress: 0,
		total: numTasks,
		message: `Starting PRD analysis (Input: ${estimatedInputTokens} tokens)${researchSuffix}...`
	});
}
|
||||
|
||||
/**
 * Call the streaming AI service, bounded by a timeout so a stalled stream
 * cannot hang the command (callers fall back to non-streaming on timeout).
 * @param {Object} config - Configuration object
 * @param {Object} prompts - System and user prompts
 * @param {number} timeout - Timeout in milliseconds
 * @returns {Promise<Object>} The streamObjectService response
 */
async function callAIServiceWithTimeout(config, prompts, timeout) {
	const servicePromise = streamObjectService({
		role: config.research ? 'research' : 'main',
		session: config.session,
		projectRoot: config.projectRoot,
		schema: prdResponseSchema,
		systemPrompt: prompts.systemPrompt,
		prompt: prompts.userPrompt,
		commandName: 'parse-prd',
		outputType: config.isMCP ? 'mcp' : 'cli'
	});

	return await TimeoutManager.withTimeout(
		servicePromise,
		timeout,
		'Streaming operation'
	);
}
|
||||
|
||||
/**
 * Setup progress tracking for CLI output.
 * Always resolves the priority indicator map; additionally creates and
 * starts a CLI progress tracker (with the start banner) in interactive
 * text mode. MCP callers get a null tracker.
 * @param {Object} config - Configuration object
 * @param {number} numTasks - Number of tasks to generate
 * @returns {Promise<{progressTracker: Object|null, priorityMap: Object}>}
 */
async function setupProgressTracking(config, numTasks) {
	const priorityMap = getPriorityIndicators(config.isMCP);

	// Tracker + banner only for interactive CLI text output
	const isCliText = config.outputFormat === 'text' && !config.isMCP;
	if (!isCliText) {
		return { progressTracker: null, priorityMap };
	}

	const progressTracker = createParsePrdTracker({
		numUnits: numTasks,
		unitName: 'task',
		append: config.append
	});

	const modelId = config.research ? getResearchModelId() : getMainModelId();
	const parameters = getParametersForRole(
		config.research ? 'research' : 'main'
	);

	displayParsePrdStart({
		prdFilePath: config.prdPath,
		outputPath: config.tasksPath,
		numTasks,
		append: config.append,
		research: config.research,
		force: config.force,
		existingTasks: [],
		nextId: 1,
		model: modelId || 'Default',
		temperature: parameters?.temperature || 0.7
	});

	progressTracker.start();

	return { progressTracker, priorityMap };
}
|
||||
|
||||
/**
 * Process stream response based on stream type
 * Drives streamObject's partialObjectStream through the progress pipeline,
 * awaits the usage promise when present, and falls back to a single
 * generateObject call if anything in stream processing throws.
 * @param {Object} streamResult - mainResult from the streaming service call
 * @param {Object} config - Configuration object
 * @param {Object} prompts - System and user prompts
 * @param {number} numTasks - Expected number of tasks
 * @param {Object|null} progressTracker - CLI tracker or null (MCP)
 * @param {Object} priorityMap - Priority indicator map
 * @param {string} defaultPriority - Priority used when a task has none
 * @param {number} estimatedInputTokens - Estimated prompt token count
 * @param {Object} logger - Logger with report(message, level)
 * @returns {Promise<Object>} Streaming (or fallback) parse result
 */
async function processStreamResponse(
	streamResult,
	config,
	prompts,
	numTasks,
	progressTracker,
	priorityMap,
	defaultPriority,
	estimatedInputTokens,
	logger
) {
	const { systemPrompt, userPrompt } = prompts;
	// Single bag of everything the downstream helpers (and the
	// generateObject fallback) need.
	const context = {
		config: {
			...config,
			schema: prdResponseSchema // Add the schema for generateObject fallback
		},
		numTasks,
		progressTracker,
		priorityMap,
		defaultPriority,
		estimatedInputTokens,
		prompt: userPrompt,
		systemPrompt: systemPrompt
	};

	try {
		// Mutable accumulator shared across partial-object iterations
		const streamingState = {
			lastPartialObject: null,
			taskCount: 0,
			estimatedOutputTokens: 0,
			usage: null
		};

		await processPartialStream(
			streamResult.partialObjectStream,
			streamingState,
			context
		);

		// Wait for usage data if available (streamResult.usage is a promise)
		if (streamResult.usage) {
			try {
				streamingState.usage = await streamResult.usage;
			} catch (usageError) {
				// Usage is informational only — don't fail the parse over it
				logger.report(
					`Failed to get usage data: ${usageError.message}`,
					'debug'
				);
			}
		}

		return finalizeStreamingResults(streamingState, context);
	} catch (error) {
		// Any streaming failure (including a missing/non-iterable
		// partialObjectStream) falls back to one non-streaming call.
		logger.report(
			`StreamObject processing failed: ${error.message}. Falling back to generateObject.`,
			'debug'
		);
		return await processWithGenerateObject(context, logger);
	}
}
|
||||
|
||||
/**
 * Process the partial object stream
 * Iterates streamObject's progressively more complete snapshots, keeping the
 * latest one and a running output-token estimate, and emitting per-task
 * progress for newly appeared tasks.
 * @param {AsyncIterable<Object>} partialStream - partialObjectStream from streamObject
 * @param {Object} state - Mutable state (lastPartialObject, taskCount, estimatedOutputTokens)
 * @param {Object} context - Shared streaming context
 */
async function processPartialStream(partialStream, state, context) {
	for await (const partialObject of partialStream) {
		// Each snapshot supersedes the previous one
		state.lastPartialObject = partialObject;

		if (partialObject) {
			// Rough output-token estimate from the serialized snapshot size
			state.estimatedOutputTokens = estimateTokens(
				JSON.stringify(partialObject)
			);
		}

		await processStreamingTasks(partialObject, state, context);
	}
}
|
||||
|
||||
/**
 * Process tasks from a streaming partial object.
 * When the snapshot contains more tasks than previously seen, report each new
 * one; otherwise just refresh the CLI token display.
 * @param {Object|null} partialObject - Latest partial snapshot
 * @param {Object} state - Mutable state (taskCount, estimatedOutputTokens)
 * @param {Object} context - Shared streaming context
 */
async function processStreamingTasks(partialObject, state, context) {
	const tasks = partialObject?.tasks;
	if (!Array.isArray(tasks)) {
		return;
	}

	const seenCount = tasks.length;

	if (seenCount > state.taskCount) {
		// New tasks appeared in this snapshot — report them one by one
		await processNewTasks(
			tasks,
			state.taskCount,
			seenCount,
			state.estimatedOutputTokens,
			context
		);
		state.taskCount = seenCount;
		return;
	}

	// No new tasks: keep the CLI token counter moving while content streams
	if (context.progressTracker && state.estimatedOutputTokens > 0) {
		context.progressTracker.updateTokens(
			context.estimatedInputTokens,
			state.estimatedOutputTokens,
			true
		);
	}
}
|
||||
|
||||
/**
 * Process newly appeared tasks in the stream.
 * Tasks with a title get a full progress report; tasks whose title hasn't
 * streamed in yet get a placeholder row.
 * @param {Array<Object>} tasks - Tasks array from the latest snapshot
 * @param {number} startIndex - First new task index (inclusive)
 * @param {number} endIndex - One past the last new task index
 * @param {number} estimatedOutputTokens - Current output-token estimate
 * @param {Object} context - Shared streaming context
 */
async function processNewTasks(
	tasks,
	startIndex,
	endIndex,
	estimatedOutputTokens,
	context
) {
	for (let index = startIndex; index < endIndex; index++) {
		const task = tasks[index] || {};
		const taskNumber = index + 1;

		if (!task.title) {
			// Title hasn't streamed in yet — show a placeholder row
			await reportPlaceholderTask(taskNumber, estimatedOutputTokens, context);
			continue;
		}

		await reportTaskProgress({
			task,
			currentCount: taskNumber,
			totalTasks: context.numTasks,
			estimatedTokens: estimatedOutputTokens,
			progressTracker: context.progressTracker,
			reportProgress: context.config.reportProgress,
			priorityMap: context.priorityMap,
			defaultPriority: context.defaultPriority,
			estimatedInputTokens: context.estimatedInputTokens
		});
	}
}
|
||||
|
||||
/**
 * Report a placeholder task while it's being generated.
 * CLI mode: adds a "Generating task N..." line and refreshes token counts.
 * MCP mode (no tracker): sends a progress message through the callback.
 * @param {number} taskNumber - 1-based task number
 * @param {number} estimatedOutputTokens - Current output-token estimate
 * @param {Object} context - Shared streaming context
 */
async function reportPlaceholderTask(
	taskNumber,
	estimatedOutputTokens,
	context
) {
	const {
		progressTracker,
		config,
		numTasks,
		defaultPriority,
		estimatedInputTokens
	} = context;

	if (progressTracker) {
		progressTracker.addTaskLine(
			taskNumber,
			`Generating task ${taskNumber}...`,
			defaultPriority
		);
		progressTracker.updateTokens(
			estimatedInputTokens,
			estimatedOutputTokens,
			true
		);
	}

	// MCP path: no CLI tracker, so report through the callback instead
	if (config.reportProgress && !progressTracker) {
		await config.reportProgress({
			progress: taskNumber,
			total: numTasks,
			message: `Generating task ${taskNumber}/${numTasks}...`
		});
	}
}
|
||||
|
||||
/**
 * Finalize streaming results and update the progress display.
 * Prefers actual usage token counts over streaming estimates when usage data
 * arrived (a zero/absent count still falls back to the estimate, matching
 * the original `||` semantics).
 * @param {Object} state - Final streaming state
 * @param {Object} context - Shared streaming context
 * @returns {Promise<Object>} Parsed tasks plus token/usage info
 * @throws {Error} When the final snapshot has no tasks array
 */
async function finalizeStreamingResults(state, context) {
	const { lastPartialObject, estimatedOutputTokens, taskCount, usage } = state;

	const tasks = lastPartialObject?.tasks;
	if (!Array.isArray(tasks)) {
		throw new Error('No tasks generated from streamObject');
	}

	// Use actual token counts if available, otherwise the streaming estimates
	const finalOutputTokens = usage?.completionTokens || estimatedOutputTokens;
	const finalInputTokens = usage?.promptTokens || context.estimatedInputTokens;

	if (context.progressTracker) {
		const outputForDisplay = usage ? finalOutputTokens : estimatedOutputTokens;
		const inputForDisplay = usage ? finalInputTokens : null;
		await updateFinalProgress(
			tasks,
			taskCount,
			outputForDisplay,
			context,
			inputForDisplay
		);
	}

	return {
		parsedTasks: tasks,
		estimatedOutputTokens: finalOutputTokens,
		actualInputTokens: finalInputTokens,
		usage,
		usedFallback: false
	};
}
|
||||
|
||||
/**
 * Update the progress tracker with final task content.
 * If tasks streamed incrementally (taskCount > 0), only refresh their lines;
 * otherwise report every task now. Then push final token counts and stop.
 * @param {Array<Object>} tasks - Final tasks array
 * @param {number} taskCount - How many tasks were reported during streaming
 * @param {number} outputTokens - Final output-token count
 * @param {Object} context - Shared streaming context (must have progressTracker)
 * @param {number|null} actualInputTokens - Real input tokens, or null to use the estimate
 */
async function updateFinalProgress(
	tasks,
	taskCount,
	outputTokens,
	context,
	actualInputTokens = null
) {
	const { progressTracker, defaultPriority, estimatedInputTokens } = context;

	if (taskCount > 0) {
		// Tasks already streamed — just overwrite their lines with final text
		updateTaskLines(tasks, progressTracker, defaultPriority);
	} else {
		// Nothing streamed incrementally — report everything now
		await reportAllTasks(tasks, outputTokens, context);
	}

	const inputTokens = actualInputTokens || estimatedInputTokens;
	progressTracker.updateTokens(inputTokens, outputTokens, false);
	progressTracker.stop();
}
|
||||
|
||||
/**
 * Update task lines in the progress tracker with final content.
 * Tasks without a title are left untouched.
 * @param {Array<Object>} tasks - Final tasks array
 * @param {Object} progressTracker - Tracker with addTaskLine(number, title, priority)
 * @param {string} defaultPriority - Priority used when a task has none
 */
function updateTaskLines(tasks, progressTracker, defaultPriority) {
	tasks.forEach((task, index) => {
		if (task?.title) {
			progressTracker.addTaskLine(
				index + 1,
				task.title,
				task.priority || defaultPriority
			);
		}
	});
}
|
||||
|
||||
/**
 * Report all tasks that were not streamed incrementally.
 * Skips entries without a title (incomplete snapshots).
 * @param {Array<Object>} tasks - Final tasks array
 * @param {number} estimatedOutputTokens - Output-token estimate for the batch
 * @param {Object} context - Shared streaming context
 */
async function reportAllTasks(tasks, estimatedOutputTokens, context) {
	let position = 0;
	for (const task of tasks) {
		position += 1;
		if (!task?.title) {
			continue;
		}
		await reportTaskProgress({
			task,
			currentCount: position,
			totalTasks: context.numTasks,
			estimatedTokens: estimatedOutputTokens,
			progressTracker: context.progressTracker,
			reportProgress: context.config.reportProgress,
			priorityMap: context.priorityMap,
			defaultPriority: context.defaultPriority,
			estimatedInputTokens: context.estimatedInputTokens
		});
	}
}
|
||||
|
||||
/**
 * Process with generateObject as fallback when streaming fails
 * Makes a single non-streaming generateObjectService call, mirrors the
 * streaming UI with placeholder rows while waiting, then fills in the final
 * task lines and token counts.
 * @param {Object} context - Streaming context (config with schema, prompts, tracker, token estimate)
 * @param {Object} logger - Logger with report(message, level)
 * @returns {Promise<Object>} Parsed tasks plus token/telemetry info (usedFallback: true)
 * @throws {Error} When the fallback response contains no tasks array
 */
async function processWithGenerateObject(context, logger) {
	logger.report('Using generateObject fallback for PRD parsing', 'info');

	// Show placeholder tasks while generating
	if (context.progressTracker) {
		for (let i = 0; i < context.numTasks; i++) {
			context.progressTracker.addTaskLine(
				i + 1,
				`Generating task ${i + 1}...`,
				context.defaultPriority
			);
			context.progressTracker.updateTokens(
				context.estimatedInputTokens,
				0,
				true
			);
		}
	}

	// Use generateObjectService instead of streaming
	const result = await generateObjectService({
		role: context.config.research ? 'research' : 'main',
		commandName: 'parse-prd',
		prompt: context.prompt,
		systemPrompt: context.systemPrompt,
		schema: context.config.schema,
		outputFormat: context.config.outputFormat || 'text',
		projectRoot: context.config.projectRoot,
		session: context.config.session
	});

	// Extract tasks from the result (handle both direct tasks and mainResult.tasks)
	const tasks = result?.mainResult || result;

	// Process the generated tasks
	if (tasks && Array.isArray(tasks.tasks)) {
		// Update progress tracker with final tasks
		if (context.progressTracker) {
			for (let i = 0; i < tasks.tasks.length; i++) {
				const task = tasks.tasks[i];
				if (task && task.title) {
					context.progressTracker.addTaskLine(
						i + 1,
						task.title,
						task.priority || context.defaultPriority
					);
				}
			}

			// Final token update - use actual telemetry if available,
			// otherwise estimate from the serialized result size
			const outputTokens =
				result.telemetryData?.outputTokens ||
				estimateTokens(JSON.stringify(tasks));
			const inputTokens =
				result.telemetryData?.inputTokens || context.estimatedInputTokens;

			context.progressTracker.updateTokens(inputTokens, outputTokens, false);
		}

		return {
			parsedTasks: tasks.tasks,
			estimatedOutputTokens:
				result.telemetryData?.outputTokens ||
				estimateTokens(JSON.stringify(tasks)),
			actualInputTokens: result.telemetryData?.inputTokens,
			telemetryData: result.telemetryData,
			usedFallback: true
		};
	}

	throw new Error('Failed to generate tasks using generateObject fallback');
}
|
||||
|
||||
/**
 * Prepare the final result with cleanup.
 * Captures the tracker summary, releases tracker resources, and copies
 * streaming usage data onto the AI service response when it has none.
 * @param {Object} streamingResult - Result from stream processing
 * @param {Object} aiServiceResponse - AI service response (may be mutated to add usage)
 * @param {number} estimatedInputTokens - Estimated prompt token count
 * @param {Object|null} progressTracker - CLI tracker or null
 * @returns {Object} Final result consumed by parsePRDCore
 */
function prepareFinalResult(
	streamingResult,
	aiServiceResponse,
	estimatedInputTokens,
	progressTracker
) {
	let summary = null;
	if (progressTracker) {
		summary = progressTracker.getSummary();
		// Release timers/listeners held by the tracker to avoid leaks
		progressTracker.cleanup();
	}

	// If actual usage data arrived from streaming, surface it on the
	// response (without clobbering usage set by the service runner).
	const { usage } = streamingResult;
	if (usage && aiServiceResponse && !aiServiceResponse.usage) {
		aiServiceResponse.usage = {
			promptTokens: usage.promptTokens || 0,
			completionTokens: usage.completionTokens || 0,
			totalTokens: usage.totalTokens || 0
		};
	}

	const inputTokens = streamingResult.actualInputTokens || estimatedInputTokens;

	return {
		parsedTasks: streamingResult.parsedTasks,
		aiServiceResponse,
		estimatedInputTokens: inputTokens,
		estimatedOutputTokens: streamingResult.estimatedOutputTokens,
		usedFallback: streamingResult.usedFallback,
		progressTracker,
		summary
	};
}
|
||||
272
scripts/modules/task-manager/parse-prd/parse-prd.js
Normal file
272
scripts/modules/task-manager/parse-prd/parse-prd.js
Normal file
@@ -0,0 +1,272 @@
|
||||
import chalk from 'chalk';
|
||||
import {
|
||||
StreamingError,
|
||||
STREAMING_ERROR_CODES
|
||||
} from '../../../../src/utils/stream-parser.js';
|
||||
import { TimeoutManager } from '../../../../src/utils/timeout-manager.js';
|
||||
import { getDebugFlag, getDefaultPriority } from '../../config-manager.js';
|
||||
|
||||
// Import configuration classes
|
||||
import { PrdParseConfig, LoggingConfig } from './parse-prd-config.js';
|
||||
|
||||
// Import helper functions
|
||||
import {
|
||||
readPrdContent,
|
||||
loadExistingTasks,
|
||||
validateFileOperations,
|
||||
processTasks,
|
||||
saveTasksToFile,
|
||||
buildPrompts,
|
||||
displayCliSummary,
|
||||
displayNonStreamingCliOutput
|
||||
} from './parse-prd-helpers.js';
|
||||
|
||||
// Import handlers
|
||||
import { handleStreamingService } from './parse-prd-streaming.js';
|
||||
import { handleNonStreamingService } from './parse-prd-non-streaming.js';
|
||||
|
||||
// ============================================================================
|
||||
// MAIN PARSING FUNCTIONS (Simplified after refactoring)
|
||||
// ============================================================================
|
||||
|
||||
/**
 * Shared parsing logic for both streaming and non-streaming
 * Loads existing tasks, validates the requested file operation, builds
 * prompts from the PRD, delegates to the given service handler, then
 * renumbers/saves the tasks and reports completion.
 * @param {PrdParseConfig} config - Configuration object
 * @param {Function} serviceHandler - Handler function for AI service
 * @param {boolean} isStreaming - Whether this is streaming mode
 * @returns {Promise<Object>} Result object with success status and telemetry
 * @throws Re-throws any error after logging (and console output in CLI mode)
 */
async function parsePRDCore(config, serviceHandler, isStreaming) {
	const logger = new LoggingConfig(config.mcpLog, config.reportProgress);

	logger.report(
		`Parsing PRD file: ${config.prdPath}, Force: ${config.force}, Append: ${config.append}, Research: ${config.research}`,
		'debug'
	);

	try {
		// Load existing tasks (for append mode and to compute the next task id)
		const { existingTasks, nextId } = loadExistingTasks(
			config.tasksPath,
			config.targetTag
		);

		// Validate operations (e.g. whether overwrite/append is permitted)
		validateFileOperations({
			existingTasks,
			targetTag: config.targetTag,
			append: config.append,
			force: config.force,
			isMCP: config.isMCP,
			logger
		});

		// Read PRD content and build prompts
		const prdContent = readPrdContent(config.prdPath);
		const prompts = await buildPrompts(config, prdContent, nextId);

		// Call the appropriate service handler (streaming or non-streaming)
		const serviceResult = await serviceHandler(
			config,
			prompts,
			config.numTasks
		);

		// Process tasks: assign ids starting at nextId and fill defaults
		const defaultPriority = getDefaultPriority(config.projectRoot) || 'medium';
		const processedNewTasks = processTasks(
			serviceResult.parsedTasks,
			nextId,
			existingTasks,
			defaultPriority
		);

		// Combine with existing if appending
		const finalTasks = config.append
			? [...existingTasks, ...processedNewTasks]
			: processedNewTasks;

		// Save to file
		saveTasksToFile(config.tasksPath, finalTasks, config.targetTag, logger);

		// Handle completion reporting (MCP progress callback and/or CLI output)
		await handleCompletionReporting(
			config,
			serviceResult,
			processedNewTasks,
			finalTasks,
			nextId,
			isStreaming
		);

		return {
			success: true,
			tasksPath: config.tasksPath,
			telemetryData: serviceResult.aiServiceResponse?.telemetryData,
			tagInfo: serviceResult.aiServiceResponse?.tagInfo
		};
	} catch (error) {
		logger.report(`Error parsing PRD: ${error.message}`, 'error');

		// CLI additionally prints to the console; MCP relies on the rethrow
		if (!config.isMCP) {
			console.error(chalk.red(`Error: ${error.message}`));
			if (getDebugFlag(config.projectRoot)) {
				console.error(error);
			}
		}
		throw error;
	}
}
|
||||
|
||||
/**
 * Handle completion reporting for both CLI and MCP
 * Sends a final MCP progress message (with real or estimated token counts)
 * and renders the appropriate CLI summary for the mode that ran.
 * @param {PrdParseConfig} config - Configuration object
 * @param {Object} serviceResult - Result from service handler
 * @param {Array} processedNewTasks - New tasks that were processed
 * @param {Array} finalTasks - All tasks after processing
 * @param {number} nextId - Next available task ID
 * @param {boolean} isStreaming - Whether this was streaming mode
 */
async function handleCompletionReporting(
	config,
	serviceResult,
	processedNewTasks,
	finalTasks,
	nextId,
	isStreaming
) {
	const { aiServiceResponse, estimatedInputTokens, estimatedOutputTokens } =
		serviceResult;

	// MCP progress reporting
	if (config.reportProgress) {
		// Telemetry counts as valid only when it carries non-zero token counts
		const hasValidTelemetry =
			aiServiceResponse?.telemetryData &&
			(aiServiceResponse.telemetryData.inputTokens > 0 ||
				aiServiceResponse.telemetryData.outputTokens > 0);

		let completionMessage;
		if (hasValidTelemetry) {
			const cost = aiServiceResponse.telemetryData.totalCost || 0;
			const currency = aiServiceResponse.telemetryData.currency || 'USD';
			completionMessage = `✅ Task Generation Completed | Tokens (I/O): ${aiServiceResponse.telemetryData.inputTokens}/${aiServiceResponse.telemetryData.outputTokens} | Cost: ${currency === 'USD' ? '$' : currency}${cost.toFixed(4)}`;
		} else {
			// Fall back to estimates; non-streaming mode has no output estimate
			const outputTokens = isStreaming ? estimatedOutputTokens : 'unknown';
			completionMessage = `✅ Task Generation Completed | ~Tokens (I/O): ${estimatedInputTokens}/${outputTokens} | Cost: ~$0.00`;
		}

		await config.reportProgress({
			progress: config.numTasks,
			total: config.numTasks,
			message: completionMessage
		});
	}

	// CLI output — streaming mode gets the tracker summary; non-streaming
	// gets the boxed success/next-steps output
	if (config.outputFormat === 'text' && !config.isMCP) {
		if (isStreaming && serviceResult.summary) {
			await displayCliSummary({
				processedTasks: processedNewTasks,
				nextId,
				summary: serviceResult.summary,
				prdPath: config.prdPath,
				tasksPath: config.tasksPath,
				usedFallback: serviceResult.usedFallback,
				aiServiceResponse
			});
		} else if (!isStreaming) {
			displayNonStreamingCliOutput({
				processedTasks: processedNewTasks,
				research: config.research,
				finalTasks,
				tasksPath: config.tasksPath,
				aiServiceResponse
			});
		}
	}
}
|
||||
|
||||
/**
 * Parse PRD with streaming progress reporting.
 * Thin wrapper: builds the config object and runs the shared core with the
 * streaming service handler.
 * @param {string} prdPath - Path to the PRD file
 * @param {string} tasksPath - Path to the output tasks file
 * @param {number} numTasks - Number of tasks to generate
 * @param {Object} [options] - Additional parse options
 * @returns {Promise<Object>} Core parse result
 */
async function parsePRDWithStreaming(prdPath, tasksPath, numTasks, options = {}) {
	return parsePRDCore(
		new PrdParseConfig(prdPath, tasksPath, numTasks, options),
		handleStreamingService,
		true
	);
}
|
||||
|
||||
/**
 * Parse PRD without streaming (fallback path).
 * Thin wrapper: builds the config object and runs the shared core with the
 * non-streaming service handler.
 * @param {string} prdPath - Path to the PRD file
 * @param {string} tasksPath - Path to the output tasks file
 * @param {number} numTasks - Number of tasks to generate
 * @param {Object} [options] - Additional parse options
 * @returns {Promise<Object>} Core parse result
 */
async function parsePRDWithoutStreaming(prdPath, tasksPath, numTasks, options = {}) {
	return parsePRDCore(
		new PrdParseConfig(prdPath, tasksPath, numTasks, options),
		handleNonStreamingService,
		false
	);
}
|
||||
|
||||
/**
 * Main entry point - decides between streaming and non-streaming parsing.
 *
 * When the config requests streaming, attempts the streaming path first and
 * transparently falls back to the non-streaming path on streaming-specific
 * failures (unsupported async iteration, stream processing errors, timeouts).
 * Any other error from the streaming attempt is rethrown untouched.
 *
 * @param {string} prdPath - Path to the PRD document to parse.
 * @param {string} tasksPath - Destination path for the generated tasks file.
 * @param {number} numTasks - Target number of tasks to generate.
 * @param {Object} [options={}] - Additional parse options (logging, MCP context, research flag, etc.).
 * @returns {Promise<*>} Resolves with the result of whichever parse path succeeded.
 * @throws Rethrows non-streaming-related errors from the streaming attempt.
 */
async function parsePRD(prdPath, tasksPath, numTasks, options = {}) {
	const config = new PrdParseConfig(prdPath, tasksPath, numTasks, options);

	// Streaming disabled up front: go straight to the blocking path.
	if (!config.useStreaming) {
		return await parsePRDWithoutStreaming(prdPath, tasksPath, numTasks, options);
	}

	try {
		return await parsePRDWithStreaming(prdPath, tasksPath, numTasks, options);
	} catch (streamingError) {
		// Only fall back on errors that are specific to the streaming
		// machinery (including timeouts); anything else is a real failure.
		const streamingCodes = [
			STREAMING_ERROR_CODES.NOT_ASYNC_ITERABLE,
			STREAMING_ERROR_CODES.STREAM_PROCESSING_FAILED,
			STREAMING_ERROR_CODES.STREAM_NOT_ITERABLE
		];
		const isStreamingError =
			streamingError instanceof StreamingError ||
			streamingCodes.includes(streamingError.code) ||
			TimeoutManager.isTimeoutError(streamingError);

		if (!isStreamingError) {
			throw streamingError;
		}

		const logger = new LoggingConfig(config.mcpLog, config.reportProgress);

		// Announce the fallback: colored console output for the CLI,
		// structured log report for MCP / non-text callers.
		if (config.outputFormat === 'text' && !config.isMCP) {
			const failureVerb = streamingError.message.includes('timed out')
				? 'timed out'
				: 'failed';
			console.log(
				chalk.yellow(
					`⚠️ Streaming operation ${failureVerb}. Falling back to non-streaming mode...`
				)
			);
		} else {
			logger.report(
				`Streaming failed (${streamingError.message}), falling back to non-streaming mode...`,
				'warn'
			);
		}

		// Retry the whole parse through the blocking path.
		return await parsePRDWithoutStreaming(prdPath, tasksPath, numTasks, options);
	}
}

export default parsePRD;
|
||||