diff --git a/mcp-server/src/core/direct-functions/parse-prd.js b/mcp-server/src/core/direct-functions/parse-prd.js index 2a5ac33f..1c93cd92 100644 --- a/mcp-server/src/core/direct-functions/parse-prd.js +++ b/mcp-server/src/core/direct-functions/parse-prd.js @@ -8,9 +8,11 @@ import fs from 'fs'; import { parsePRD } from '../../../../scripts/modules/task-manager.js'; import { enableSilentMode, - disableSilentMode + disableSilentMode, + isSilentMode } from '../../../../scripts/modules/utils.js'; import { createLogWrapper } from '../../tools/utils.js'; +import { getDefaultNumTasks } from '../../../../scripts/modules/config-manager.js'; /** * Direct function wrapper for parsing PRD documents and generating tasks. @@ -21,177 +23,160 @@ import { createLogWrapper } from '../../tools/utils.js'; * @returns {Promise} - Result object with success status and data/error information. */ export async function parsePRDDirect(args, log, context = {}) { - const { session } = context; // Only extract session + const { session } = context; + // Extract projectRoot from args + const { + input: inputArg, + output: outputArg, + numTasks: numTasksArg, + force, + append, + projectRoot + } = args; - try { - log.info(`Parsing PRD document with args: ${JSON.stringify(args)}`); + const logWrapper = createLogWrapper(log); - // Validate required parameters - if (!args.projectRoot) { - const errorMessage = 'Project root is required for parsePRDDirect'; - log.error(errorMessage); - return { - success: false, - error: { code: 'MISSING_PROJECT_ROOT', message: errorMessage }, - fromCache: false - }; - } - if (!args.input) { - const errorMessage = 'Input file path is required for parsePRDDirect'; - log.error(errorMessage); - return { - success: false, - error: { code: 'MISSING_INPUT_PATH', message: errorMessage }, - fromCache: false - }; - } - if (!args.output) { - const errorMessage = 'Output file path is required for parsePRDDirect'; - log.error(errorMessage); - return { - success: false, - error: { code: 'MISSING_OUTPUT_PATH', message: errorMessage }, - fromCache: false - }; - } - - // Resolve input path (expecting absolute path or path relative to project root) - const projectRoot = args.projectRoot; - const inputPath = path.isAbsolute(args.input) - ? args.input - : path.resolve(projectRoot, args.input); - - // Verify input file exists - if (!fs.existsSync(inputPath)) { - const errorMessage = `Input file not found: ${inputPath}`; - log.error(errorMessage); - return { - success: false, - error: { - code: 'INPUT_FILE_NOT_FOUND', - message: errorMessage, - details: `Checked path: ${inputPath}\nProject root: ${projectRoot}\nInput argument: ${args.input}` - }, - fromCache: false - }; - } - - // Resolve output path (expecting absolute path or path relative to project root) - const outputPath = path.isAbsolute(args.output) - ? args.output - : path.resolve(projectRoot, args.output); - - // Ensure output directory exists - const outputDir = path.dirname(outputPath); - if (!fs.existsSync(outputDir)) { - log.info(`Creating output directory: ${outputDir}`); - fs.mkdirSync(outputDir, { recursive: true }); - } - - // Parse number of tasks - handle both string and number values - let numTasks = 10; // Default - if (args.numTasks) { - numTasks = - typeof args.numTasks === 'string' - ? parseInt(args.numTasks, 10) - : args.numTasks; - if (isNaN(numTasks)) { - numTasks = 10; // Fallback to default if parsing fails - log.warn(`Invalid numTasks value: ${args.numTasks}. 
Using default: 10`); - } - } - - // Extract the append flag from args - const append = Boolean(args.append) === true; - - // Log key parameters including append flag - log.info( - `Preparing to parse PRD from ${inputPath} and output to ${outputPath} with ${numTasks} tasks, append mode: ${append}` + // --- Input Validation and Path Resolution --- + if (!projectRoot || !path.isAbsolute(projectRoot)) { + logWrapper.error( + 'parsePRDDirect requires an absolute projectRoot argument.' ); - - // --- Logger Wrapper --- - const mcpLog = createLogWrapper(log); - - // Prepare options for the core function - const options = { - mcpLog, - session - }; - - // Enable silent mode to prevent console logs from interfering with JSON response - enableSilentMode(); - try { - // Make sure the output directory exists - const outputDir = path.dirname(outputPath); - if (!fs.existsSync(outputDir)) { - log.info(`Creating output directory: ${outputDir}`); - fs.mkdirSync(outputDir, { recursive: true }); - } - - // Execute core parsePRD function with AI client - const tasksDataResult = await parsePRD( - inputPath, - outputPath, - numTasks, - { - mcpLog: logWrapper, - session, - append - }, - aiClient, - modelConfig - ); - - // Since parsePRD doesn't return a value but writes to a file, we'll read the result - // to return it to the caller - if (fs.existsSync(outputPath)) { - const tasksData = JSON.parse(fs.readFileSync(outputPath, 'utf8')); - const actionVerb = append ? 'appended' : 'generated'; - const message = `Successfully ${actionVerb} ${tasksData.tasks?.length || 0} tasks from PRD`; - - if (!tasksDataResult || !tasksDataResult.tasks || !tasksData) { - throw new Error( - 'Core parsePRD function did not return valid task data.' - ); - } - - log.info(message); - - return { - success: true, - data: { - message, - taskCount: tasksDataResult.tasks?.length || 0, - outputPath, - appended: append - }, - fromCache: false // This operation always modifies state and should never be cached - }; - } else { - const errorMessage = `Tasks file was not created at ${outputPath}`; - log.error(errorMessage); - return { - success: false, - error: { code: 'OUTPUT_FILE_NOT_CREATED', message: errorMessage }, - fromCache: false - }; - } - } finally { - // Always restore normal logging - disableSilentMode(); - } - } catch (error) { - // Make sure to restore normal logging even if there's an error - disableSilentMode(); - - log.error(`Error parsing PRD: ${error.message}`); return { success: false, error: { - code: error.code || 'PARSE_PRD_ERROR', // Use error code if available - message: error.message || 'Unknown error parsing PRD' - }, - fromCache: false + code: 'MISSING_ARGUMENT', + message: 'projectRoot is required and must be absolute.' + } }; } + if (!inputArg) { + logWrapper.error('parsePRDDirect called without input path'); + return { + success: false, + error: { code: 'MISSING_ARGUMENT', message: 'Input path is required' } + }; + } + + // Resolve input and output paths relative to projectRoot if they aren't absolute + const inputPath = path.resolve(projectRoot, inputArg); + const outputPath = outputArg + ? 
path.resolve(projectRoot, outputArg) : path.resolve(projectRoot, 'tasks', 'tasks.json'); // Default output path + + // Check if input file exists + if (!fs.existsSync(inputPath)) { + const errorMsg = `Input PRD file not found at resolved path: ${inputPath}`; + logWrapper.error(errorMsg); + return { + success: false, + error: { code: 'FILE_NOT_FOUND', message: errorMsg } + }; + } + + const outputDir = path.dirname(outputPath); + try { + if (!fs.existsSync(outputDir)) { + logWrapper.info(`Creating output directory: ${outputDir}`); + fs.mkdirSync(outputDir, { recursive: true }); + } + } catch (dirError) { + logWrapper.error( + `Failed to create output directory ${outputDir}: ${dirError.message}` + ); + // Return an error response immediately if dir creation fails + return { + success: false, + error: { + code: 'DIRECTORY_CREATION_ERROR', + message: `Failed to create output directory: ${dirError.message}` + } + }; + } + + let numTasks = getDefaultNumTasks(projectRoot); + if (numTasksArg) { + numTasks = + typeof numTasksArg === 'string' ? parseInt(numTasksArg, 10) : numTasksArg; + if (isNaN(numTasks) || numTasks <= 0) { + // Ensure positive number + numTasks = getDefaultNumTasks(projectRoot); // Fallback to default if parsing fails or invalid + logWrapper.warn( + `Invalid numTasks value: ${numTasksArg}. Using default: ${numTasks}` + ); + } + } + + const useForce = force === true; + const useAppend = append === true; + if (useAppend) { + logWrapper.info('Append mode enabled.'); + if (useForce) { + logWrapper.warn( + 'Both force and append were provided. Append mode takes precedence; the existing tasks file will not be overwritten.' + ); + } + } + + logWrapper.info( + `Parsing PRD via direct function. Input: ${inputPath}, Output: ${outputPath}, NumTasks: ${numTasks}, Force: ${useForce}, Append: ${useAppend}, ProjectRoot: ${projectRoot}` + ); + + const wasSilent = isSilentMode(); + if (!wasSilent) { + enableSilentMode(); + } + + try { + // Call the core parsePRD function + const result = await parsePRD( + inputPath, + outputPath, + numTasks, + { session, mcpLog: logWrapper, projectRoot, useForce, useAppend }, + 'json' + ); + + // parsePRD returns { success: true, tasks: processedTasks } on success + if (result && result.success && Array.isArray(result.tasks)) { + logWrapper.success( + `Successfully parsed PRD. Generated ${result.tasks.length} tasks.` + ); + return { + success: true, + data: { + message: `Successfully parsed PRD and generated ${result.tasks.length} tasks.`, + outputPath: outputPath, + taskCount: result.tasks.length + // Optionally include tasks if needed by client: tasks: result.tasks + } + }; + } else { + // Handle case where core function didn't return expected success structure + logWrapper.error( + 'Core parsePRD function did not return a successful structure.' + ); + return { + success: false, + error: { + code: 'CORE_FUNCTION_ERROR', + message: + result?.message || + 'Core function failed to parse PRD or returned unexpected result.'
+ } + }; + } + } catch (error) { + logWrapper.error(`Error executing core parsePRD: ${error.message}`); + return { + success: false, + error: { + code: 'PARSE_PRD_CORE_ERROR', + message: error.message || 'Unknown error parsing PRD' + } + }; + } finally { + if (!wasSilent && isSilentMode()) { + disableSilentMode(); + } + } } diff --git a/mcp-server/src/tools/parse-prd.js b/mcp-server/src/tools/parse-prd.js index 909e4c9c..7cd36855 100644 --- a/mcp-server/src/tools/parse-prd.js +++ b/mcp-server/src/tools/parse-prd.js @@ -4,16 +4,12 @@ */ import { z } from 'zod'; -import { - getProjectRootFromSession, - handleApiResult, - createErrorResponse -} from './utils.js'; +import path from 'path'; +import { handleApiResult, createErrorResponse } from './utils.js'; import { parsePRDDirect } from '../core/task-master-core.js'; -import { resolveProjectPaths } from '../core/utils/path-utils.js'; /** - * Register the parsePRD tool with the MCP server + * Register the parse_prd tool * @param {Object} server - FastMCP server instance */ export function registerParsePRDTool(server) { @@ -42,71 +38,64 @@ export function registerParsePRDTool(server) { force: z .boolean() .optional() - .describe('Allow overwriting an existing tasks.json file.'), + .default(false) + .describe('Overwrite existing output file without prompting.'), append: z .boolean() .optional() - .describe( - 'Append new tasks to existing tasks.json instead of overwriting' - ), + .default(false) + .describe('Append generated tasks to existing file.'), projectRoot: z .string() - .describe('The directory of the project. Must be absolute path.') + .describe('The directory of the project. Must be an absolute path.') }), execute: async (args, { log, session }) => { + const toolName = 'parse_prd'; try { - log.info(`Parsing PRD with args: ${JSON.stringify(args)}`); - - // Get project root from args or session - const rootFolder = - args.projectRoot || getProjectRootFromSession(session, log); - - if (!rootFolder) { - return createErrorResponse( - 'Could not determine project root. Please provide it explicitly or ensure your session contains valid root information.' - ); - } - - // Resolve input (PRD) and output (tasks.json) paths using the utility - const { projectRoot, prdPath, tasksJsonPath } = resolveProjectPaths( - rootFolder, - args, - log + log.info( + `Executing ${toolName} tool with args: ${JSON.stringify(args)}` ); - // Check if PRD path was found (resolveProjectPaths returns null if not found and not provided) - if (!prdPath) { + // 1. Get Project Root + const rootFolder = args.projectRoot; + if (!rootFolder || !path.isAbsolute(rootFolder)) { + log.error( + `${toolName}: projectRoot is required and must be absolute.` + ); return createErrorResponse( - 'No PRD document found or provided. Please ensure a PRD file exists (e.g., PRD.md) or provide a valid input file path.' + 'projectRoot is required and must be absolute.' ); } + log.info(`${toolName}: Project root: ${rootFolder}`); - // Call the direct function with fully resolved paths + // 2. 
Call Direct Function - Pass relevant args including projectRoot + // Path resolution (input/output) is handled within the direct function now const result = await parsePRDDirect( { - projectRoot: projectRoot, - input: prdPath, - output: tasksJsonPath, - numTasks: args.numTasks, + // Pass args directly needed by the direct function + input: args.input, // Pass relative or absolute path + output: args.output, // Pass relative or absolute path + numTasks: args.numTasks, // Pass number (direct func handles default) force: args.force, - append: args.append + append: args.append, + projectRoot: rootFolder }, log, - { session } + { session } // Pass context object with session ); - if (result.success) { - log.info(`Successfully parsed PRD: ${result.data.message}`); - } else { - log.error( - `Failed to parse PRD: ${result.error?.message || 'Unknown error'}` - ); - } - + // 3. Handle Result + log.info( + `${toolName}: Direct function result: success=${result.success}` + ); return handleApiResult(result, log, 'Error parsing PRD'); } catch (error) { - log.error(`Error in parse-prd tool: ${error.message}`); - return createErrorResponse(error.message); + log.error( + `Critical error in ${toolName} tool execute: ${error.message}` + ); + return createErrorResponse( + `Internal tool error (${toolName}): ${error.message}` + ); } } }); diff --git a/scripts/modules/config-manager.js b/scripts/modules/config-manager.js index 64f98b13..e9302d08 100644 --- a/scripts/modules/config-manager.js +++ b/scripts/modules/config-manager.js @@ -345,6 +345,12 @@ function getDefaultSubtasks(explicitRoot = null) { return isNaN(parsedVal) ? DEFAULTS.global.defaultSubtasks : parsedVal; } +function getDefaultNumTasks(explicitRoot = null) { + const val = getGlobalConfig(explicitRoot).defaultNumTasks; + const parsedVal = parseInt(val, 10); + return isNaN(parsedVal) ? 
DEFAULTS.global.defaultNumTasks : parsedVal; +} + function getDefaultPriority(explicitRoot = null) { // Directly return value from config return getGlobalConfig(explicitRoot).defaultPriority; @@ -702,6 +708,7 @@ export { // Global setting getters (No env var overrides) getLogLevel, getDebugFlag, + getDefaultNumTasks, getDefaultSubtasks, getDefaultPriority, getProjectName, diff --git a/scripts/modules/task-manager/parse-prd.js b/scripts/modules/task-manager/parse-prd.js index a4d79697..a5197943 100644 --- a/scripts/modules/task-manager/parse-prd.js +++ b/scripts/modules/task-manager/parse-prd.js @@ -9,28 +9,30 @@ import { writeJSON, enableSilentMode, disableSilentMode, - isSilentMode + isSilentMode, + readJSON, + findTaskById } from '../utils.js'; import { generateObjectService } from '../ai-services-unified.js'; import { getDebugFlag } from '../config-manager.js'; import generateTaskFiles from './generate-task-files.js'; -// Define Zod schema for task validation -const TaskSchema = z.object({ - id: z.number(), - title: z.string(), - description: z.string(), - status: z.string().default('pending'), - dependencies: z.array(z.number()).default([]), - priority: z.string().default('medium'), - details: z.string().optional(), - testStrategy: z.string().optional() +// Define the Zod schema for a SINGLE task object +const prdSingleTaskSchema = z.object({ + id: z.number().int().positive(), + title: z.string().min(1), + description: z.string().min(1), + details: z.string().optional().default(''), + testStrategy: z.string().optional().default(''), + priority: z.enum(['high', 'medium', 'low']).default('medium'), + dependencies: z.array(z.number().int().positive()).optional().default([]), + status: z.string().optional().default('pending') }); -// Define Zod schema for the complete tasks data -const TasksDataSchema = z.object({ - tasks: z.array(TaskSchema), +// Define the Zod schema for the ENTIRE expected AI response object +const prdResponseSchema = z.object({ + tasks: z.array(prdSingleTaskSchema), metadata: z.object({ projectName: z.string(), totalTasks: z.number(), @@ -45,35 +47,114 @@ const TasksDataSchema = z.object({ * @param {string} tasksPath - Path to the tasks.json file * @param {number} numTasks - Number of tasks to generate * @param {Object} options - Additional options - * @param {Object} options.reportProgress - Function to report progress to MCP server (optional) - * @param {Object} options.mcpLog - MCP logger object (optional) - * @param {Object} options.session - Session object from MCP server (optional) + * @param {boolean} [options.useForce=false] - Whether to overwrite existing tasks.json. + * @param {boolean} [options.useAppend=false] - Append to existing tasks file. + * @param {Object} [options.reportProgress] - Function to report progress (optional, likely unused). + * @param {Object} [options.mcpLog] - MCP logger object (optional). + * @param {Object} [options.session] - Session object from MCP server (optional). + * @param {string} [options.projectRoot] - Project root path (for MCP/env fallback). + * @param {string} [outputFormat='text'] - Output format ('text' or 'json'). */ async function parsePRD(prdPath, tasksPath, numTasks, options = {}) { - const { reportProgress, mcpLog, session } = options; + const { + reportProgress, + mcpLog, + session, + projectRoot, + useForce = false, + useAppend = false + } = options; + const isMCP = !!mcpLog; + const outputFormat = isMCP ? 
'json' : 'text'; - // Determine output format based on mcpLog presence (simplification) - const outputFormat = mcpLog ? 'json' : 'text'; + const logFn = mcpLog + ? mcpLog + : { + // Wrapper for CLI + info: (...args) => log('info', ...args), + warn: (...args) => log('warn', ...args), + error: (...args) => log('error', ...args), + debug: (...args) => log('debug', ...args), + success: (...args) => log('success', ...args) + }; - // Create custom reporter that checks for MCP log and silent mode + // Create custom reporter using logFn const report = (message, level = 'info') => { - if (mcpLog) { - mcpLog[level](message); + // Check logFn directly + if (logFn && typeof logFn[level] === 'function') { + logFn[level](message); } else if (!isSilentMode() && outputFormat === 'text') { - // Only log to console if not in silent mode and outputFormat is 'text' + // Fallback to original log only if necessary and in CLI text mode log(level, message); } }; - try { - report(`Parsing PRD file: ${prdPath}`, 'info'); + report( + `Parsing PRD file: ${prdPath}, Force: ${useForce}, Append: ${useAppend}` + ); - // Read the PRD content + let existingTasks = []; + let nextId = 1; + + try { + // Handle file existence and overwrite/append logic + if (fs.existsSync(tasksPath)) { + if (useAppend) { + report( + `Append mode enabled. Reading existing tasks from ${tasksPath}`, + 'info' + ); + const existingData = readJSON(tasksPath); // Use readJSON utility + if (existingData && Array.isArray(existingData.tasks)) { + existingTasks = existingData.tasks; + if (existingTasks.length > 0) { + nextId = Math.max(...existingTasks.map((t) => t.id || 0)) + 1; + report( + `Found ${existingTasks.length} existing tasks. Next ID will be ${nextId}.`, + 'info' + ); + } + } else { + report( + `Could not read existing tasks from ${tasksPath} or format is invalid. Proceeding without appending.`, + 'warn' + ); + existingTasks = []; // Reset if read fails + } + } else if (!useForce) { + // Not appending and not forcing overwrite + const overwriteError = new Error( + `Output file ${tasksPath} already exists. Use --force to overwrite or --append.` + ); + report(overwriteError.message, 'error'); + if (outputFormat === 'text') { + console.error(chalk.red(overwriteError.message)); + process.exit(1); + } else { + throw overwriteError; + } + } else { + // Force overwrite is true + report( + `Force flag enabled. Overwriting existing file: ${tasksPath}`, + 'info' + ); + } + } + + report(`Reading PRD content from ${prdPath}`, 'info'); const prdContent = fs.readFileSync(prdPath, 'utf8'); + if (!prdContent) { + throw new Error(`Input file ${prdPath} is empty or could not be read.`); + } // Build system prompt for PRD parsing - const systemPrompt = `You are an AI assistant helping to break down a Product Requirements Document (PRD) into a set of sequential development tasks. -Your goal is to create ${numTasks} well-structured, actionable development tasks based on the PRD provided. + const systemPrompt = `You are an AI assistant specialized in analyzing Product Requirements Documents (PRDs) and generating a structured, logically ordered, dependency-aware and sequenced list of development tasks in JSON format. +Analyze the provided PRD content and generate approximately ${numTasks} top-level development tasks. 
If the complexity or level of detail of the PRD is high, generate more tasks relative to its complexity. +Each task should represent a logical unit of work needed to implement the requirements, focusing on the most direct and effective approach without unnecessary complexity or overengineering. Include pseudo-code, implementation details, and test strategy for each task. Find the most up-to-date information to implement each task. +Assign sequential IDs starting from ${nextId}. Infer title, description, details, and test strategy for each task based *only* on the PRD content. +Set status to 'pending', dependencies to an empty array [], and priority to 'medium' initially for all tasks. +Respond ONLY with a valid JSON object containing a single key "tasks", where the value is an array of task objects adhering to the provided Zod schema. Do not include any explanation or markdown formatting. Each task should follow this JSON structure: { @@ -88,12 +169,12 @@ Each task should follow this JSON structure: } Guidelines: -1. Create exactly ${numTasks} tasks, numbered from 1 to ${numTasks} -2. Each task should be atomic and focused on a single responsibility +1. Unless complexity warrants otherwise, create exactly ${numTasks} tasks, numbered sequentially starting from ${nextId} +2. Each task should be atomic and focused on a single responsibility following the most up-to-date best practices and standards 3. Order tasks logically - consider dependencies and implementation sequence 4. Early tasks should focus on setup, core functionality first, then advanced features 5. Include clear validation/testing approach for each task -6. Set appropriate dependency IDs (a task can only depend on tasks with lower IDs) +6. Set appropriate dependency IDs (a task can only depend on tasks with lower IDs, potentially including existing tasks with IDs less than ${nextId} if applicable) 7. Assign priority (high/medium/low) based on criticality and dependency order 8. Include detailed implementation guidance in the "details" field 9. If the PRD contains specific requirements for libraries, database schemas, frameworks, tech stacks, or any other implementation details, STRICTLY ADHERE to these requirements in your task breakdown and do not discard them under any circumstance @@ -101,41 +182,40 @@ Guidelines: 11. Always aim to provide the most direct path to implementation, avoiding over-engineering or roundabout approaches`; // Build user prompt with PRD content - const userPrompt = `Here's the Product Requirements Document (PRD) to break down into ${numTasks} tasks: + const userPrompt = `Here's the Product Requirements Document (PRD) to break down into approximately ${numTasks} tasks, starting IDs from ${nextId}:\n\n${prdContent}\n\n -${prdContent} - -Return your response in this format: + Return your response in this format: { - "tasks": [ - { - "id": 1, - "title": "Setup Project Repository", - "description": "...", - ... - }, - ... - ], - "metadata": { - "projectName": "PRD Implementation", - "totalTasks": ${numTasks}, - "sourceFile": "${prdPath}", - "generatedAt": "YYYY-MM-DD" - } + "tasks": [ + { + "id": 1, + "title": "Setup Project Repository", + "description": "...", + ... + }, + ...
+ ], + "metadata": { + "projectName": "PRD Implementation", + "totalTasks": ${numTasks}, + "sourceFile": "${prdPath}", + "generatedAt": "YYYY-MM-DD" + } }`; // Call the unified AI service report('Calling AI service to generate tasks from PRD...', 'info'); - // Call generateObjectService with proper parameters - const tasksData = await generateObjectService({ - role: 'main', // Use 'main' role to get the model from config - session: session, // Pass session for API key resolution - schema: TasksDataSchema, // Pass the schema for validation - objectName: 'tasks_data', // Name the generated object - systemPrompt: systemPrompt, // System instructions - prompt: userPrompt, // User prompt with PRD content - reportProgress // Progress reporting function + // Call generateObjectService with the CORRECT schema + const generatedData = await generateObjectService({ + role: 'main', + session: session, + projectRoot: projectRoot, + schema: prdResponseSchema, + objectName: 'tasks_data', + systemPrompt: systemPrompt, + prompt: userPrompt, + reportProgress }); // Create the directory if it doesn't exist @@ -143,11 +223,58 @@ Return your response in this format: if (!fs.existsSync(tasksDir)) { fs.mkdirSync(tasksDir, { recursive: true }); } + logFn.success('Successfully parsed PRD via AI service.'); // Assumes generateObjectService validated + + // Validate and Process Tasks + if (!generatedData || !Array.isArray(generatedData.tasks)) { + // This error *shouldn't* happen if generateObjectService enforced prdResponseSchema + // But keep it as a safeguard + logFn.error( + `Internal Error: generateObjectService returned unexpected data structure: ${JSON.stringify(generatedData)}` + ); + throw new Error( + 'AI service returned unexpected data structure after validation.' + ); + } + + let currentId = nextId; + const taskMap = new Map(); + const processedNewTasks = generatedData.tasks.map((task) => { + const newId = currentId++; + taskMap.set(task.id, newId); + return { + ...task, + id: newId, + status: 'pending', + priority: task.priority || 'medium', + dependencies: Array.isArray(task.dependencies) ? task.dependencies : [], + subtasks: [] + }; + }); + + // Remap dependencies for the NEWLY processed tasks + processedNewTasks.forEach((task) => { + task.dependencies = task.dependencies + .map((depId) => taskMap.get(depId)) // Map old AI ID to new sequential ID + .filter( + (newDepId) => + newDepId != null && // Must exist + newDepId < task.id && // Must be a lower ID (could be existing or newly generated) + (findTaskById(existingTasks, newDepId) || // Check if it exists in old tasks OR + processedNewTasks.some((t) => t.id === newDepId)) // check if it exists in new tasks + ); + }); + + const allTasks = useAppend + ? 
[...existingTasks, ...processedNewTasks] + : processedNewTasks; + + const finalTaskData = { tasks: allTasks }; // Use the combined list // Write the tasks to the file - writeJSON(tasksPath, tasksData); + writeJSON(tasksPath, finalTaskData); report( - `Successfully generated ${tasksData.tasks.length} tasks from PRD`, + `Successfully wrote ${allTasks.length} total tasks to ${tasksPath} (${processedNewTasks.length} new).`, 'success' ); report(`Tasks saved to: ${tasksPath}`, 'info'); @@ -156,10 +283,10 @@ Return your response in this format: if (reportProgress && mcpLog) { // Enable silent mode when being called from MCP server enableSilentMode(); - await generateTaskFiles(tasksPath, tasksDir); + await generateTaskFiles(tasksPath, path.dirname(tasksPath)); disableSilentMode(); } else { - await generateTaskFiles(tasksPath, tasksDir); + await generateTaskFiles(tasksPath, path.dirname(tasksPath)); } // Only show success boxes for text output (CLI) @@ -167,7 +294,7 @@ Return your response in this format: console.log( boxen( chalk.green( - `Successfully generated ${tasksData.tasks.length} tasks from PRD` + `Successfully generated ${processedNewTasks.length} new tasks. Total tasks in ${tasksPath}: ${allTasks.length}` ), { padding: 1, borderColor: 'green', borderStyle: 'round' } ) @@ -189,7 +316,7 @@ Return your response in this format: ); } - return tasksData; + return { success: true, tasks: processedNewTasks }; } catch (error) { report(`Error parsing PRD: ${error.message}`, 'error'); @@ -197,8 +324,8 @@ Return your response in this format: if (outputFormat === 'text') { console.error(chalk.red(`Error: ${error.message}`)); - if (getDebugFlag(session)) { - // Use getter + if (getDebugFlag(projectRoot)) { + // Use projectRoot for debug flag check console.error(error); } diff --git a/tests/unit/ai-services-unified.test.js b/tests/unit/ai-services-unified.test.js index 827dc728..59e3d32b 100644 --- a/tests/unit/ai-services-unified.test.js +++ b/tests/unit/ai-services-unified.test.js @@ -40,12 +40,14 @@ jest.unstable_mockModule('../../src/ai-providers/perplexity.js', () => ({ // ... Mock other providers (google, openai, etc.) similarly ... 
-// Mock utils logger and API key resolver +// Mock utils logger, API key resolver, AND findProjectRoot const mockLog = jest.fn(); const mockResolveEnvVariable = jest.fn(); +const mockFindProjectRoot = jest.fn(); jest.unstable_mockModule('../../scripts/modules/utils.js', () => ({ log: mockLog, - resolveEnvVariable: mockResolveEnvVariable + resolveEnvVariable: mockResolveEnvVariable, + findProjectRoot: mockFindProjectRoot })); // Import the module to test (AFTER mocks) @@ -54,6 +56,8 @@ const { generateTextService } = await import( ); describe('Unified AI Services', () => { + const fakeProjectRoot = '/fake/project/root'; // Define for reuse + beforeEach(() => { // Clear mocks before each test jest.clearAllMocks(); // Clears all mocks @@ -76,6 +80,9 @@ describe('Unified AI Services', () => { if (key === 'PERPLEXITY_API_KEY') return 'mock-perplexity-key'; return null; }); + + // Set a default behavior for the new mock + mockFindProjectRoot.mockReturnValue(fakeProjectRoot); }); describe('generateTextService', () => { @@ -91,12 +98,16 @@ describe('Unified AI Services', () => { const result = await generateTextService(params); expect(result).toBe('Main provider response'); - expect(mockGetMainProvider).toHaveBeenCalled(); - expect(mockGetMainModelId).toHaveBeenCalled(); - expect(mockGetParametersForRole).toHaveBeenCalledWith('main'); + expect(mockGetMainProvider).toHaveBeenCalledWith(fakeProjectRoot); + expect(mockGetMainModelId).toHaveBeenCalledWith(fakeProjectRoot); + expect(mockGetParametersForRole).toHaveBeenCalledWith( + 'main', + fakeProjectRoot + ); expect(mockResolveEnvVariable).toHaveBeenCalledWith( 'ANTHROPIC_API_KEY', - params.session + params.session, + fakeProjectRoot ); expect(mockGenerateAnthropicText).toHaveBeenCalledTimes(1); expect(mockGenerateAnthropicText).toHaveBeenCalledWith({ @@ -109,26 +120,43 @@ describe('Unified AI Services', () => { { role: 'user', content: 'Test' } ] }); - // Verify other providers NOT called expect(mockGeneratePerplexityText).not.toHaveBeenCalled(); }); test('should fall back to fallback provider if main fails', async () => { const mainError = new Error('Main provider failed'); mockGenerateAnthropicText - .mockRejectedValueOnce(mainError) // Main fails first - .mockResolvedValueOnce('Fallback provider response'); // Fallback succeeds + .mockRejectedValueOnce(mainError) + .mockResolvedValueOnce('Fallback provider response'); - const params = { role: 'main', prompt: 'Fallback test' }; + const explicitRoot = '/explicit/test/root'; + const params = { + role: 'main', + prompt: 'Fallback test', + projectRoot: explicitRoot + }; const result = await generateTextService(params); expect(result).toBe('Fallback provider response'); - expect(mockGetMainProvider).toHaveBeenCalled(); - expect(mockGetFallbackProvider).toHaveBeenCalled(); // Fallback was tried - expect(mockGenerateAnthropicText).toHaveBeenCalledTimes(2); // Called for main (fail) and fallback (success) - expect(mockGeneratePerplexityText).not.toHaveBeenCalled(); // Research not called + expect(mockGetMainProvider).toHaveBeenCalledWith(explicitRoot); + expect(mockGetFallbackProvider).toHaveBeenCalledWith(explicitRoot); + expect(mockGetParametersForRole).toHaveBeenCalledWith( + 'main', + explicitRoot + ); + expect(mockGetParametersForRole).toHaveBeenCalledWith( + 'fallback', + explicitRoot + ); - // Check log messages for fallback attempt + expect(mockResolveEnvVariable).toHaveBeenCalledWith( + 'ANTHROPIC_API_KEY', + undefined, + explicitRoot + ); + + 
expect(mockGenerateAnthropicText).toHaveBeenCalledTimes(2); + expect(mockGeneratePerplexityText).not.toHaveBeenCalled(); expect(mockLog).toHaveBeenCalledWith( 'error', expect.stringContaining('Service call failed for role main') @@ -153,12 +181,40 @@ describe('Unified AI Services', () => { const result = await generateTextService(params); expect(result).toBe('Research provider response'); - expect(mockGetMainProvider).toHaveBeenCalled(); - expect(mockGetFallbackProvider).toHaveBeenCalled(); - expect(mockGetResearchProvider).toHaveBeenCalled(); // Research was tried - expect(mockGenerateAnthropicText).toHaveBeenCalledTimes(2); // main, fallback - expect(mockGeneratePerplexityText).toHaveBeenCalledTimes(1); // research + expect(mockGetMainProvider).toHaveBeenCalledWith(fakeProjectRoot); + expect(mockGetFallbackProvider).toHaveBeenCalledWith(fakeProjectRoot); + expect(mockGetResearchProvider).toHaveBeenCalledWith(fakeProjectRoot); + expect(mockGetParametersForRole).toHaveBeenCalledWith( + 'main', + fakeProjectRoot + ); + expect(mockGetParametersForRole).toHaveBeenCalledWith( + 'fallback', + fakeProjectRoot + ); + expect(mockGetParametersForRole).toHaveBeenCalledWith( + 'research', + fakeProjectRoot + ); + expect(mockResolveEnvVariable).toHaveBeenCalledWith( + 'ANTHROPIC_API_KEY', + undefined, + fakeProjectRoot + ); + expect(mockResolveEnvVariable).toHaveBeenCalledWith( + 'ANTHROPIC_API_KEY', + undefined, + fakeProjectRoot + ); + expect(mockResolveEnvVariable).toHaveBeenCalledWith( + 'PERPLEXITY_API_KEY', + undefined, + fakeProjectRoot + ); + + expect(mockGenerateAnthropicText).toHaveBeenCalledTimes(2); + expect(mockGeneratePerplexityText).toHaveBeenCalledTimes(1); expect(mockLog).toHaveBeenCalledWith( 'error', expect.stringContaining('Service call failed for role fallback') @@ -204,6 +260,23 @@ describe('Unified AI Services', () => { ); }); + test('should use default project root or handle null if findProjectRoot returns null', async () => { + mockFindProjectRoot.mockReturnValue(null); // Simulate not finding root + mockGenerateAnthropicText.mockResolvedValue('Response with no root'); + + const params = { role: 'main', prompt: 'No root test' }; // No explicit root passed + await generateTextService(params); + + expect(mockGetMainProvider).toHaveBeenCalledWith(null); + expect(mockGetParametersForRole).toHaveBeenCalledWith('main', null); + expect(mockResolveEnvVariable).toHaveBeenCalledWith( + 'ANTHROPIC_API_KEY', + undefined, + null + ); + expect(mockGenerateAnthropicText).toHaveBeenCalledTimes(1); + }); + // Add more tests for edge cases: // - Missing API keys (should throw from _resolveApiKey) // - Unsupported provider configured (should skip and log)