refactor(expand/all): Implement additive expansion and complexity report integration

Refactors the `expandTask` and `expandAllTasks` features to complete subtask 61.38, building on the refactor begun in subtask 61.37.

Key Changes:

- **Additive Expansion (`expandTask`, `expandAllTasks`):**

    - Modified `expandTask` default behavior to append newly generated subtasks to any existing ones.

    - Added a `force` flag (passed down from CLI/MCP via `--force` option/parameter) to `expandTask` and `expandAllTasks`. When `force` is true, existing subtasks are cleared before generating new ones.

    - Updated relevant CLI command (`expand`), MCP tool (`expand_task`, `expand_all`), and direct function wrappers (`expandTaskDirect`, `expandAllTasksDirect`) to handle and pass the `force` flag.

- **Complexity Report Integration (`expandTask`):**

    - `expandTask` now reads `scripts/task-complexity-report.json`.

    - If an analysis entry exists for the target task:

        - `recommendedSubtasks` is used to determine the number of subtasks to generate (unless `--num` is explicitly provided).

        - `expansionPrompt` is used as the primary prompt content for the AI.

        - `reasoning` is appended to any additional context provided.

    - If no report entry exists or the report is missing, it falls back to default subtask count (from config) and standard prompt generation.

- **`expandAllTasks` Orchestration:**

    - Refactored `expandAllTasks` to primarily iterate through eligible tasks (pending/in-progress, considering `force` flag and existing subtasks) and call the updated `expandTask` function for each.

    - Removed redundant logic (like complexity reading or explicit subtask clearing) now handled within `expandTask`.

    - Ensures correct context (`session`, `mcpLog`) and flags (`useResearch`, `force`) are passed down.

- **Configuration & Cleanup:**

    - Updated `.cursor/mcp.json` with new Perplexity/Anthropic API keys (old ones invalidated).

    - Completed refactoring of `expandTask` started in 61.37, confirming usage of `generateTextService` and appropriate prompts.

- **Task Management:**

    - Marked subtask 61.37 as complete.

    - Updated `.changeset/cuddly-zebras-matter.md` to reflect user-facing changes.

These changes finalize the refactoring of the task expansion features, making them more robust, configurable via complexity analysis, and aligned with the unified AI service architecture.
This commit is contained in:
Eyal Toledano
2025-04-25 02:57:08 -04:00
parent 99b1a0ad7a
commit ef782ff5bd
12 changed files with 1068 additions and 542 deletions

View File

@@ -1,334 +1,178 @@
import fs from 'fs';
import path from 'path';
import chalk from 'chalk';
import boxen from 'boxen';
import { log, readJSON, writeJSON, truncate, isSilentMode } from '../utils.js';
import {
displayBanner,
startLoadingIndicator,
stopLoadingIndicator
} from '../ui.js';
import { getDefaultSubtasks } from '../config-manager.js';
import generateTaskFiles from './generate-task-files.js';
import { log, readJSON, writeJSON, isSilentMode } from '../utils.js';
import { startLoadingIndicator, stopLoadingIndicator } from '../ui.js';
import expandTask from './expand-task.js';
import { getDebugFlag } from '../config-manager.js';
/**
* Expand all pending tasks with subtasks
* Expand all eligible pending or in-progress tasks using the expandTask function.
* @param {string} tasksPath - Path to the tasks.json file
* @param {number} numSubtasks - Number of subtasks per task
* @param {boolean} useResearch - Whether to use research (Perplexity)
* @param {string} additionalContext - Additional context
* @param {boolean} forceFlag - Force regeneration for tasks with subtasks
* @param {Object} options - Options for expanding tasks
* @param {function} options.reportProgress - Function to report progress
* @param {Object} options.mcpLog - MCP logger object
* @param {Object} options.session - Session object from MCP
* @param {string} outputFormat - Output format (text or json)
* @param {number} [numSubtasks] - Optional: Target number of subtasks per task.
* @param {boolean} [useResearch=false] - Whether to use the research AI role.
* @param {string} [additionalContext=''] - Optional additional context.
* @param {boolean} [force=false] - Force expansion even if tasks already have subtasks.
* @param {Object} context - Context object containing session and mcpLog.
* @param {Object} [context.session] - Session object from MCP.
* @param {Object} [context.mcpLog] - MCP logger object.
* @param {string} [outputFormat='text'] - Output format ('text' or 'json'). MCP calls should use 'json'.
* @returns {Promise<{success: boolean, expandedCount: number, failedCount: number, skippedCount: number, tasksToExpand: number, message?: string}>} - Result summary.
*/
async function expandAllTasks(
tasksPath,
numSubtasks = getDefaultSubtasks(), // Use getter
numSubtasks, // Keep this signature, expandTask handles defaults
useResearch = false,
additionalContext = '',
forceFlag = false,
{ reportProgress, mcpLog, session } = {},
outputFormat = 'text'
force = false, // Keep force here for the filter logic
context = {},
outputFormat = 'text' // Assume text default for CLI
) {
// Create custom reporter that checks for MCP log and silent mode
const report = (message, level = 'info') => {
if (mcpLog) {
mcpLog[level](message);
} else if (!isSilentMode() && outputFormat === 'text') {
// Only log to console if not in silent mode and outputFormat is 'text'
log(level, message);
}
};
const { session, mcpLog } = context;
const isMCPCall = !!mcpLog; // Determine if called from MCP
// Only display banner and UI elements for text output (CLI)
if (outputFormat === 'text') {
displayBanner();
}
// Parse numSubtasks as integer if it's a string
if (typeof numSubtasks === 'string') {
numSubtasks = parseInt(numSubtasks, 10);
if (isNaN(numSubtasks)) {
numSubtasks = getDefaultSubtasks(); // Use getter
}
}
report(`Expanding all pending tasks with ${numSubtasks} subtasks each...`);
if (useResearch) {
report('Using research-backed AI for more detailed subtasks');
}
// Load tasks
let data;
try {
data = readJSON(tasksPath);
if (!data || !data.tasks) {
throw new Error('No valid tasks found');
}
} catch (error) {
report(`Error loading tasks: ${error.message}`, 'error');
throw error;
}
// Get all tasks that are pending/in-progress and don't have subtasks (or force regeneration)
const tasksToExpand = data.tasks.filter(
(task) =>
(task.status === 'pending' || task.status === 'in-progress') &&
(!task.subtasks || task.subtasks.length === 0 || forceFlag)
);
if (tasksToExpand.length === 0) {
report(
'No tasks eligible for expansion. Tasks should be in pending/in-progress status and not have subtasks already.',
'info'
);
// Return structured result for MCP
return {
success: true,
expandedCount: 0,
tasksToExpand: 0,
message: 'No tasks eligible for expansion'
};
}
report(`Found ${tasksToExpand.length} tasks to expand`);
// Check if we have a complexity report to prioritize complex tasks
let complexityReport;
const reportPath = path.join(
path.dirname(tasksPath),
'../scripts/task-complexity-report.json'
);
if (fs.existsSync(reportPath)) {
try {
complexityReport = readJSON(reportPath);
report('Using complexity analysis to prioritize tasks');
} catch (error) {
report(`Could not read complexity report: ${error.message}`, 'warn');
}
}
// Only create loading indicator if not in silent mode and outputFormat is 'text'
let loadingIndicator = null;
if (!isSilentMode() && outputFormat === 'text') {
loadingIndicator = startLoadingIndicator(
`Expanding ${tasksToExpand.length} tasks with ${numSubtasks} subtasks each`
);
}
let expandedCount = 0;
let expansionErrors = 0;
try {
// Sort tasks by complexity if report exists, otherwise by ID
if (complexityReport && complexityReport.complexityAnalysis) {
report('Sorting tasks by complexity...');
// Create a map of task IDs to complexity scores
const complexityMap = new Map();
complexityReport.complexityAnalysis.forEach((analysis) => {
complexityMap.set(analysis.taskId, analysis.complexityScore);
});
// Sort tasks by complexity score (high to low)
tasksToExpand.sort((a, b) => {
const scoreA = complexityMap.get(a.id) || 0;
const scoreB = complexityMap.get(b.id) || 0;
return scoreB - scoreA;
});
}
// Process each task
for (const task of tasksToExpand) {
if (loadingIndicator && outputFormat === 'text') {
loadingIndicator.text = `Expanding task ${task.id}: ${truncate(task.title, 30)} (${expandedCount + 1}/${tasksToExpand.length})`;
}
// Report progress to MCP if available
if (reportProgress) {
reportProgress({
status: 'processing',
current: expandedCount + 1,
total: tasksToExpand.length,
message: `Expanding task ${task.id}: ${truncate(task.title, 30)}`
// Use mcpLog if available, otherwise use the default console log wrapper respecting silent mode
const logger =
mcpLog ||
(outputFormat === 'json'
? {
// Basic logger for JSON output mode
info: (msg) => {},
warn: (msg) => {},
error: (msg) => console.error(`ERROR: ${msg}`), // Still log errors
debug: (msg) => {}
}
: {
// CLI logger respecting silent mode
info: (msg) => !isSilentMode() && log('info', msg),
warn: (msg) => !isSilentMode() && log('warn', msg),
error: (msg) => !isSilentMode() && log('error', msg),
debug: (msg) =>
!isSilentMode() && getDebugFlag(session) && log('debug', msg)
});
}
report(`Expanding task ${task.id}: ${truncate(task.title, 50)}`);
let loadingIndicator = null;
let expandedCount = 0;
let failedCount = 0;
// No skipped count needed now as the filter handles it upfront
let tasksToExpandCount = 0; // Renamed for clarity
// Check if task already has subtasks and forceFlag is enabled
if (task.subtasks && task.subtasks.length > 0 && forceFlag) {
report(
`Task ${task.id} already has ${task.subtasks.length} subtasks. Clearing them for regeneration.`
if (!isMCPCall && outputFormat === 'text') {
loadingIndicator = startLoadingIndicator(
'Analyzing tasks for expansion...'
);
}
try {
logger.info(`Reading tasks from ${tasksPath}`);
const data = readJSON(tasksPath);
if (!data || !data.tasks) {
throw new Error(`Invalid tasks data in ${tasksPath}`);
}
// --- Restore Original Filtering Logic ---
const tasksToExpand = data.tasks.filter(
(task) =>
(task.status === 'pending' || task.status === 'in-progress') && // Include 'in-progress'
(!task.subtasks || task.subtasks.length === 0 || force) // Check subtasks/force here
);
tasksToExpandCount = tasksToExpand.length; // Get the count from the filtered array
logger.info(`Found ${tasksToExpandCount} tasks eligible for expansion.`);
// --- End Restored Filtering Logic ---
if (loadingIndicator) {
stopLoadingIndicator(loadingIndicator, 'Analysis complete.');
}
if (tasksToExpandCount === 0) {
logger.info('No tasks eligible for expansion.');
// --- Fix: Restore success: true and add message ---
return {
success: true, // Indicate overall success despite no action
expandedCount: 0,
failedCount: 0,
skippedCount: 0,
tasksToExpand: 0,
message: 'No tasks eligible for expansion.'
};
// --- End Fix ---
}
// Iterate over the already filtered tasks
for (const task of tasksToExpand) {
// --- Remove Redundant Check ---
// The check below is no longer needed as the initial filter handles it
/*
if (task.subtasks && task.subtasks.length > 0 && !force) {
logger.info(
`Skipping task ${task.id}: Already has subtasks. Use --force to overwrite.`
);
task.subtasks = [];
skippedCount++;
continue;
}
*/
// --- End Removed Redundant Check ---
// Start indicator for individual task expansion in CLI mode
let taskIndicator = null;
if (!isMCPCall && outputFormat === 'text') {
taskIndicator = startLoadingIndicator(`Expanding task ${task.id}...`);
}
try {
// Get complexity analysis for this task if available
let taskAnalysis;
if (complexityReport && complexityReport.complexityAnalysis) {
taskAnalysis = complexityReport.complexityAnalysis.find(
(a) => a.taskId === task.id
);
}
let thisNumSubtasks = numSubtasks;
// Use recommended number of subtasks from complexity analysis if available
if (taskAnalysis && taskAnalysis.recommendedSubtasks) {
report(
`Using recommended ${taskAnalysis.recommendedSubtasks} subtasks based on complexity score ${taskAnalysis.complexityScore}/10 for task ${task.id}`
);
thisNumSubtasks = taskAnalysis.recommendedSubtasks;
}
// Generate prompt for subtask creation based on task details
const prompt = generateSubtaskPrompt(
task,
thisNumSubtasks,
additionalContext,
taskAnalysis
);
// Use AI to generate subtasks
const aiResponse = await getSubtasksFromAI(
prompt,
// Call the refactored expandTask function
await expandTask(
tasksPath,
task.id,
numSubtasks, // Pass numSubtasks, expandTask handles defaults/complexity
useResearch,
session,
mcpLog
additionalContext,
context, // Pass the whole context object { session, mcpLog }
force // Pass the force flag down
);
if (
aiResponse &&
aiResponse.subtasks &&
Array.isArray(aiResponse.subtasks) &&
aiResponse.subtasks.length > 0
) {
// Process and add the subtasks to the task
task.subtasks = aiResponse.subtasks.map((subtask, index) => ({
id: index + 1,
title: subtask.title || `Subtask ${index + 1}`,
description: subtask.description || 'No description provided',
status: 'pending',
dependencies: subtask.dependencies || [],
details: subtask.details || ''
}));
report(`Added ${task.subtasks.length} subtasks to task ${task.id}`);
expandedCount++;
} else if (aiResponse && aiResponse.error) {
// Handle error response
const errorMsg = `Failed to generate subtasks for task ${task.id}: ${aiResponse.error}`;
report(errorMsg, 'error');
// Add task ID to error info and provide actionable guidance
const suggestion = aiResponse.suggestion.replace('<id>', task.id);
report(`Suggestion: ${suggestion}`, 'info');
expansionErrors++;
} else {
report(`Failed to generate subtasks for task ${task.id}`, 'error');
report(
`Suggestion: Run 'task-master update-task --id=${task.id} --prompt="Generate subtasks for this task"' to manually create subtasks.`,
'info'
);
expansionErrors++;
expandedCount++;
if (taskIndicator) {
stopLoadingIndicator(taskIndicator, `Task ${task.id} expanded.`);
}
logger.info(`Successfully expanded task ${task.id}.`);
} catch (error) {
report(`Error expanding task ${task.id}: ${error.message}`, 'error');
expansionErrors++;
failedCount++;
if (taskIndicator) {
stopLoadingIndicator(
taskIndicator,
`Failed to expand task ${task.id}.`,
false
);
}
logger.error(`Failed to expand task ${task.id}: ${error.message}`);
// Continue to the next task
}
// Small delay to prevent rate limiting
await new Promise((resolve) => setTimeout(resolve, 100));
}
// Save the updated tasks
writeJSON(tasksPath, data);
// Log final summary (removed skipped count from message)
logger.info(
`Expansion complete: ${expandedCount} expanded, ${failedCount} failed.`
);
// Generate task files
if (outputFormat === 'text') {
// Only perform file generation for CLI (text) mode
const outputDir = path.dirname(tasksPath);
await generateTaskFiles(tasksPath, outputDir);
}
// Return structured result for MCP
// Return summary (skippedCount is now 0) - Add success: true here as well for consistency
return {
success: true,
success: true, // Indicate overall success
expandedCount,
tasksToExpand: tasksToExpand.length,
expansionErrors,
message: `Successfully expanded ${expandedCount} out of ${tasksToExpand.length} tasks${expansionErrors > 0 ? ` (${expansionErrors} errors)` : ''}`
failedCount,
skippedCount: 0,
tasksToExpand: tasksToExpandCount
};
} catch (error) {
report(`Error expanding tasks: ${error.message}`, 'error');
throw error;
} finally {
// Stop the loading indicator if it was created
if (loadingIndicator && outputFormat === 'text') {
stopLoadingIndicator(loadingIndicator);
}
// Final progress report
if (reportProgress) {
reportProgress({
status: 'completed',
current: expandedCount,
total: tasksToExpand.length,
message: `Completed expanding ${expandedCount} out of ${tasksToExpand.length} tasks`
});
}
// Display completion message for CLI mode
if (outputFormat === 'text') {
console.log(
boxen(
chalk.white.bold(`Task Expansion Completed`) +
'\n\n' +
chalk.white(
`Expanded ${expandedCount} out of ${tasksToExpand.length} tasks`
) +
'\n' +
chalk.white(
`Each task now has detailed subtasks to guide implementation`
),
{
padding: 1,
borderColor: 'green',
borderStyle: 'round',
margin: { top: 1 }
}
)
);
// Suggest next actions
if (expandedCount > 0) {
console.log(chalk.bold('\nNext Steps:'));
console.log(
chalk.cyan(
`1. Run ${chalk.yellow('task-master list --with-subtasks')} to see all tasks with their subtasks`
)
);
console.log(
chalk.cyan(
`2. Run ${chalk.yellow('task-master next')} to find the next task to work on`
)
);
console.log(
chalk.cyan(
`3. Run ${chalk.yellow('task-master set-status --id=<taskId> --status=in-progress')} to start working on a task`
)
);
}
if (loadingIndicator)
stopLoadingIndicator(loadingIndicator, 'Error.', false);
logger.error(`Error during expand all operation: ${error.message}`);
if (!isMCPCall && getDebugFlag(session)) {
console.error(error); // Log full stack in debug CLI mode
}
// Re-throw error for the caller to handle, the direct function will format it
throw error; // Let direct function wrapper handle formatting
/* Original re-throw:
throw new Error(`Failed to expand all tasks: ${error.message}`);
*/
}
}

View File

@@ -312,14 +312,18 @@ function parseSubtasksFromText(
/**
* Expand a task into subtasks using the unified AI service (generateTextService).
* Appends new subtasks by default. Replaces existing subtasks if force=true.
* Integrates complexity report to determine subtask count and prompt if available,
* unless numSubtasks is explicitly provided.
* @param {string} tasksPath - Path to the tasks.json file
* @param {number} taskId - Task ID to expand
* @param {number} [numSubtasks] - Optional: Target number of subtasks. Uses config default if not provided.
* @param {number | null | undefined} [numSubtasks] - Optional: Explicit target number of subtasks. If null/undefined, check complexity report or config default.
* @param {boolean} [useResearch=false] - Whether to use the research AI role.
* @param {string} [additionalContext=''] - Optional additional context.
* @param {Object} context - Context object containing session and mcpLog.
* @param {Object} [context.session] - Session object from MCP.
* @param {Object} [context.mcpLog] - MCP logger object.
* @param {boolean} [force=false] - If true, replace existing subtasks; otherwise, append.
* @returns {Promise<Object>} The updated parent task object with new subtasks.
* @throws {Error} If task not found, AI service fails, or parsing fails.
*/
@@ -329,7 +333,8 @@ async function expandTask(
numSubtasks,
useResearch = false,
additionalContext = '',
context = {}
context = {},
force = false
) {
const { session, mcpLog } = context;
const outputFormat = mcpLog ? 'json' : 'text';
@@ -361,56 +366,142 @@ async function expandTask(
logger.info(`Expanding task ${taskId}: ${task.title}`);
// --- End Task Loading/Filtering ---
// --- Subtask Count & Complexity Check (Unchanged) ---
let subtaskCount = parseInt(numSubtasks, 10);
if (isNaN(subtaskCount) || subtaskCount <= 0) {
subtaskCount = getDefaultSubtasks(session); // Pass session
logger.info(`Using default number of subtasks: ${subtaskCount}`);
// --- Handle Force Flag: Clear existing subtasks if force=true ---
if (force && Array.isArray(task.subtasks) && task.subtasks.length > 0) {
logger.info(
`Force flag set. Clearing existing ${task.subtasks.length} subtasks for task ${taskId}.`
);
task.subtasks = []; // Clear existing subtasks
}
// ... (complexity report check logic remains) ...
// --- End Subtask Count & Complexity Check ---
// --- End Force Flag Handling ---
// --- AI Subtask Generation using generateTextService ---
let generatedSubtasks = [];
const nextSubtaskId = (task.subtasks?.length || 0) + 1;
// --- Complexity Report Integration ---
let finalSubtaskCount;
let promptContent = '';
let complexityReasoningContext = '';
let systemPrompt; // Declare systemPrompt here
let loadingIndicator = null;
if (outputFormat === 'text') {
loadingIndicator = startLoadingIndicator(
`Generating ${subtaskCount} subtasks...`
const projectRoot = path.dirname(path.dirname(tasksPath));
const complexityReportPath = path.join(
projectRoot,
'scripts/task-complexity-report.json'
);
let taskAnalysis = null;
try {
if (fs.existsSync(complexityReportPath)) {
const complexityReport = readJSON(complexityReportPath);
taskAnalysis = complexityReport?.complexityAnalysis?.find(
(a) => a.taskId === task.id
);
if (taskAnalysis) {
logger.info(
`Found complexity analysis for task ${task.id}: Score ${taskAnalysis.complexityScore}`
);
if (taskAnalysis.reasoning) {
complexityReasoningContext = `\nComplexity Analysis Reasoning: ${taskAnalysis.reasoning}`;
}
} else {
logger.info(
`No complexity analysis found for task ${task.id} in report.`
);
}
} else {
logger.info(
`Complexity report not found at ${complexityReportPath}. Skipping complexity check.`
);
}
} catch (reportError) {
logger.warn(
`Could not read or parse complexity report: ${reportError.message}. Proceeding without it.`
);
}
let responseText = ''; // To store the raw text response
// Determine final subtask count
const explicitNumSubtasks = parseInt(numSubtasks, 10);
if (!isNaN(explicitNumSubtasks) && explicitNumSubtasks > 0) {
finalSubtaskCount = explicitNumSubtasks;
logger.info(
`Using explicitly provided subtask count: ${finalSubtaskCount}`
);
} else if (taskAnalysis?.recommendedSubtasks) {
finalSubtaskCount = parseInt(taskAnalysis.recommendedSubtasks, 10);
logger.info(
`Using subtask count from complexity report: ${finalSubtaskCount}`
);
} else {
finalSubtaskCount = getDefaultSubtasks(session);
logger.info(`Using default number of subtasks: ${finalSubtaskCount}`);
}
if (isNaN(finalSubtaskCount) || finalSubtaskCount <= 0) {
logger.warn(
`Invalid subtask count determined (${finalSubtaskCount}), defaulting to 3.`
);
finalSubtaskCount = 3;
}
// Determine prompt content AND system prompt
const nextSubtaskId = (task.subtasks?.length || 0) + 1;
if (taskAnalysis?.expansionPrompt) {
// Use prompt from complexity report
promptContent = taskAnalysis.expansionPrompt;
// Append additional context and reasoning
promptContent += `\n\n${additionalContext}`.trim();
promptContent += `${complexityReasoningContext}`.trim();
// --- Use Simplified System Prompt for Report Prompts ---
systemPrompt = `You are an AI assistant helping with task breakdown. Generate exactly ${finalSubtaskCount} subtasks based on the provided prompt and context. Respond ONLY with a valid JSON object containing a single key "subtasks" whose value is an array of the generated subtask objects. Each subtask object in the array must have keys: "id", "title", "description", "dependencies", "details", "status". Ensure the 'id' starts from ${nextSubtaskId} and is sequential. Ensure 'dependencies' only reference valid prior subtask IDs generated in this response (starting from ${nextSubtaskId}). Ensure 'status' is 'pending'. Do not include any other text or explanation.`;
logger.info(
`Using expansion prompt from complexity report and simplified system prompt for task ${task.id}.`
);
// --- End Simplified System Prompt ---
} else {
// Use standard prompt generation
const combinedAdditionalContext =
`${additionalContext}${complexityReasoningContext}`.trim();
if (useResearch) {
promptContent = generateResearchUserPrompt(
task,
finalSubtaskCount,
combinedAdditionalContext,
nextSubtaskId
);
// Use the specific research system prompt if needed, or a standard one
systemPrompt = `You are an AI assistant that responds ONLY with valid JSON objects as requested. The object should contain a 'subtasks' array.`; // Or keep generateResearchSystemPrompt if it exists
} else {
promptContent = generateMainUserPrompt(
task,
finalSubtaskCount,
combinedAdditionalContext,
nextSubtaskId
);
// Use the original detailed system prompt for standard generation
systemPrompt = generateMainSystemPrompt(finalSubtaskCount);
}
logger.info(`Using standard prompt generation for task ${task.id}.`);
}
// --- End Complexity Report / Prompt Logic ---
// --- AI Subtask Generation using generateTextService ---
let generatedSubtasks = [];
let loadingIndicator = null;
if (outputFormat === 'text') {
loadingIndicator = startLoadingIndicator(
`Generating ${finalSubtaskCount} subtasks...`
);
}
let responseText = '';
try {
// 1. Determine Role and Generate Prompts
const role = useResearch ? 'research' : 'main';
logger.info(`Using AI service with role: ${role}`);
let prompt;
let systemPrompt;
if (useResearch) {
prompt = generateResearchUserPrompt(
task,
subtaskCount,
additionalContext,
nextSubtaskId
);
systemPrompt = `You are an AI assistant that responds ONLY with valid JSON objects as requested. The object should contain a 'subtasks' array.`;
} else {
prompt = generateMainUserPrompt(
task,
subtaskCount,
additionalContext,
nextSubtaskId
);
systemPrompt = generateMainSystemPrompt(subtaskCount);
}
// 2. Call generateTextService
// Call generateTextService with the determined prompts
responseText = await generateTextService({
prompt,
systemPrompt,
prompt: promptContent,
systemPrompt: systemPrompt, // Use the determined system prompt
role,
session
});
@@ -419,46 +510,45 @@ async function expandTask(
'success'
);
// 3. Parse Subtasks from Text Response
try {
generatedSubtasks = parseSubtasksFromText(
responseText,
nextSubtaskId,
subtaskCount,
task.id,
logger // Pass the logger
);
logger.info(
`Successfully parsed ${generatedSubtasks.length} subtasks from AI response.`
);
} catch (parseError) {
// Log error and throw
logger.error(
`Failed to parse subtasks from AI response: ${parseError.message}`
);
if (getDebugFlag(session)) {
// Use getter with session
logger.error(`Raw AI Response:\n${responseText}`);
}
throw new Error(
`Failed to parse valid subtasks from AI response: ${parseError.message}`
);
}
// --- End AI Subtask Generation ---
// Parse Subtasks
generatedSubtasks = parseSubtasksFromText(
responseText,
nextSubtaskId,
finalSubtaskCount,
task.id,
logger
);
logger.info(
`Successfully parsed ${generatedSubtasks.length} subtasks from AI response.`
);
} catch (error) {
if (loadingIndicator) stopLoadingIndicator(loadingIndicator);
logger.error(
`Error generating subtasks via AI service: ${error.message}`,
`Error during AI call or parsing for task ${taskId}: ${error.message}`, // Added task ID context
'error'
);
throw error; // Re-throw AI service error
// Log raw response in debug mode if parsing failed
if (
error.message.includes('Failed to parse valid subtasks') &&
getDebugFlag(session)
) {
logger.error(`Raw AI Response that failed parsing:\n${responseText}`);
}
throw error;
} finally {
if (loadingIndicator) stopLoadingIndicator(loadingIndicator);
}
// --- Task Update & File Writing (Unchanged) ---
task.subtasks = generatedSubtasks;
data.tasks[taskIndex] = task;
// --- Task Update & File Writing ---
// Ensure task.subtasks is an array before appending
if (!Array.isArray(task.subtasks)) {
task.subtasks = [];
}
// Append the newly generated and validated subtasks
task.subtasks.push(...generatedSubtasks);
// --- End Change: Append instead of replace ---
data.tasks[taskIndex] = task; // Assign the modified task back
logger.info(`Writing updated tasks to ${tasksPath}`);
writeJSON(tasksPath, data);
logger.info(`Generating individual task files...`);
@@ -471,7 +561,6 @@ async function expandTask(
// Catches errors from file reading, parsing, AI call etc.
logger.error(`Error expanding task ${taskId}: ${error.message}`, 'error');
if (outputFormat === 'text' && getDebugFlag(session)) {
// Use getter with session
console.error(error); // Log full stack in debug CLI mode
}
throw error; // Re-throw for the caller