refactor(expand/all): Implement additive expansion and complexity report integration
Refactors the `expandTask` and `expandAllTasks` features to complete subtask 61.38 and enhance functionality based on subtask 61.37's refactor.
Key Changes:
- **Additive Expansion (`expandTask`, `expandAllTasks`):**
- Modified `expandTask` default behavior to append newly generated subtasks to any existing ones.
- Added a `force` flag (passed down from CLI/MCP via `--force` option/parameter) to `expandTask` and `expandAllTasks`. When `force` is true, existing subtasks are cleared before generating new ones.
- Updated relevant CLI command (`expand`), MCP tool (`expand_task`, `expand_all`), and direct function wrappers (`expandTaskDirect`, `expandAllTasksDirect`) to handle and pass the `force` flag.
- **Complexity Report Integration (`expandTask`):**
- `expandTask` now reads `scripts/task-complexity-report.json`.
- If an analysis entry exists for the target task:
- `recommendedSubtasks` is used to determine the number of subtasks to generate (unless `--num` is explicitly provided).
- `expansionPrompt` is used as the primary prompt content for the AI.
- `reasoning` is appended to any additional context provided.
- If no report entry exists or the report is missing, it falls back to default subtask count (from config) and standard prompt generation.
- **`expandAllTasks` Orchestration:**
- Refactored `expandAllTasks` to primarily iterate through eligible tasks (pending/in-progress, considering `force` flag and existing subtasks) and call the updated `expandTask` function for each.
- Removed redundant logic (like complexity reading or explicit subtask clearing) now handled within `expandTask`.
- Ensures correct context (`session`, `mcpLog`) and flags (`useResearch`, `force`) are passed down.
- **Configuration & Cleanup:**
- Updated `.cursor/mcp.json` with new Perplexity/Anthropic API keys (old ones invalidated).
- Completed refactoring of `expandTask` started in 61.37, confirming usage of `generateTextService` and appropriate prompts.
- **Task Management:**
- Marked subtask 61.37 as complete.
- Updated `.changeset/cuddly-zebras-matter.md` to reflect user-facing changes.
These changes finalize the refactoring of the task expansion features, making them more robust, configurable via complexity analysis, and aligned with the unified AI service architecture.
This commit is contained in:
8
.changeset/cuddly-zebras-matter.md
Normal file
8
.changeset/cuddly-zebras-matter.md
Normal file
@@ -0,0 +1,8 @@
|
||||
---
|
||||
'task-master-ai': patch
|
||||
---
|
||||
|
||||
feat(expand): Enhance `expand` and `expand-all` commands
|
||||
|
||||
- Integrate `task-complexity-report.json` to automatically determine the number of subtasks and use tailored prompts for expansion based on prior analysis. You no longer need to copy-paste the recommended prompt. If it exists, it will be used for you. You can just run `task-master expand --id=[id of task] --research` and it will use that prompt automatically. No extra prompt needed.
|
||||
- Change default behavior to *append* new subtasks to existing ones. Use the `--force` flag to clear existing subtasks before expanding. This is helpful if you need to add more subtasks to a task but you want to do it by the batch from a given prompt. Use force if you want to start fresh with a task's subtasks.
|
||||
@@ -8,135 +8,91 @@ import {
|
||||
disableSilentMode,
|
||||
isSilentMode
|
||||
} from '../../../../scripts/modules/utils.js';
|
||||
import { getAnthropicClientForMCP } from '../utils/ai-client-utils.js';
|
||||
import path from 'path';
|
||||
import fs from 'fs';
|
||||
|
||||
/**
|
||||
* Expand all pending tasks with subtasks
|
||||
* Expand all pending tasks with subtasks (Direct Function Wrapper)
|
||||
* @param {Object} args - Function arguments
|
||||
* @param {string} args.tasksJsonPath - Explicit path to the tasks.json file.
|
||||
* @param {number|string} [args.num] - Number of subtasks to generate
|
||||
* @param {boolean} [args.research] - Enable Perplexity AI for research-backed subtask generation
|
||||
* @param {boolean} [args.research] - Enable research-backed subtask generation
|
||||
* @param {string} [args.prompt] - Additional context to guide subtask generation
|
||||
* @param {boolean} [args.force] - Force regeneration of subtasks for tasks that already have them
|
||||
* @param {Object} log - Logger object
|
||||
* @param {Object} log - Logger object from FastMCP
|
||||
* @param {Object} context - Context object containing session
|
||||
* @returns {Promise<{success: boolean, data?: Object, error?: {code: string, message: string}}>}
|
||||
*/
|
||||
export async function expandAllTasksDirect(args, log, context = {}) {
|
||||
const { session } = context; // Only extract session, not reportProgress
|
||||
const { session } = context; // Extract session
|
||||
// Destructure expected args
|
||||
const { tasksJsonPath, num, research, prompt, force } = args;
|
||||
|
||||
try {
|
||||
log.info(`Expanding all tasks with args: ${JSON.stringify(args)}`);
|
||||
// Create the standard logger wrapper
|
||||
const logWrapper = {
|
||||
info: (message, ...args) => log.info(message, ...args),
|
||||
warn: (message, ...args) => log.warn(message, ...args),
|
||||
error: (message, ...args) => log.error(message, ...args),
|
||||
debug: (message, ...args) => log.debug && log.debug(message, ...args), // Handle optional debug
|
||||
success: (message, ...args) => log.info(message, ...args) // Map success to info if needed
|
||||
};
|
||||
|
||||
// Check if tasksJsonPath was provided
|
||||
if (!tasksJsonPath) {
|
||||
log.error('expandAllTasksDirect called without tasksJsonPath');
|
||||
return {
|
||||
success: false,
|
||||
error: {
|
||||
code: 'MISSING_ARGUMENT',
|
||||
message: 'tasksJsonPath is required'
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
// Enable silent mode early to prevent any console output
|
||||
enableSilentMode();
|
||||
|
||||
try {
|
||||
// Remove internal path finding
|
||||
/*
|
||||
const tasksPath = findTasksJsonPath(args, log);
|
||||
*/
|
||||
// Use provided path
|
||||
const tasksPath = tasksJsonPath;
|
||||
|
||||
// Parse parameters
|
||||
const numSubtasks = num ? parseInt(num, 10) : undefined;
|
||||
const useResearch = research === true;
|
||||
const additionalContext = prompt || '';
|
||||
const forceFlag = force === true;
|
||||
|
||||
log.info(
|
||||
`Expanding all tasks with ${numSubtasks || 'default'} subtasks each...`
|
||||
);
|
||||
|
||||
if (useResearch) {
|
||||
log.info('Using Perplexity AI for research-backed subtask generation');
|
||||
|
||||
// Initialize AI client for research-backed expansion
|
||||
try {
|
||||
await getAnthropicClientForMCP(session, log);
|
||||
} catch (error) {
|
||||
// Ensure silent mode is disabled before returning error
|
||||
disableSilentMode();
|
||||
|
||||
log.error(`Failed to initialize AI client: ${error.message}`);
|
||||
return {
|
||||
success: false,
|
||||
error: {
|
||||
code: 'AI_CLIENT_ERROR',
|
||||
message: `Cannot initialize AI client: ${error.message}`
|
||||
}
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
if (additionalContext) {
|
||||
log.info(`Additional context: "${additionalContext}"`);
|
||||
}
|
||||
if (forceFlag) {
|
||||
log.info('Force regeneration of subtasks is enabled');
|
||||
}
|
||||
|
||||
// Call the core function with session context for AI operations
|
||||
// and outputFormat as 'json' to prevent UI elements
|
||||
const result = await expandAllTasks(
|
||||
tasksPath,
|
||||
numSubtasks,
|
||||
useResearch,
|
||||
additionalContext,
|
||||
forceFlag,
|
||||
{ mcpLog: log, session },
|
||||
'json' // Use JSON output format to prevent UI elements
|
||||
);
|
||||
|
||||
// The expandAllTasks function now returns a result object
|
||||
return {
|
||||
success: true,
|
||||
data: {
|
||||
message: 'Successfully expanded all pending tasks with subtasks',
|
||||
details: {
|
||||
numSubtasks: numSubtasks,
|
||||
research: useResearch,
|
||||
prompt: additionalContext,
|
||||
force: forceFlag,
|
||||
tasksExpanded: result.expandedCount,
|
||||
totalEligibleTasks: result.tasksToExpand
|
||||
}
|
||||
}
|
||||
};
|
||||
} finally {
|
||||
// Restore normal logging in finally block to ensure it runs even if there's an error
|
||||
disableSilentMode();
|
||||
}
|
||||
} catch (error) {
|
||||
// Ensure silent mode is disabled if an error occurs
|
||||
if (isSilentMode()) {
|
||||
disableSilentMode();
|
||||
}
|
||||
|
||||
log.error(`Error in expandAllTasksDirect: ${error.message}`);
|
||||
if (!tasksJsonPath) {
|
||||
log.error('expandAllTasksDirect called without tasksJsonPath');
|
||||
return {
|
||||
success: false,
|
||||
error: {
|
||||
code: 'CORE_FUNCTION_ERROR',
|
||||
message: error.message
|
||||
code: 'MISSING_ARGUMENT',
|
||||
message: 'tasksJsonPath is required'
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
enableSilentMode(); // Enable silent mode for the core function call
|
||||
try {
|
||||
log.info(
|
||||
`Calling core expandAllTasks with args: ${JSON.stringify({ num, research, prompt, force })}`
|
||||
);
|
||||
|
||||
// Parse parameters (ensure correct types)
|
||||
const numSubtasks = num ? parseInt(num, 10) : undefined;
|
||||
const useResearch = research === true;
|
||||
const additionalContext = prompt || '';
|
||||
const forceFlag = force === true;
|
||||
|
||||
// Call the core function, passing the logger wrapper and session
|
||||
const result = await expandAllTasks(
|
||||
tasksJsonPath, // Use the provided path
|
||||
numSubtasks,
|
||||
useResearch,
|
||||
additionalContext,
|
||||
forceFlag,
|
||||
{ mcpLog: logWrapper, session }, // Pass the wrapper and session
|
||||
'json' // Explicitly request JSON output format
|
||||
);
|
||||
|
||||
// Core function now returns a summary object
|
||||
return {
|
||||
success: true,
|
||||
data: {
|
||||
message: `Expand all operation completed. Expanded: ${result.expandedCount}, Failed: ${result.failedCount}, Skipped: ${result.skippedCount}`,
|
||||
details: result // Include the full result details
|
||||
}
|
||||
};
|
||||
} catch (error) {
|
||||
// Log the error using the MCP logger
|
||||
log.error(`Error during core expandAllTasks execution: ${error.message}`);
|
||||
// Optionally log stack trace if available and debug enabled
|
||||
// if (error.stack && log.debug) { log.debug(error.stack); }
|
||||
|
||||
return {
|
||||
success: false,
|
||||
error: {
|
||||
code: 'CORE_FUNCTION_ERROR', // Or a more specific code if possible
|
||||
message: error.message
|
||||
}
|
||||
};
|
||||
} finally {
|
||||
disableSilentMode(); // IMPORTANT: Ensure silent mode is always disabled
|
||||
}
|
||||
}
|
||||
|
||||
@@ -19,22 +19,27 @@ import { findTasksJsonPath } from '../core/utils/path-utils.js';
|
||||
export function registerExpandAllTool(server) {
|
||||
server.addTool({
|
||||
name: 'expand_all',
|
||||
description: 'Expand all pending tasks into subtasks',
|
||||
description:
|
||||
'Expand all pending tasks into subtasks based on complexity or defaults',
|
||||
parameters: z.object({
|
||||
num: z
|
||||
.string()
|
||||
.optional()
|
||||
.describe('Number of subtasks to generate for each task'),
|
||||
.describe(
|
||||
'Target number of subtasks per task (uses complexity/defaults otherwise)'
|
||||
),
|
||||
research: z
|
||||
.boolean()
|
||||
.optional()
|
||||
.describe(
|
||||
'Enable Perplexity AI for research-backed subtask generation'
|
||||
'Enable research-backed subtask generation (e.g., using Perplexity)'
|
||||
),
|
||||
prompt: z
|
||||
.string()
|
||||
.optional()
|
||||
.describe('Additional context to guide subtask generation'),
|
||||
.describe(
|
||||
'Additional context to guide subtask generation for all tasks'
|
||||
),
|
||||
force: z
|
||||
.boolean()
|
||||
.optional()
|
||||
@@ -45,34 +50,37 @@ export function registerExpandAllTool(server) {
|
||||
.string()
|
||||
.optional()
|
||||
.describe(
|
||||
'Absolute path to the tasks file (default: tasks/tasks.json)'
|
||||
'Relative path to the tasks file from project root (default: tasks/tasks.json)'
|
||||
),
|
||||
projectRoot: z
|
||||
.string()
|
||||
.describe('The directory of the project. Must be an absolute path.')
|
||||
.optional()
|
||||
.describe(
|
||||
'Absolute path to the project root directory (derived from session if possible)'
|
||||
)
|
||||
}),
|
||||
execute: async (args, { log, session }) => {
|
||||
try {
|
||||
log.info(`Expanding all tasks with args: ${JSON.stringify(args)}`);
|
||||
log.info(
|
||||
`Tool expand_all execution started with args: ${JSON.stringify(args)}`
|
||||
);
|
||||
|
||||
// Get project root from args or session
|
||||
const rootFolder =
|
||||
args.projectRoot || getProjectRootFromSession(session, log);
|
||||
|
||||
// Ensure project root was determined
|
||||
const rootFolder = getProjectRootFromSession(session, log);
|
||||
if (!rootFolder) {
|
||||
log.error('Could not determine project root from session.');
|
||||
return createErrorResponse(
|
||||
'Could not determine project root. Please provide it explicitly or ensure your session contains valid root information.'
|
||||
'Could not determine project root from session.'
|
||||
);
|
||||
}
|
||||
log.info(`Project root determined: ${rootFolder}`);
|
||||
|
||||
// Resolve the path to tasks.json
|
||||
let tasksJsonPath;
|
||||
try {
|
||||
tasksJsonPath = findTasksJsonPath(
|
||||
{ projectRoot: rootFolder, file: args.file },
|
||||
log
|
||||
);
|
||||
log.info(`Resolved tasks.json path: ${tasksJsonPath}`);
|
||||
} catch (error) {
|
||||
log.error(`Error finding tasks.json: ${error.message}`);
|
||||
return createErrorResponse(
|
||||
@@ -82,9 +90,7 @@ export function registerExpandAllTool(server) {
|
||||
|
||||
const result = await expandAllTasksDirect(
|
||||
{
|
||||
// Pass the explicitly resolved path
|
||||
tasksJsonPath: tasksJsonPath,
|
||||
// Pass other relevant args
|
||||
num: args.num,
|
||||
research: args.research,
|
||||
prompt: args.prompt,
|
||||
@@ -94,18 +100,17 @@ export function registerExpandAllTool(server) {
|
||||
{ session }
|
||||
);
|
||||
|
||||
if (result.success) {
|
||||
log.info(`Successfully expanded all tasks: ${result.data.message}`);
|
||||
} else {
|
||||
log.error(
|
||||
`Failed to expand all tasks: ${result.error?.message || 'Unknown error'}`
|
||||
);
|
||||
}
|
||||
|
||||
return handleApiResult(result, log, 'Error expanding all tasks');
|
||||
} catch (error) {
|
||||
log.error(`Error in expand-all tool: ${error.message}`);
|
||||
return createErrorResponse(error.message);
|
||||
log.error(
|
||||
`Unexpected error in expand_all tool execute: ${error.message}`
|
||||
);
|
||||
if (error.stack) {
|
||||
log.error(error.stack);
|
||||
}
|
||||
return createErrorResponse(
|
||||
`An unexpected error occurred: ${error.message}`
|
||||
);
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
@@ -676,18 +676,32 @@ function registerCommands(programInstance) {
|
||||
|
||||
if (options.all) {
|
||||
// --- Handle expand --all ---
|
||||
// This currently calls expandAllTasks. If expandAllTasks internally calls
|
||||
// the refactored expandTask, it needs to be updated to pass the empty context {}.
|
||||
// For now, we assume expandAllTasks needs its own refactor (Subtask 61.38).
|
||||
// We'll add a placeholder log here.
|
||||
console.log(
|
||||
chalk.blue(
|
||||
'Expanding all pending tasks... (Requires expand-all-tasks.js refactor)'
|
||||
)
|
||||
);
|
||||
// Placeholder: await expandAllTasks(tasksPath, options.num, options.research, options.prompt, options.force, {});
|
||||
console.log(chalk.blue('Expanding all pending tasks...'));
|
||||
// Updated call to the refactored expandAllTasks
|
||||
try {
|
||||
const result = await expandAllTasks(
|
||||
tasksPath,
|
||||
options.num, // Pass num
|
||||
options.research, // Pass research flag
|
||||
options.prompt, // Pass additional context
|
||||
options.force, // Pass force flag
|
||||
{} // Pass empty context for CLI calls
|
||||
// outputFormat defaults to 'text' in expandAllTasks for CLI
|
||||
);
|
||||
// Optional: Display summary from result
|
||||
console.log(chalk.green(`Expansion Summary:`));
|
||||
console.log(chalk.green(` - Attempted: ${result.tasksToExpand}`));
|
||||
console.log(chalk.green(` - Expanded: ${result.expandedCount}`));
|
||||
console.log(chalk.yellow(` - Skipped: ${result.skippedCount}`));
|
||||
console.log(chalk.red(` - Failed: ${result.failedCount}`));
|
||||
} catch (error) {
|
||||
console.error(
|
||||
chalk.red(`Error expanding all tasks: ${error.message}`)
|
||||
);
|
||||
process.exit(1);
|
||||
}
|
||||
} else if (options.id) {
|
||||
// --- Handle expand --id <id> ---
|
||||
// --- Handle expand --id <id> (Should be correct from previous refactor) ---
|
||||
if (!options.id) {
|
||||
console.error(
|
||||
chalk.red('Error: Task ID is required unless using --all.')
|
||||
@@ -696,19 +710,24 @@ function registerCommands(programInstance) {
|
||||
}
|
||||
|
||||
console.log(chalk.blue(`Expanding task ${options.id}...`));
|
||||
|
||||
// Call the refactored expandTask function
|
||||
await expandTask(
|
||||
tasksPath,
|
||||
options.id,
|
||||
options.num, // Pass num (core function handles default)
|
||||
options.research,
|
||||
options.prompt,
|
||||
// Pass empty context for CLI calls
|
||||
{}
|
||||
// Note: The 'force' flag is now primarily handled by the Direct Function Wrapper
|
||||
// based on pre-checks, but the core function no longer explicitly needs it.
|
||||
);
|
||||
try {
|
||||
// Call the refactored expandTask function
|
||||
await expandTask(
|
||||
tasksPath,
|
||||
options.id,
|
||||
options.num,
|
||||
options.research,
|
||||
options.prompt,
|
||||
{}, // Pass empty context for CLI calls
|
||||
options.force // Pass the force flag down
|
||||
);
|
||||
// expandTask logs its own success/failure for single task
|
||||
} catch (error) {
|
||||
console.error(
|
||||
chalk.red(`Error expanding task ${options.id}: ${error.message}`)
|
||||
);
|
||||
process.exit(1);
|
||||
}
|
||||
} else {
|
||||
console.error(
|
||||
chalk.red('Error: You must specify either a task ID (--id) or --all.')
|
||||
|
||||
@@ -1,334 +1,178 @@
|
||||
import fs from 'fs';
|
||||
import path from 'path';
|
||||
import chalk from 'chalk';
|
||||
import boxen from 'boxen';
|
||||
|
||||
import { log, readJSON, writeJSON, truncate, isSilentMode } from '../utils.js';
|
||||
|
||||
import {
|
||||
displayBanner,
|
||||
startLoadingIndicator,
|
||||
stopLoadingIndicator
|
||||
} from '../ui.js';
|
||||
|
||||
import { getDefaultSubtasks } from '../config-manager.js';
|
||||
import generateTaskFiles from './generate-task-files.js';
|
||||
import { log, readJSON, writeJSON, isSilentMode } from '../utils.js';
|
||||
import { startLoadingIndicator, stopLoadingIndicator } from '../ui.js';
|
||||
import expandTask from './expand-task.js';
|
||||
import { getDebugFlag } from '../config-manager.js';
|
||||
|
||||
/**
|
||||
* Expand all pending tasks with subtasks
|
||||
* Expand all eligible pending or in-progress tasks using the expandTask function.
|
||||
* @param {string} tasksPath - Path to the tasks.json file
|
||||
* @param {number} numSubtasks - Number of subtasks per task
|
||||
* @param {boolean} useResearch - Whether to use research (Perplexity)
|
||||
* @param {string} additionalContext - Additional context
|
||||
* @param {boolean} forceFlag - Force regeneration for tasks with subtasks
|
||||
* @param {Object} options - Options for expanding tasks
|
||||
* @param {function} options.reportProgress - Function to report progress
|
||||
* @param {Object} options.mcpLog - MCP logger object
|
||||
* @param {Object} options.session - Session object from MCP
|
||||
* @param {string} outputFormat - Output format (text or json)
|
||||
* @param {number} [numSubtasks] - Optional: Target number of subtasks per task.
|
||||
* @param {boolean} [useResearch=false] - Whether to use the research AI role.
|
||||
* @param {string} [additionalContext=''] - Optional additional context.
|
||||
* @param {boolean} [force=false] - Force expansion even if tasks already have subtasks.
|
||||
* @param {Object} context - Context object containing session and mcpLog.
|
||||
* @param {Object} [context.session] - Session object from MCP.
|
||||
* @param {Object} [context.mcpLog] - MCP logger object.
|
||||
* @param {string} [outputFormat='text'] - Output format ('text' or 'json'). MCP calls should use 'json'.
|
||||
* @returns {Promise<{success: boolean, expandedCount: number, failedCount: number, skippedCount: number, tasksToExpand: number, message?: string}>} - Result summary.
|
||||
*/
|
||||
async function expandAllTasks(
|
||||
tasksPath,
|
||||
numSubtasks = getDefaultSubtasks(), // Use getter
|
||||
numSubtasks, // Keep this signature, expandTask handles defaults
|
||||
useResearch = false,
|
||||
additionalContext = '',
|
||||
forceFlag = false,
|
||||
{ reportProgress, mcpLog, session } = {},
|
||||
outputFormat = 'text'
|
||||
force = false, // Keep force here for the filter logic
|
||||
context = {},
|
||||
outputFormat = 'text' // Assume text default for CLI
|
||||
) {
|
||||
// Create custom reporter that checks for MCP log and silent mode
|
||||
const report = (message, level = 'info') => {
|
||||
if (mcpLog) {
|
||||
mcpLog[level](message);
|
||||
} else if (!isSilentMode() && outputFormat === 'text') {
|
||||
// Only log to console if not in silent mode and outputFormat is 'text'
|
||||
log(level, message);
|
||||
}
|
||||
};
|
||||
const { session, mcpLog } = context;
|
||||
const isMCPCall = !!mcpLog; // Determine if called from MCP
|
||||
|
||||
// Only display banner and UI elements for text output (CLI)
|
||||
if (outputFormat === 'text') {
|
||||
displayBanner();
|
||||
}
|
||||
|
||||
// Parse numSubtasks as integer if it's a string
|
||||
if (typeof numSubtasks === 'string') {
|
||||
numSubtasks = parseInt(numSubtasks, 10);
|
||||
if (isNaN(numSubtasks)) {
|
||||
numSubtasks = getDefaultSubtasks(); // Use getter
|
||||
}
|
||||
}
|
||||
|
||||
report(`Expanding all pending tasks with ${numSubtasks} subtasks each...`);
|
||||
if (useResearch) {
|
||||
report('Using research-backed AI for more detailed subtasks');
|
||||
}
|
||||
|
||||
// Load tasks
|
||||
let data;
|
||||
try {
|
||||
data = readJSON(tasksPath);
|
||||
if (!data || !data.tasks) {
|
||||
throw new Error('No valid tasks found');
|
||||
}
|
||||
} catch (error) {
|
||||
report(`Error loading tasks: ${error.message}`, 'error');
|
||||
throw error;
|
||||
}
|
||||
|
||||
// Get all tasks that are pending/in-progress and don't have subtasks (or force regeneration)
|
||||
const tasksToExpand = data.tasks.filter(
|
||||
(task) =>
|
||||
(task.status === 'pending' || task.status === 'in-progress') &&
|
||||
(!task.subtasks || task.subtasks.length === 0 || forceFlag)
|
||||
);
|
||||
|
||||
if (tasksToExpand.length === 0) {
|
||||
report(
|
||||
'No tasks eligible for expansion. Tasks should be in pending/in-progress status and not have subtasks already.',
|
||||
'info'
|
||||
);
|
||||
|
||||
// Return structured result for MCP
|
||||
return {
|
||||
success: true,
|
||||
expandedCount: 0,
|
||||
tasksToExpand: 0,
|
||||
message: 'No tasks eligible for expansion'
|
||||
};
|
||||
}
|
||||
|
||||
report(`Found ${tasksToExpand.length} tasks to expand`);
|
||||
|
||||
// Check if we have a complexity report to prioritize complex tasks
|
||||
let complexityReport;
|
||||
const reportPath = path.join(
|
||||
path.dirname(tasksPath),
|
||||
'../scripts/task-complexity-report.json'
|
||||
);
|
||||
if (fs.existsSync(reportPath)) {
|
||||
try {
|
||||
complexityReport = readJSON(reportPath);
|
||||
report('Using complexity analysis to prioritize tasks');
|
||||
} catch (error) {
|
||||
report(`Could not read complexity report: ${error.message}`, 'warn');
|
||||
}
|
||||
}
|
||||
|
||||
// Only create loading indicator if not in silent mode and outputFormat is 'text'
|
||||
let loadingIndicator = null;
|
||||
if (!isSilentMode() && outputFormat === 'text') {
|
||||
loadingIndicator = startLoadingIndicator(
|
||||
`Expanding ${tasksToExpand.length} tasks with ${numSubtasks} subtasks each`
|
||||
);
|
||||
}
|
||||
|
||||
let expandedCount = 0;
|
||||
let expansionErrors = 0;
|
||||
try {
|
||||
// Sort tasks by complexity if report exists, otherwise by ID
|
||||
if (complexityReport && complexityReport.complexityAnalysis) {
|
||||
report('Sorting tasks by complexity...');
|
||||
|
||||
// Create a map of task IDs to complexity scores
|
||||
const complexityMap = new Map();
|
||||
complexityReport.complexityAnalysis.forEach((analysis) => {
|
||||
complexityMap.set(analysis.taskId, analysis.complexityScore);
|
||||
});
|
||||
|
||||
// Sort tasks by complexity score (high to low)
|
||||
tasksToExpand.sort((a, b) => {
|
||||
const scoreA = complexityMap.get(a.id) || 0;
|
||||
const scoreB = complexityMap.get(b.id) || 0;
|
||||
return scoreB - scoreA;
|
||||
});
|
||||
}
|
||||
|
||||
// Process each task
|
||||
for (const task of tasksToExpand) {
|
||||
if (loadingIndicator && outputFormat === 'text') {
|
||||
loadingIndicator.text = `Expanding task ${task.id}: ${truncate(task.title, 30)} (${expandedCount + 1}/${tasksToExpand.length})`;
|
||||
}
|
||||
|
||||
// Report progress to MCP if available
|
||||
if (reportProgress) {
|
||||
reportProgress({
|
||||
status: 'processing',
|
||||
current: expandedCount + 1,
|
||||
total: tasksToExpand.length,
|
||||
message: `Expanding task ${task.id}: ${truncate(task.title, 30)}`
|
||||
// Use mcpLog if available, otherwise use the default console log wrapper respecting silent mode
|
||||
const logger =
|
||||
mcpLog ||
|
||||
(outputFormat === 'json'
|
||||
? {
|
||||
// Basic logger for JSON output mode
|
||||
info: (msg) => {},
|
||||
warn: (msg) => {},
|
||||
error: (msg) => console.error(`ERROR: ${msg}`), // Still log errors
|
||||
debug: (msg) => {}
|
||||
}
|
||||
: {
|
||||
// CLI logger respecting silent mode
|
||||
info: (msg) => !isSilentMode() && log('info', msg),
|
||||
warn: (msg) => !isSilentMode() && log('warn', msg),
|
||||
error: (msg) => !isSilentMode() && log('error', msg),
|
||||
debug: (msg) =>
|
||||
!isSilentMode() && getDebugFlag(session) && log('debug', msg)
|
||||
});
|
||||
}
|
||||
|
||||
report(`Expanding task ${task.id}: ${truncate(task.title, 50)}`);
|
||||
let loadingIndicator = null;
|
||||
let expandedCount = 0;
|
||||
let failedCount = 0;
|
||||
// No skipped count needed now as the filter handles it upfront
|
||||
let tasksToExpandCount = 0; // Renamed for clarity
|
||||
|
||||
// Check if task already has subtasks and forceFlag is enabled
|
||||
if (task.subtasks && task.subtasks.length > 0 && forceFlag) {
|
||||
report(
|
||||
`Task ${task.id} already has ${task.subtasks.length} subtasks. Clearing them for regeneration.`
|
||||
if (!isMCPCall && outputFormat === 'text') {
|
||||
loadingIndicator = startLoadingIndicator(
|
||||
'Analyzing tasks for expansion...'
|
||||
);
|
||||
}
|
||||
|
||||
try {
|
||||
logger.info(`Reading tasks from ${tasksPath}`);
|
||||
const data = readJSON(tasksPath);
|
||||
if (!data || !data.tasks) {
|
||||
throw new Error(`Invalid tasks data in ${tasksPath}`);
|
||||
}
|
||||
|
||||
// --- Restore Original Filtering Logic ---
|
||||
const tasksToExpand = data.tasks.filter(
|
||||
(task) =>
|
||||
(task.status === 'pending' || task.status === 'in-progress') && // Include 'in-progress'
|
||||
(!task.subtasks || task.subtasks.length === 0 || force) // Check subtasks/force here
|
||||
);
|
||||
tasksToExpandCount = tasksToExpand.length; // Get the count from the filtered array
|
||||
logger.info(`Found ${tasksToExpandCount} tasks eligible for expansion.`);
|
||||
// --- End Restored Filtering Logic ---
|
||||
|
||||
if (loadingIndicator) {
|
||||
stopLoadingIndicator(loadingIndicator, 'Analysis complete.');
|
||||
}
|
||||
|
||||
if (tasksToExpandCount === 0) {
|
||||
logger.info('No tasks eligible for expansion.');
|
||||
// --- Fix: Restore success: true and add message ---
|
||||
return {
|
||||
success: true, // Indicate overall success despite no action
|
||||
expandedCount: 0,
|
||||
failedCount: 0,
|
||||
skippedCount: 0,
|
||||
tasksToExpand: 0,
|
||||
message: 'No tasks eligible for expansion.'
|
||||
};
|
||||
// --- End Fix ---
|
||||
}
|
||||
|
||||
// Iterate over the already filtered tasks
|
||||
for (const task of tasksToExpand) {
|
||||
// --- Remove Redundant Check ---
|
||||
// The check below is no longer needed as the initial filter handles it
|
||||
/*
|
||||
if (task.subtasks && task.subtasks.length > 0 && !force) {
|
||||
logger.info(
|
||||
`Skipping task ${task.id}: Already has subtasks. Use --force to overwrite.`
|
||||
);
|
||||
task.subtasks = [];
|
||||
skippedCount++;
|
||||
continue;
|
||||
}
|
||||
*/
|
||||
// --- End Removed Redundant Check ---
|
||||
|
||||
// Start indicator for individual task expansion in CLI mode
|
||||
let taskIndicator = null;
|
||||
if (!isMCPCall && outputFormat === 'text') {
|
||||
taskIndicator = startLoadingIndicator(`Expanding task ${task.id}...`);
|
||||
}
|
||||
|
||||
try {
|
||||
// Get complexity analysis for this task if available
|
||||
let taskAnalysis;
|
||||
if (complexityReport && complexityReport.complexityAnalysis) {
|
||||
taskAnalysis = complexityReport.complexityAnalysis.find(
|
||||
(a) => a.taskId === task.id
|
||||
);
|
||||
}
|
||||
|
||||
let thisNumSubtasks = numSubtasks;
|
||||
|
||||
// Use recommended number of subtasks from complexity analysis if available
|
||||
if (taskAnalysis && taskAnalysis.recommendedSubtasks) {
|
||||
report(
|
||||
`Using recommended ${taskAnalysis.recommendedSubtasks} subtasks based on complexity score ${taskAnalysis.complexityScore}/10 for task ${task.id}`
|
||||
);
|
||||
thisNumSubtasks = taskAnalysis.recommendedSubtasks;
|
||||
}
|
||||
|
||||
// Generate prompt for subtask creation based on task details
|
||||
const prompt = generateSubtaskPrompt(
|
||||
task,
|
||||
thisNumSubtasks,
|
||||
additionalContext,
|
||||
taskAnalysis
|
||||
);
|
||||
|
||||
// Use AI to generate subtasks
|
||||
const aiResponse = await getSubtasksFromAI(
|
||||
prompt,
|
||||
// Call the refactored expandTask function
|
||||
await expandTask(
|
||||
tasksPath,
|
||||
task.id,
|
||||
numSubtasks, // Pass numSubtasks, expandTask handles defaults/complexity
|
||||
useResearch,
|
||||
session,
|
||||
mcpLog
|
||||
additionalContext,
|
||||
context, // Pass the whole context object { session, mcpLog }
|
||||
force // Pass the force flag down
|
||||
);
|
||||
|
||||
if (
|
||||
aiResponse &&
|
||||
aiResponse.subtasks &&
|
||||
Array.isArray(aiResponse.subtasks) &&
|
||||
aiResponse.subtasks.length > 0
|
||||
) {
|
||||
// Process and add the subtasks to the task
|
||||
task.subtasks = aiResponse.subtasks.map((subtask, index) => ({
|
||||
id: index + 1,
|
||||
title: subtask.title || `Subtask ${index + 1}`,
|
||||
description: subtask.description || 'No description provided',
|
||||
status: 'pending',
|
||||
dependencies: subtask.dependencies || [],
|
||||
details: subtask.details || ''
|
||||
}));
|
||||
|
||||
report(`Added ${task.subtasks.length} subtasks to task ${task.id}`);
|
||||
expandedCount++;
|
||||
} else if (aiResponse && aiResponse.error) {
|
||||
// Handle error response
|
||||
const errorMsg = `Failed to generate subtasks for task ${task.id}: ${aiResponse.error}`;
|
||||
report(errorMsg, 'error');
|
||||
|
||||
// Add task ID to error info and provide actionable guidance
|
||||
const suggestion = aiResponse.suggestion.replace('<id>', task.id);
|
||||
report(`Suggestion: ${suggestion}`, 'info');
|
||||
|
||||
expansionErrors++;
|
||||
} else {
|
||||
report(`Failed to generate subtasks for task ${task.id}`, 'error');
|
||||
report(
|
||||
`Suggestion: Run 'task-master update-task --id=${task.id} --prompt="Generate subtasks for this task"' to manually create subtasks.`,
|
||||
'info'
|
||||
);
|
||||
expansionErrors++;
|
||||
expandedCount++;
|
||||
if (taskIndicator) {
|
||||
stopLoadingIndicator(taskIndicator, `Task ${task.id} expanded.`);
|
||||
}
|
||||
logger.info(`Successfully expanded task ${task.id}.`);
|
||||
} catch (error) {
|
||||
report(`Error expanding task ${task.id}: ${error.message}`, 'error');
|
||||
expansionErrors++;
|
||||
failedCount++;
|
||||
if (taskIndicator) {
|
||||
stopLoadingIndicator(
|
||||
taskIndicator,
|
||||
`Failed to expand task ${task.id}.`,
|
||||
false
|
||||
);
|
||||
}
|
||||
logger.error(`Failed to expand task ${task.id}: ${error.message}`);
|
||||
// Continue to the next task
|
||||
}
|
||||
|
||||
// Small delay to prevent rate limiting
|
||||
await new Promise((resolve) => setTimeout(resolve, 100));
|
||||
}
|
||||
|
||||
// Save the updated tasks
|
||||
writeJSON(tasksPath, data);
|
||||
// Log final summary (removed skipped count from message)
|
||||
logger.info(
|
||||
`Expansion complete: ${expandedCount} expanded, ${failedCount} failed.`
|
||||
);
|
||||
|
||||
// Generate task files
|
||||
if (outputFormat === 'text') {
|
||||
// Only perform file generation for CLI (text) mode
|
||||
const outputDir = path.dirname(tasksPath);
|
||||
await generateTaskFiles(tasksPath, outputDir);
|
||||
}
|
||||
|
||||
// Return structured result for MCP
|
||||
// Return summary (skippedCount is now 0) - Add success: true here as well for consistency
|
||||
return {
|
||||
success: true,
|
||||
success: true, // Indicate overall success
|
||||
expandedCount,
|
||||
tasksToExpand: tasksToExpand.length,
|
||||
expansionErrors,
|
||||
message: `Successfully expanded ${expandedCount} out of ${tasksToExpand.length} tasks${expansionErrors > 0 ? ` (${expansionErrors} errors)` : ''}`
|
||||
failedCount,
|
||||
skippedCount: 0,
|
||||
tasksToExpand: tasksToExpandCount
|
||||
};
|
||||
} catch (error) {
|
||||
report(`Error expanding tasks: ${error.message}`, 'error');
|
||||
throw error;
|
||||
} finally {
|
||||
// Stop the loading indicator if it was created
|
||||
if (loadingIndicator && outputFormat === 'text') {
|
||||
stopLoadingIndicator(loadingIndicator);
|
||||
}
|
||||
|
||||
// Final progress report
|
||||
if (reportProgress) {
|
||||
reportProgress({
|
||||
status: 'completed',
|
||||
current: expandedCount,
|
||||
total: tasksToExpand.length,
|
||||
message: `Completed expanding ${expandedCount} out of ${tasksToExpand.length} tasks`
|
||||
});
|
||||
}
|
||||
|
||||
// Display completion message for CLI mode
|
||||
if (outputFormat === 'text') {
|
||||
console.log(
|
||||
boxen(
|
||||
chalk.white.bold(`Task Expansion Completed`) +
|
||||
'\n\n' +
|
||||
chalk.white(
|
||||
`Expanded ${expandedCount} out of ${tasksToExpand.length} tasks`
|
||||
) +
|
||||
'\n' +
|
||||
chalk.white(
|
||||
`Each task now has detailed subtasks to guide implementation`
|
||||
),
|
||||
{
|
||||
padding: 1,
|
||||
borderColor: 'green',
|
||||
borderStyle: 'round',
|
||||
margin: { top: 1 }
|
||||
}
|
||||
)
|
||||
);
|
||||
|
||||
// Suggest next actions
|
||||
if (expandedCount > 0) {
|
||||
console.log(chalk.bold('\nNext Steps:'));
|
||||
console.log(
|
||||
chalk.cyan(
|
||||
`1. Run ${chalk.yellow('task-master list --with-subtasks')} to see all tasks with their subtasks`
|
||||
)
|
||||
);
|
||||
console.log(
|
||||
chalk.cyan(
|
||||
`2. Run ${chalk.yellow('task-master next')} to find the next task to work on`
|
||||
)
|
||||
);
|
||||
console.log(
|
||||
chalk.cyan(
|
||||
`3. Run ${chalk.yellow('task-master set-status --id=<taskId> --status=in-progress')} to start working on a task`
|
||||
)
|
||||
);
|
||||
}
|
||||
if (loadingIndicator)
|
||||
stopLoadingIndicator(loadingIndicator, 'Error.', false);
|
||||
logger.error(`Error during expand all operation: ${error.message}`);
|
||||
if (!isMCPCall && getDebugFlag(session)) {
|
||||
console.error(error); // Log full stack in debug CLI mode
|
||||
}
|
||||
// Re-throw error for the caller to handle, the direct function will format it
|
||||
throw error; // Let direct function wrapper handle formatting
|
||||
/* Original re-throw:
|
||||
throw new Error(`Failed to expand all tasks: ${error.message}`);
|
||||
*/
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -312,14 +312,18 @@ function parseSubtasksFromText(
|
||||
|
||||
/**
|
||||
* Expand a task into subtasks using the unified AI service (generateTextService).
|
||||
* Appends new subtasks by default. Replaces existing subtasks if force=true.
|
||||
* Integrates complexity report to determine subtask count and prompt if available,
|
||||
* unless numSubtasks is explicitly provided.
|
||||
* @param {string} tasksPath - Path to the tasks.json file
|
||||
* @param {number} taskId - Task ID to expand
|
||||
* @param {number} [numSubtasks] - Optional: Target number of subtasks. Uses config default if not provided.
|
||||
* @param {number | null | undefined} [numSubtasks] - Optional: Explicit target number of subtasks. If null/undefined, check complexity report or config default.
|
||||
* @param {boolean} [useResearch=false] - Whether to use the research AI role.
|
||||
* @param {string} [additionalContext=''] - Optional additional context.
|
||||
* @param {Object} context - Context object containing session and mcpLog.
|
||||
* @param {Object} [context.session] - Session object from MCP.
|
||||
* @param {Object} [context.mcpLog] - MCP logger object.
|
||||
* @param {boolean} [force=false] - If true, replace existing subtasks; otherwise, append.
|
||||
* @returns {Promise<Object>} The updated parent task object with new subtasks.
|
||||
* @throws {Error} If task not found, AI service fails, or parsing fails.
|
||||
*/
|
||||
@@ -329,7 +333,8 @@ async function expandTask(
|
||||
numSubtasks,
|
||||
useResearch = false,
|
||||
additionalContext = '',
|
||||
context = {}
|
||||
context = {},
|
||||
force = false
|
||||
) {
|
||||
const { session, mcpLog } = context;
|
||||
const outputFormat = mcpLog ? 'json' : 'text';
|
||||
@@ -361,56 +366,142 @@ async function expandTask(
|
||||
logger.info(`Expanding task ${taskId}: ${task.title}`);
|
||||
// --- End Task Loading/Filtering ---
|
||||
|
||||
// --- Subtask Count & Complexity Check (Unchanged) ---
|
||||
let subtaskCount = parseInt(numSubtasks, 10);
|
||||
if (isNaN(subtaskCount) || subtaskCount <= 0) {
|
||||
subtaskCount = getDefaultSubtasks(session); // Pass session
|
||||
logger.info(`Using default number of subtasks: ${subtaskCount}`);
|
||||
// --- Handle Force Flag: Clear existing subtasks if force=true ---
|
||||
if (force && Array.isArray(task.subtasks) && task.subtasks.length > 0) {
|
||||
logger.info(
|
||||
`Force flag set. Clearing existing ${task.subtasks.length} subtasks for task ${taskId}.`
|
||||
);
|
||||
task.subtasks = []; // Clear existing subtasks
|
||||
}
|
||||
// ... (complexity report check logic remains) ...
|
||||
// --- End Subtask Count & Complexity Check ---
|
||||
// --- End Force Flag Handling ---
|
||||
|
||||
// --- AI Subtask Generation using generateTextService ---
|
||||
let generatedSubtasks = [];
|
||||
const nextSubtaskId = (task.subtasks?.length || 0) + 1;
|
||||
// --- Complexity Report Integration ---
|
||||
let finalSubtaskCount;
|
||||
let promptContent = '';
|
||||
let complexityReasoningContext = '';
|
||||
let systemPrompt; // Declare systemPrompt here
|
||||
|
||||
let loadingIndicator = null;
|
||||
if (outputFormat === 'text') {
|
||||
loadingIndicator = startLoadingIndicator(
|
||||
`Generating ${subtaskCount} subtasks...`
|
||||
const projectRoot = path.dirname(path.dirname(tasksPath));
|
||||
const complexityReportPath = path.join(
|
||||
projectRoot,
|
||||
'scripts/task-complexity-report.json'
|
||||
);
|
||||
let taskAnalysis = null;
|
||||
|
||||
try {
|
||||
if (fs.existsSync(complexityReportPath)) {
|
||||
const complexityReport = readJSON(complexityReportPath);
|
||||
taskAnalysis = complexityReport?.complexityAnalysis?.find(
|
||||
(a) => a.taskId === task.id
|
||||
);
|
||||
if (taskAnalysis) {
|
||||
logger.info(
|
||||
`Found complexity analysis for task ${task.id}: Score ${taskAnalysis.complexityScore}`
|
||||
);
|
||||
if (taskAnalysis.reasoning) {
|
||||
complexityReasoningContext = `\nComplexity Analysis Reasoning: ${taskAnalysis.reasoning}`;
|
||||
}
|
||||
} else {
|
||||
logger.info(
|
||||
`No complexity analysis found for task ${task.id} in report.`
|
||||
);
|
||||
}
|
||||
} else {
|
||||
logger.info(
|
||||
`Complexity report not found at ${complexityReportPath}. Skipping complexity check.`
|
||||
);
|
||||
}
|
||||
} catch (reportError) {
|
||||
logger.warn(
|
||||
`Could not read or parse complexity report: ${reportError.message}. Proceeding without it.`
|
||||
);
|
||||
}
|
||||
|
||||
let responseText = ''; // To store the raw text response
|
||||
// Determine final subtask count
|
||||
const explicitNumSubtasks = parseInt(numSubtasks, 10);
|
||||
if (!isNaN(explicitNumSubtasks) && explicitNumSubtasks > 0) {
|
||||
finalSubtaskCount = explicitNumSubtasks;
|
||||
logger.info(
|
||||
`Using explicitly provided subtask count: ${finalSubtaskCount}`
|
||||
);
|
||||
} else if (taskAnalysis?.recommendedSubtasks) {
|
||||
finalSubtaskCount = parseInt(taskAnalysis.recommendedSubtasks, 10);
|
||||
logger.info(
|
||||
`Using subtask count from complexity report: ${finalSubtaskCount}`
|
||||
);
|
||||
} else {
|
||||
finalSubtaskCount = getDefaultSubtasks(session);
|
||||
logger.info(`Using default number of subtasks: ${finalSubtaskCount}`);
|
||||
}
|
||||
if (isNaN(finalSubtaskCount) || finalSubtaskCount <= 0) {
|
||||
logger.warn(
|
||||
`Invalid subtask count determined (${finalSubtaskCount}), defaulting to 3.`
|
||||
);
|
||||
finalSubtaskCount = 3;
|
||||
}
|
||||
|
||||
// Determine prompt content AND system prompt
|
||||
const nextSubtaskId = (task.subtasks?.length || 0) + 1;
|
||||
|
||||
if (taskAnalysis?.expansionPrompt) {
|
||||
// Use prompt from complexity report
|
||||
promptContent = taskAnalysis.expansionPrompt;
|
||||
// Append additional context and reasoning
|
||||
promptContent += `\n\n${additionalContext}`.trim();
|
||||
promptContent += `${complexityReasoningContext}`.trim();
|
||||
|
||||
// --- Use Simplified System Prompt for Report Prompts ---
|
||||
systemPrompt = `You are an AI assistant helping with task breakdown. Generate exactly ${finalSubtaskCount} subtasks based on the provided prompt and context. Respond ONLY with a valid JSON object containing a single key "subtasks" whose value is an array of the generated subtask objects. Each subtask object in the array must have keys: "id", "title", "description", "dependencies", "details", "status". Ensure the 'id' starts from ${nextSubtaskId} and is sequential. Ensure 'dependencies' only reference valid prior subtask IDs generated in this response (starting from ${nextSubtaskId}). Ensure 'status' is 'pending'. Do not include any other text or explanation.`;
|
||||
logger.info(
|
||||
`Using expansion prompt from complexity report and simplified system prompt for task ${task.id}.`
|
||||
);
|
||||
// --- End Simplified System Prompt ---
|
||||
} else {
|
||||
// Use standard prompt generation
|
||||
const combinedAdditionalContext =
|
||||
`${additionalContext}${complexityReasoningContext}`.trim();
|
||||
if (useResearch) {
|
||||
promptContent = generateResearchUserPrompt(
|
||||
task,
|
||||
finalSubtaskCount,
|
||||
combinedAdditionalContext,
|
||||
nextSubtaskId
|
||||
);
|
||||
// Use the specific research system prompt if needed, or a standard one
|
||||
systemPrompt = `You are an AI assistant that responds ONLY with valid JSON objects as requested. The object should contain a 'subtasks' array.`; // Or keep generateResearchSystemPrompt if it exists
|
||||
} else {
|
||||
promptContent = generateMainUserPrompt(
|
||||
task,
|
||||
finalSubtaskCount,
|
||||
combinedAdditionalContext,
|
||||
nextSubtaskId
|
||||
);
|
||||
// Use the original detailed system prompt for standard generation
|
||||
systemPrompt = generateMainSystemPrompt(finalSubtaskCount);
|
||||
}
|
||||
logger.info(`Using standard prompt generation for task ${task.id}.`);
|
||||
}
|
||||
// --- End Complexity Report / Prompt Logic ---
|
||||
|
||||
// --- AI Subtask Generation using generateTextService ---
|
||||
let generatedSubtasks = [];
|
||||
let loadingIndicator = null;
|
||||
if (outputFormat === 'text') {
|
||||
loadingIndicator = startLoadingIndicator(
|
||||
`Generating ${finalSubtaskCount} subtasks...`
|
||||
);
|
||||
}
|
||||
|
||||
let responseText = '';
|
||||
|
||||
try {
|
||||
// 1. Determine Role and Generate Prompts
|
||||
const role = useResearch ? 'research' : 'main';
|
||||
logger.info(`Using AI service with role: ${role}`);
|
||||
let prompt;
|
||||
let systemPrompt;
|
||||
if (useResearch) {
|
||||
prompt = generateResearchUserPrompt(
|
||||
task,
|
||||
subtaskCount,
|
||||
additionalContext,
|
||||
nextSubtaskId
|
||||
);
|
||||
systemPrompt = `You are an AI assistant that responds ONLY with valid JSON objects as requested. The object should contain a 'subtasks' array.`;
|
||||
} else {
|
||||
prompt = generateMainUserPrompt(
|
||||
task,
|
||||
subtaskCount,
|
||||
additionalContext,
|
||||
nextSubtaskId
|
||||
);
|
||||
systemPrompt = generateMainSystemPrompt(subtaskCount);
|
||||
}
|
||||
|
||||
// 2. Call generateTextService
|
||||
// Call generateTextService with the determined prompts
|
||||
responseText = await generateTextService({
|
||||
prompt,
|
||||
systemPrompt,
|
||||
prompt: promptContent,
|
||||
systemPrompt: systemPrompt, // Use the determined system prompt
|
||||
role,
|
||||
session
|
||||
});
|
||||
@@ -419,46 +510,45 @@ async function expandTask(
|
||||
'success'
|
||||
);
|
||||
|
||||
// 3. Parse Subtasks from Text Response
|
||||
try {
|
||||
generatedSubtasks = parseSubtasksFromText(
|
||||
responseText,
|
||||
nextSubtaskId,
|
||||
subtaskCount,
|
||||
task.id,
|
||||
logger // Pass the logger
|
||||
);
|
||||
logger.info(
|
||||
`Successfully parsed ${generatedSubtasks.length} subtasks from AI response.`
|
||||
);
|
||||
} catch (parseError) {
|
||||
// Log error and throw
|
||||
logger.error(
|
||||
`Failed to parse subtasks from AI response: ${parseError.message}`
|
||||
);
|
||||
if (getDebugFlag(session)) {
|
||||
// Use getter with session
|
||||
logger.error(`Raw AI Response:\n${responseText}`);
|
||||
}
|
||||
throw new Error(
|
||||
`Failed to parse valid subtasks from AI response: ${parseError.message}`
|
||||
);
|
||||
}
|
||||
// --- End AI Subtask Generation ---
|
||||
// Parse Subtasks
|
||||
generatedSubtasks = parseSubtasksFromText(
|
||||
responseText,
|
||||
nextSubtaskId,
|
||||
finalSubtaskCount,
|
||||
task.id,
|
||||
logger
|
||||
);
|
||||
logger.info(
|
||||
`Successfully parsed ${generatedSubtasks.length} subtasks from AI response.`
|
||||
);
|
||||
} catch (error) {
|
||||
if (loadingIndicator) stopLoadingIndicator(loadingIndicator);
|
||||
logger.error(
|
||||
`Error generating subtasks via AI service: ${error.message}`,
|
||||
`Error during AI call or parsing for task ${taskId}: ${error.message}`, // Added task ID context
|
||||
'error'
|
||||
);
|
||||
throw error; // Re-throw AI service error
|
||||
// Log raw response in debug mode if parsing failed
|
||||
if (
|
||||
error.message.includes('Failed to parse valid subtasks') &&
|
||||
getDebugFlag(session)
|
||||
) {
|
||||
logger.error(`Raw AI Response that failed parsing:\n${responseText}`);
|
||||
}
|
||||
throw error;
|
||||
} finally {
|
||||
if (loadingIndicator) stopLoadingIndicator(loadingIndicator);
|
||||
}
|
||||
|
||||
// --- Task Update & File Writing (Unchanged) ---
|
||||
task.subtasks = generatedSubtasks;
|
||||
data.tasks[taskIndex] = task;
|
||||
// --- Task Update & File Writing ---
|
||||
// Ensure task.subtasks is an array before appending
|
||||
if (!Array.isArray(task.subtasks)) {
|
||||
task.subtasks = [];
|
||||
}
|
||||
// Append the newly generated and validated subtasks
|
||||
task.subtasks.push(...generatedSubtasks);
|
||||
// --- End Change: Append instead of replace ---
|
||||
|
||||
data.tasks[taskIndex] = task; // Assign the modified task back
|
||||
logger.info(`Writing updated tasks to ${tasksPath}`);
|
||||
writeJSON(tasksPath, data);
|
||||
logger.info(`Generating individual task files...`);
|
||||
@@ -471,7 +561,6 @@ async function expandTask(
|
||||
// Catches errors from file reading, parsing, AI call etc.
|
||||
logger.error(`Error expanding task ${taskId}: ${error.message}`, 'error');
|
||||
if (outputFormat === 'text' && getDebugFlag(session)) {
|
||||
// Use getter with session
|
||||
console.error(error); // Log full stack in debug CLI mode
|
||||
}
|
||||
throw error; // Re-throw for the caller
|
||||
|
||||
@@ -70,3 +70,65 @@ This implementation should include:
|
||||
6. Performance Testing:
|
||||
- Measure rendering time for large projects
|
||||
- Ensure reasonable performance with 100+ interconnected tasks
|
||||
|
||||
# Subtasks:
|
||||
## 1. CLI Command Setup [pending]
|
||||
### Dependencies: None
|
||||
### Description: Design and implement the command-line interface for the dependency graph tool, including argument parsing and help documentation.
|
||||
### Details:
|
||||
Define commands for input file specification, output options, filtering, and other user-configurable parameters.
|
||||
|
||||
## 2. Graph Layout Algorithms [pending]
|
||||
### Dependencies: 41.1
|
||||
### Description: Develop or integrate algorithms to compute optimal node and edge placement for clear and readable graph layouts in a terminal environment.
|
||||
### Details:
|
||||
Consider topological sorting, hierarchical, and force-directed layouts suitable for ASCII/Unicode rendering.
|
||||
|
||||
## 3. ASCII/Unicode Rendering Engine [pending]
|
||||
### Dependencies: 41.2
|
||||
### Description: Implement rendering logic to display the dependency graph using ASCII and Unicode characters in the terminal.
|
||||
### Details:
|
||||
Support for various node and edge styles, and ensure compatibility with different terminal types.
|
||||
|
||||
## 4. Color Coding Support [pending]
|
||||
### Dependencies: 41.3
|
||||
### Description: Add color coding to nodes and edges to visually distinguish types, statuses, or other attributes in the graph.
|
||||
### Details:
|
||||
Use ANSI escape codes for color; provide options for colorblind-friendly palettes.
|
||||
|
||||
## 5. Circular Dependency Detection [pending]
|
||||
### Dependencies: 41.2
|
||||
### Description: Implement algorithms to detect and highlight circular dependencies within the graph.
|
||||
### Details:
|
||||
Clearly mark cycles in the rendered output and provide warnings or errors as appropriate.
|
||||
|
||||
## 6. Filtering and Search Functionality [pending]
|
||||
### Dependencies: 41.1, 41.2
|
||||
### Description: Enable users to filter nodes and edges by criteria such as name, type, or dependency depth.
|
||||
### Details:
|
||||
Support command-line flags for filtering and interactive search if feasible.
|
||||
|
||||
## 7. Accessibility Features [pending]
|
||||
### Dependencies: 41.3, 41.4
|
||||
### Description: Ensure the tool is accessible, including support for screen readers, high-contrast modes, and keyboard navigation.
|
||||
### Details:
|
||||
Provide alternative text output and ensure color is not the sole means of conveying information.
|
||||
|
||||
## 8. Performance Optimization [pending]
|
||||
### Dependencies: 41.2, 41.3, 41.4, 41.5, 41.6
|
||||
### Description: Profile and optimize the tool for large graphs to ensure responsive rendering and low memory usage.
|
||||
### Details:
|
||||
Implement lazy loading, efficient data structures, and parallel processing where appropriate.
|
||||
|
||||
## 9. Documentation [pending]
|
||||
### Dependencies: 41.1, 41.2, 41.3, 41.4, 41.5, 41.6, 41.7, 41.8
|
||||
### Description: Write comprehensive user and developer documentation covering installation, usage, configuration, and extension.
|
||||
### Details:
|
||||
Include examples, troubleshooting, and contribution guidelines.
|
||||
|
||||
## 10. Testing and Validation [pending]
|
||||
### Dependencies: 41.1, 41.2, 41.3, 41.4, 41.5, 41.6, 41.7, 41.8, 41.9
|
||||
### Description: Develop automated tests for all major features, including CLI parsing, layout correctness, rendering, color coding, filtering, and cycle detection.
|
||||
### Details:
|
||||
Include unit, integration, and regression tests; validate accessibility and performance claims.
|
||||
|
||||
|
||||
@@ -51,3 +51,41 @@ Testing should verify both the functionality and the quality of suggestions:
|
||||
- Test with a parent task that has no description
|
||||
- Test with a parent task that already has many subtasks
|
||||
- Test with a newly created system with minimal task history
|
||||
|
||||
# Subtasks:
|
||||
## 1. Implement parent task validation [pending]
|
||||
### Dependencies: None
|
||||
### Description: Create validation logic to ensure subtasks are being added to valid parent tasks
|
||||
### Details:
|
||||
Develop functions to verify that the parent task exists in the system before allowing subtask creation. Handle error cases gracefully with informative messages. Include validation for task ID format and existence in the database.
|
||||
|
||||
## 2. Build context gathering mechanism [pending]
|
||||
### Dependencies: 53.1
|
||||
### Description: Develop a system to collect relevant context from parent task and existing subtasks
|
||||
### Details:
|
||||
Create functions to extract information from the parent task including title, description, and metadata. Also gather information about any existing subtasks to provide context for AI suggestions. Format this data appropriately for the AI prompt.
|
||||
|
||||
## 3. Develop AI suggestion logic for subtasks [pending]
|
||||
### Dependencies: 53.2
|
||||
### Description: Create the core AI integration to generate relevant subtask suggestions
|
||||
### Details:
|
||||
Implement the AI prompt engineering and response handling for subtask generation. Ensure the AI provides structured output with appropriate fields for subtasks. Include error handling for API failures and malformed responses.
|
||||
|
||||
## 4. Create interactive CLI interface [pending]
|
||||
### Dependencies: 53.3
|
||||
### Description: Build a user-friendly command-line interface for the subtask suggestion feature
|
||||
### Details:
|
||||
Develop CLI commands and options for requesting subtask suggestions. Include interactive elements for selecting, modifying, or rejecting suggested subtasks. Ensure clear user feedback throughout the process.
|
||||
|
||||
## 5. Implement subtask linking functionality [pending]
|
||||
### Dependencies: 53.4
|
||||
### Description: Create system to properly link suggested subtasks to their parent task
|
||||
### Details:
|
||||
Develop the database operations to save accepted subtasks and link them to the parent task. Include functionality for setting dependencies between subtasks. Ensure proper transaction handling to maintain data integrity.
|
||||
|
||||
## 6. Perform comprehensive testing [pending]
|
||||
### Dependencies: 53.5
|
||||
### Description: Test the subtask suggestion feature across various scenarios
|
||||
### Details:
|
||||
Create unit tests for each component. Develop integration tests for the full feature workflow. Test edge cases including invalid inputs, API failures, and unusual task structures. Document test results and fix any identified issues.
|
||||
|
||||
|
||||
@@ -28,3 +28,41 @@ This change will make the package more reliable, follow npm best practices, and
|
||||
7. Test the uninstall process to verify it cleanly removes the package without leaving unwanted modifications
|
||||
8. Verify the package works in different npm environments (npm 6, 7, 8) and with different Node.js versions
|
||||
9. Create an integration test that simulates a real user workflow from installation through usage
|
||||
|
||||
# Subtasks:
|
||||
## 1. Conduct Code Audit for Dependency Management [pending]
|
||||
### Dependencies: None
|
||||
### Description: Review the current codebase to identify all areas where dependencies are manually managed, modified, or referenced outside of npm best practices.
|
||||
### Details:
|
||||
Focus on scripts, configuration files, and any custom logic related to dependency installation or versioning.
|
||||
|
||||
## 2. Remove Manual Dependency Modifications [pending]
|
||||
### Dependencies: 59.1
|
||||
### Description: Eliminate any custom scripts or manual steps that alter dependencies outside of npm's standard workflow.
|
||||
### Details:
|
||||
Refactor or delete code that manually installs, updates, or modifies dependencies, ensuring all dependency management is handled via npm.
|
||||
|
||||
## 3. Update npm Dependencies [pending]
|
||||
### Dependencies: 59.2
|
||||
### Description: Update all project dependencies using npm, ensuring versions are current and compatible, and resolve any conflicts.
|
||||
### Details:
|
||||
Run npm update, audit for vulnerabilities, and adjust package.json and package-lock.json as needed.
|
||||
|
||||
## 4. Update Initialization and Installation Commands [pending]
|
||||
### Dependencies: 59.3
|
||||
### Description: Revise project setup scripts and documentation to reflect the new npm-based dependency management approach.
|
||||
### Details:
|
||||
Ensure that all initialization commands (e.g., npm install) are up-to-date and remove references to deprecated manual steps.
|
||||
|
||||
## 5. Update Documentation [pending]
|
||||
### Dependencies: 59.4
|
||||
### Description: Revise project documentation to describe the new dependency management process and provide clear setup instructions.
|
||||
### Details:
|
||||
Update README, onboarding guides, and any developer documentation to align with npm best practices.
|
||||
|
||||
## 6. Perform Regression Testing [pending]
|
||||
### Dependencies: 59.5
|
||||
### Description: Run comprehensive tests to ensure that the refactor has not introduced any regressions or broken existing functionality.
|
||||
### Details:
|
||||
Execute automated and manual tests, focusing on areas affected by dependency management changes.
|
||||
|
||||
|
||||
@@ -1918,7 +1918,7 @@ These enhancements will ensure the refactored code is modular, maintainable, and
|
||||
- Document the expected shape of the session object and any required fields for downstream service calls, so future maintainers know what context must be provided.
|
||||
</info added on 2025-04-24T17:46:51.286Z>
|
||||
|
||||
## 38. Refactor expand-all-tasks.js for Unified AI Helpers & Config [pending]
|
||||
## 38. Refactor expand-all-tasks.js for Unified AI Helpers & Config [done]
|
||||
### Dependencies: None
|
||||
### Description: Ensure this file correctly calls the refactored `getSubtasksFromAI` helper. Update config usage to only use `getDefaultSubtasks` from `config-manager.js` directly. AI interaction itself is handled by the helper.
|
||||
### Details:
|
||||
|
||||
249
tasks/tasks.json
249
tasks/tasks.json
@@ -2386,7 +2386,128 @@
|
||||
"dependencies": [],
|
||||
"priority": "medium",
|
||||
"details": "This implementation should include:\n\n1. Create a new command `graph` or `visualize` that displays the dependency graph.\n\n2. Design an ASCII/Unicode-based graph rendering system that:\n - Represents each task as a node with its ID and abbreviated title\n - Shows dependencies as directional lines between nodes (→, ↑, ↓, etc.)\n - Uses color coding for different task statuses (e.g., green for completed, yellow for in-progress, red for blocked)\n - Handles complex dependency chains with proper spacing and alignment\n\n3. Implement layout algorithms to:\n - Minimize crossing lines for better readability\n - Properly space nodes to avoid overlapping\n - Support both vertical and horizontal graph orientations (as a configurable option)\n\n4. Add detection and highlighting of circular dependencies with a distinct color/pattern\n\n5. Include a legend explaining the color coding and symbols used\n\n6. Ensure the graph is responsive to terminal width, with options to:\n - Automatically scale to fit the current terminal size\n - Allow zooming in/out of specific sections for large graphs\n - Support pagination or scrolling for very large dependency networks\n\n7. Add options to filter the graph by:\n - Specific task IDs or ranges\n - Task status\n - Dependency depth (e.g., show only direct dependencies or N levels deep)\n\n8. Ensure accessibility by using distinct patterns in addition to colors for users with color vision deficiencies\n\n9. Optimize performance for projects with many tasks and complex dependency relationships",
|
||||
"testStrategy": "1. Unit Tests:\n - Test the graph generation algorithm with various dependency structures\n - Verify correct node placement and connection rendering\n - Test circular dependency detection\n - Verify color coding matches task statuses\n\n2. Integration Tests:\n - Test the command with projects of varying sizes (small, medium, large)\n - Verify correct handling of different terminal sizes\n - Test all filtering options\n\n3. Visual Verification:\n - Create test cases with predefined dependency structures and verify the visual output matches expected patterns\n - Test with terminals of different sizes, including very narrow terminals\n - Verify readability of complex graphs\n\n4. Edge Cases:\n - Test with no dependencies (single nodes only)\n - Test with circular dependencies\n - Test with very deep dependency chains\n - Test with wide dependency networks (many parallel tasks)\n - Test with the maximum supported number of tasks\n\n5. Usability Testing:\n - Have team members use the feature and provide feedback on readability and usefulness\n - Test in different terminal emulators to ensure compatibility\n - Verify the feature works in terminals with limited color support\n\n6. Performance Testing:\n - Measure rendering time for large projects\n - Ensure reasonable performance with 100+ interconnected tasks"
|
||||
"testStrategy": "1. Unit Tests:\n - Test the graph generation algorithm with various dependency structures\n - Verify correct node placement and connection rendering\n - Test circular dependency detection\n - Verify color coding matches task statuses\n\n2. Integration Tests:\n - Test the command with projects of varying sizes (small, medium, large)\n - Verify correct handling of different terminal sizes\n - Test all filtering options\n\n3. Visual Verification:\n - Create test cases with predefined dependency structures and verify the visual output matches expected patterns\n - Test with terminals of different sizes, including very narrow terminals\n - Verify readability of complex graphs\n\n4. Edge Cases:\n - Test with no dependencies (single nodes only)\n - Test with circular dependencies\n - Test with very deep dependency chains\n - Test with wide dependency networks (many parallel tasks)\n - Test with the maximum supported number of tasks\n\n5. Usability Testing:\n - Have team members use the feature and provide feedback on readability and usefulness\n - Test in different terminal emulators to ensure compatibility\n - Verify the feature works in terminals with limited color support\n\n6. Performance Testing:\n - Measure rendering time for large projects\n - Ensure reasonable performance with 100+ interconnected tasks",
|
||||
"subtasks": [
|
||||
{
|
||||
"id": 1,
|
||||
"title": "CLI Command Setup",
|
||||
"description": "Design and implement the command-line interface for the dependency graph tool, including argument parsing and help documentation.",
|
||||
"dependencies": [],
|
||||
"details": "Define commands for input file specification, output options, filtering, and other user-configurable parameters.",
|
||||
"status": "pending"
|
||||
},
|
||||
{
|
||||
"id": 2,
|
||||
"title": "Graph Layout Algorithms",
|
||||
"description": "Develop or integrate algorithms to compute optimal node and edge placement for clear and readable graph layouts in a terminal environment.",
|
||||
"dependencies": [
|
||||
1
|
||||
],
|
||||
"details": "Consider topological sorting, hierarchical, and force-directed layouts suitable for ASCII/Unicode rendering.",
|
||||
"status": "pending"
|
||||
},
|
||||
{
|
||||
"id": 3,
|
||||
"title": "ASCII/Unicode Rendering Engine",
|
||||
"description": "Implement rendering logic to display the dependency graph using ASCII and Unicode characters in the terminal.",
|
||||
"dependencies": [
|
||||
2
|
||||
],
|
||||
"details": "Support for various node and edge styles, and ensure compatibility with different terminal types.",
|
||||
"status": "pending"
|
||||
},
|
||||
{
|
||||
"id": 4,
|
||||
"title": "Color Coding Support",
|
||||
"description": "Add color coding to nodes and edges to visually distinguish types, statuses, or other attributes in the graph.",
|
||||
"dependencies": [
|
||||
3
|
||||
],
|
||||
"details": "Use ANSI escape codes for color; provide options for colorblind-friendly palettes.",
|
||||
"status": "pending"
|
||||
},
|
||||
{
|
||||
"id": 5,
|
||||
"title": "Circular Dependency Detection",
|
||||
"description": "Implement algorithms to detect and highlight circular dependencies within the graph.",
|
||||
"dependencies": [
|
||||
2
|
||||
],
|
||||
"details": "Clearly mark cycles in the rendered output and provide warnings or errors as appropriate.",
|
||||
"status": "pending"
|
||||
},
|
||||
{
|
||||
"id": 6,
|
||||
"title": "Filtering and Search Functionality",
|
||||
"description": "Enable users to filter nodes and edges by criteria such as name, type, or dependency depth.",
|
||||
"dependencies": [
|
||||
1,
|
||||
2
|
||||
],
|
||||
"details": "Support command-line flags for filtering and interactive search if feasible.",
|
||||
"status": "pending"
|
||||
},
|
||||
{
|
||||
"id": 7,
|
||||
"title": "Accessibility Features",
|
||||
"description": "Ensure the tool is accessible, including support for screen readers, high-contrast modes, and keyboard navigation.",
|
||||
"dependencies": [
|
||||
3,
|
||||
4
|
||||
],
|
||||
"details": "Provide alternative text output and ensure color is not the sole means of conveying information.",
|
||||
"status": "pending"
|
||||
},
|
||||
{
|
||||
"id": 8,
|
||||
"title": "Performance Optimization",
|
||||
"description": "Profile and optimize the tool for large graphs to ensure responsive rendering and low memory usage.",
|
||||
"dependencies": [
|
||||
2,
|
||||
3,
|
||||
4,
|
||||
5,
|
||||
6
|
||||
],
|
||||
"details": "Implement lazy loading, efficient data structures, and parallel processing where appropriate.",
|
||||
"status": "pending"
|
||||
},
|
||||
{
|
||||
"id": 9,
|
||||
"title": "Documentation",
|
||||
"description": "Write comprehensive user and developer documentation covering installation, usage, configuration, and extension.",
|
||||
"dependencies": [
|
||||
1,
|
||||
2,
|
||||
3,
|
||||
4,
|
||||
5,
|
||||
6,
|
||||
7,
|
||||
8
|
||||
],
|
||||
"details": "Include examples, troubleshooting, and contribution guidelines.",
|
||||
"status": "pending"
|
||||
},
|
||||
{
|
||||
"id": 10,
|
||||
"title": "Testing and Validation",
|
||||
"description": "Develop automated tests for all major features, including CLI parsing, layout correctness, rendering, color coding, filtering, and cycle detection.",
|
||||
"dependencies": [
|
||||
1,
|
||||
2,
|
||||
3,
|
||||
4,
|
||||
5,
|
||||
6,
|
||||
7,
|
||||
8,
|
||||
9
|
||||
],
|
||||
"details": "Include unit, integration, and regression tests; validate accessibility and performance claims.",
|
||||
"status": "pending"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"id": 42,
|
||||
@@ -2665,7 +2786,67 @@
|
||||
"dependencies": [],
|
||||
"priority": "medium",
|
||||
"details": "Develop a new command `suggest-subtask <task-id>` that generates intelligent subtask suggestions for a specified parent task. The implementation should:\n\n1. Accept a parent task ID as input and validate it exists\n2. Gather a snapshot of all existing tasks in the system (titles only, with their statuses and dependencies)\n3. Retrieve the full details of the specified parent task\n4. Use this context to generate a relevant subtask suggestion that would logically help complete the parent task\n5. Present the suggestion to the user in the CLI with options to:\n - Accept (a): Add the subtask to the system under the parent task\n - Decline (d): Reject the suggestion without adding anything\n - Regenerate (r): Generate a new alternative subtask suggestion\n - Edit (e): Accept but allow editing the title/description before adding\n\nThe suggestion algorithm should consider:\n- The parent task's description and requirements\n- Current progress (% complete) of the parent task\n- Existing subtasks already created for this parent\n- Similar patterns from other tasks in the system\n- Logical next steps based on software development best practices\n\nWhen a subtask is accepted, it should be properly linked to the parent task and assigned appropriate default values for priority and status.",
|
||||
"testStrategy": "Testing should verify both the functionality and the quality of suggestions:\n\n1. Unit tests:\n - Test command parsing and validation of task IDs\n - Test snapshot creation of existing tasks\n - Test the suggestion generation with mocked data\n - Test the user interaction flow with simulated inputs\n\n2. Integration tests:\n - Create a test parent task and verify subtask suggestions are contextually relevant\n - Test the accept/decline/regenerate workflow end-to-end\n - Verify proper linking of accepted subtasks to parent tasks\n - Test with various types of parent tasks (frontend, backend, documentation, etc.)\n\n3. Quality assessment:\n - Create a benchmark set of 10 diverse parent tasks\n - Generate 3 subtask suggestions for each and have team members rate relevance on 1-5 scale\n - Ensure average relevance score exceeds 3.5/5\n - Verify suggestions don't duplicate existing subtasks\n\n4. Edge cases:\n - Test with a parent task that has no description\n - Test with a parent task that already has many subtasks\n - Test with a newly created system with minimal task history"
|
||||
"testStrategy": "Testing should verify both the functionality and the quality of suggestions:\n\n1. Unit tests:\n - Test command parsing and validation of task IDs\n - Test snapshot creation of existing tasks\n - Test the suggestion generation with mocked data\n - Test the user interaction flow with simulated inputs\n\n2. Integration tests:\n - Create a test parent task and verify subtask suggestions are contextually relevant\n - Test the accept/decline/regenerate workflow end-to-end\n - Verify proper linking of accepted subtasks to parent tasks\n - Test with various types of parent tasks (frontend, backend, documentation, etc.)\n\n3. Quality assessment:\n - Create a benchmark set of 10 diverse parent tasks\n - Generate 3 subtask suggestions for each and have team members rate relevance on 1-5 scale\n - Ensure average relevance score exceeds 3.5/5\n - Verify suggestions don't duplicate existing subtasks\n\n4. Edge cases:\n - Test with a parent task that has no description\n - Test with a parent task that already has many subtasks\n - Test with a newly created system with minimal task history",
|
||||
"subtasks": [
|
||||
{
|
||||
"id": 1,
|
||||
"title": "Implement parent task validation",
|
||||
"description": "Create validation logic to ensure subtasks are being added to valid parent tasks",
|
||||
"dependencies": [],
|
||||
"details": "Develop functions to verify that the parent task exists in the system before allowing subtask creation. Handle error cases gracefully with informative messages. Include validation for task ID format and existence in the database.",
|
||||
"status": "pending"
|
||||
},
|
||||
{
|
||||
"id": 2,
|
||||
"title": "Build context gathering mechanism",
|
||||
"description": "Develop a system to collect relevant context from parent task and existing subtasks",
|
||||
"dependencies": [
|
||||
1
|
||||
],
|
||||
"details": "Create functions to extract information from the parent task including title, description, and metadata. Also gather information about any existing subtasks to provide context for AI suggestions. Format this data appropriately for the AI prompt.",
|
||||
"status": "pending"
|
||||
},
|
||||
{
|
||||
"id": 3,
|
||||
"title": "Develop AI suggestion logic for subtasks",
|
||||
"description": "Create the core AI integration to generate relevant subtask suggestions",
|
||||
"dependencies": [
|
||||
2
|
||||
],
|
||||
"details": "Implement the AI prompt engineering and response handling for subtask generation. Ensure the AI provides structured output with appropriate fields for subtasks. Include error handling for API failures and malformed responses.",
|
||||
"status": "pending"
|
||||
},
|
||||
{
|
||||
"id": 4,
|
||||
"title": "Create interactive CLI interface",
|
||||
"description": "Build a user-friendly command-line interface for the subtask suggestion feature",
|
||||
"dependencies": [
|
||||
3
|
||||
],
|
||||
"details": "Develop CLI commands and options for requesting subtask suggestions. Include interactive elements for selecting, modifying, or rejecting suggested subtasks. Ensure clear user feedback throughout the process.",
|
||||
"status": "pending"
|
||||
},
|
||||
{
|
||||
"id": 5,
|
||||
"title": "Implement subtask linking functionality",
|
||||
"description": "Create system to properly link suggested subtasks to their parent task",
|
||||
"dependencies": [
|
||||
4
|
||||
],
|
||||
"details": "Develop the database operations to save accepted subtasks and link them to the parent task. Include functionality for setting dependencies between subtasks. Ensure proper transaction handling to maintain data integrity.",
|
||||
"status": "pending"
|
||||
},
|
||||
{
|
||||
"id": 6,
|
||||
"title": "Perform comprehensive testing",
|
||||
"description": "Test the subtask suggestion feature across various scenarios",
|
||||
"dependencies": [
|
||||
5
|
||||
],
|
||||
"details": "Create unit tests for each component. Develop integration tests for the full feature workflow. Test edge cases including invalid inputs, API failures, and unusual task structures. Document test results and fix any identified issues.",
|
||||
"status": "pending"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"id": 54,
|
||||
@@ -2725,7 +2906,67 @@
|
||||
"dependencies": [],
|
||||
"priority": "medium",
|
||||
"details": "Currently, the application is attempting to manually modify users' package.json files, which is not the recommended approach for npm packages. Instead:\n\n1. Review all code that directly manipulates package.json files in users' projects\n2. Remove these manual modifications\n3. Properly define all dependencies in the package.json of task-master-ai itself\n4. Ensure all peer dependencies are correctly specified\n5. For any scripts that need to be available to users, use proper npm bin linking or npx commands\n6. Update the installation process to leverage npm's built-in dependency management\n7. If configuration is needed in users' projects, implement a proper initialization command that creates config files rather than modifying package.json\n8. Document the new approach in the README and any other relevant documentation\n\nThis change will make the package more reliable, follow npm best practices, and prevent potential conflicts or errors when modifying users' project files.",
|
||||
"testStrategy": "1. Create a fresh test project directory\n2. Install the updated task-master-ai package using npm install task-master-ai\n3. Verify that no code attempts to modify the test project's package.json\n4. Confirm all dependencies are properly installed in node_modules\n5. Test all commands to ensure they work without the previous manual package.json modifications\n6. Try installing in projects with various existing configurations to ensure no conflicts occur\n7. Test the uninstall process to verify it cleanly removes the package without leaving unwanted modifications\n8. Verify the package works in different npm environments (npm 6, 7, 8) and with different Node.js versions\n9. Create an integration test that simulates a real user workflow from installation through usage"
|
||||
"testStrategy": "1. Create a fresh test project directory\n2. Install the updated task-master-ai package using npm install task-master-ai\n3. Verify that no code attempts to modify the test project's package.json\n4. Confirm all dependencies are properly installed in node_modules\n5. Test all commands to ensure they work without the previous manual package.json modifications\n6. Try installing in projects with various existing configurations to ensure no conflicts occur\n7. Test the uninstall process to verify it cleanly removes the package without leaving unwanted modifications\n8. Verify the package works in different npm environments (npm 6, 7, 8) and with different Node.js versions\n9. Create an integration test that simulates a real user workflow from installation through usage",
|
||||
"subtasks": [
|
||||
{
|
||||
"id": 1,
|
||||
"title": "Conduct Code Audit for Dependency Management",
|
||||
"description": "Review the current codebase to identify all areas where dependencies are manually managed, modified, or referenced outside of npm best practices.",
|
||||
"dependencies": [],
|
||||
"details": "Focus on scripts, configuration files, and any custom logic related to dependency installation or versioning.",
|
||||
"status": "pending"
|
||||
},
|
||||
{
|
||||
"id": 2,
|
||||
"title": "Remove Manual Dependency Modifications",
|
||||
"description": "Eliminate any custom scripts or manual steps that alter dependencies outside of npm's standard workflow.",
|
||||
"dependencies": [
|
||||
1
|
||||
],
|
||||
"details": "Refactor or delete code that manually installs, updates, or modifies dependencies, ensuring all dependency management is handled via npm.",
|
||||
"status": "pending"
|
||||
},
|
||||
{
|
||||
"id": 3,
|
||||
"title": "Update npm Dependencies",
|
||||
"description": "Update all project dependencies using npm, ensuring versions are current and compatible, and resolve any conflicts.",
|
||||
"dependencies": [
|
||||
2
|
||||
],
|
||||
"details": "Run npm update, audit for vulnerabilities, and adjust package.json and package-lock.json as needed.",
|
||||
"status": "pending"
|
||||
},
|
||||
{
|
||||
"id": 4,
|
||||
"title": "Update Initialization and Installation Commands",
|
||||
"description": "Revise project setup scripts and documentation to reflect the new npm-based dependency management approach.",
|
||||
"dependencies": [
|
||||
3
|
||||
],
|
||||
"details": "Ensure that all initialization commands (e.g., npm install) are up-to-date and remove references to deprecated manual steps.",
|
||||
"status": "pending"
|
||||
},
|
||||
{
|
||||
"id": 5,
|
||||
"title": "Update Documentation",
|
||||
"description": "Revise project documentation to describe the new dependency management process and provide clear setup instructions.",
|
||||
"dependencies": [
|
||||
4
|
||||
],
|
||||
"details": "Update README, onboarding guides, and any developer documentation to align with npm best practices.",
|
||||
"status": "pending"
|
||||
},
|
||||
{
|
||||
"id": 6,
|
||||
"title": "Perform Regression Testing",
|
||||
"description": "Run comprehensive tests to ensure that the refactor has not introduced any regressions or broken existing functionality.",
|
||||
"dependencies": [
|
||||
5
|
||||
],
|
||||
"details": "Execute automated and manual tests, focusing on areas affected by dependency management changes.",
|
||||
"status": "pending"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"id": 60,
|
||||
@@ -3144,7 +3385,7 @@
|
||||
"title": "Refactor expand-all-tasks.js for Unified AI Helpers & Config",
|
||||
"description": "Ensure this file correctly calls the refactored `getSubtasksFromAI` helper. Update config usage to only use `getDefaultSubtasks` from `config-manager.js` directly. AI interaction itself is handled by the helper.",
|
||||
"details": "\n\n<info added on 2025-04-24T17:48:09.354Z>\n## Additional Implementation Notes for Refactoring expand-all-tasks.js\n\n- Replace any direct imports of AI clients (e.g., OpenAI, Anthropic) and configuration getters with a single import of `expandTask` from `expand-task.js`, which now encapsulates all AI and config logic.\n- Ensure that the orchestration logic in `expand-all-tasks.js`:\n - Iterates over all pending tasks, checking for existing subtasks before invoking expansion.\n - For each task, calls `expandTask` and passes both the `useResearch` flag and the current `session` object as received from upstream callers.\n - Does not contain any logic for AI prompt construction, API calls, or config file reading—these are now delegated to the unified helpers.\n- Maintain progress reporting by emitting status updates (e.g., via events or logging) before and after each task expansion, and ensure that errors from `expandTask` are caught and reported with sufficient context (task ID, error message).\n- Example code snippet for calling the refactored helper:\n\n```js\n// Pseudocode for orchestration loop\nfor (const task of pendingTasks) {\n try {\n reportProgress(`Expanding task ${task.id}...`);\n await expandTask({\n task,\n useResearch,\n session,\n });\n reportProgress(`Task ${task.id} expanded.`);\n } catch (err) {\n reportError(`Failed to expand task ${task.id}: ${err.message}`);\n }\n}\n```\n\n- Remove any fallback or legacy code paths that previously handled AI or config logic directly within this file.\n- Ensure that all configuration defaults are accessed exclusively via `getDefaultSubtasks` from `config-manager.js` and only within the unified helper, not in `expand-all-tasks.js`.\n- Add or update JSDoc comments to clarify that this module is now a pure orchestrator and does not perform AI or config operations directly.\n</info added on 2025-04-24T17:48:09.354Z>",
|
||||
"status": "pending",
|
||||
"status": "done",
|
||||
"dependencies": [],
|
||||
"parentTaskId": 61
|
||||
},
|
||||
|
||||
@@ -2386,7 +2386,128 @@
|
||||
"dependencies": [],
|
||||
"priority": "medium",
|
||||
"details": "This implementation should include:\n\n1. Create a new command `graph` or `visualize` that displays the dependency graph.\n\n2. Design an ASCII/Unicode-based graph rendering system that:\n - Represents each task as a node with its ID and abbreviated title\n - Shows dependencies as directional lines between nodes (→, ↑, ↓, etc.)\n - Uses color coding for different task statuses (e.g., green for completed, yellow for in-progress, red for blocked)\n - Handles complex dependency chains with proper spacing and alignment\n\n3. Implement layout algorithms to:\n - Minimize crossing lines for better readability\n - Properly space nodes to avoid overlapping\n - Support both vertical and horizontal graph orientations (as a configurable option)\n\n4. Add detection and highlighting of circular dependencies with a distinct color/pattern\n\n5. Include a legend explaining the color coding and symbols used\n\n6. Ensure the graph is responsive to terminal width, with options to:\n - Automatically scale to fit the current terminal size\n - Allow zooming in/out of specific sections for large graphs\n - Support pagination or scrolling for very large dependency networks\n\n7. Add options to filter the graph by:\n - Specific task IDs or ranges\n - Task status\n - Dependency depth (e.g., show only direct dependencies or N levels deep)\n\n8. Ensure accessibility by using distinct patterns in addition to colors for users with color vision deficiencies\n\n9. Optimize performance for projects with many tasks and complex dependency relationships",
|
||||
"testStrategy": "1. Unit Tests:\n - Test the graph generation algorithm with various dependency structures\n - Verify correct node placement and connection rendering\n - Test circular dependency detection\n - Verify color coding matches task statuses\n\n2. Integration Tests:\n - Test the command with projects of varying sizes (small, medium, large)\n - Verify correct handling of different terminal sizes\n - Test all filtering options\n\n3. Visual Verification:\n - Create test cases with predefined dependency structures and verify the visual output matches expected patterns\n - Test with terminals of different sizes, including very narrow terminals\n - Verify readability of complex graphs\n\n4. Edge Cases:\n - Test with no dependencies (single nodes only)\n - Test with circular dependencies\n - Test with very deep dependency chains\n - Test with wide dependency networks (many parallel tasks)\n - Test with the maximum supported number of tasks\n\n5. Usability Testing:\n - Have team members use the feature and provide feedback on readability and usefulness\n - Test in different terminal emulators to ensure compatibility\n - Verify the feature works in terminals with limited color support\n\n6. Performance Testing:\n - Measure rendering time for large projects\n - Ensure reasonable performance with 100+ interconnected tasks"
|
||||
"testStrategy": "1. Unit Tests:\n - Test the graph generation algorithm with various dependency structures\n - Verify correct node placement and connection rendering\n - Test circular dependency detection\n - Verify color coding matches task statuses\n\n2. Integration Tests:\n - Test the command with projects of varying sizes (small, medium, large)\n - Verify correct handling of different terminal sizes\n - Test all filtering options\n\n3. Visual Verification:\n - Create test cases with predefined dependency structures and verify the visual output matches expected patterns\n - Test with terminals of different sizes, including very narrow terminals\n - Verify readability of complex graphs\n\n4. Edge Cases:\n - Test with no dependencies (single nodes only)\n - Test with circular dependencies\n - Test with very deep dependency chains\n - Test with wide dependency networks (many parallel tasks)\n - Test with the maximum supported number of tasks\n\n5. Usability Testing:\n - Have team members use the feature and provide feedback on readability and usefulness\n - Test in different terminal emulators to ensure compatibility\n - Verify the feature works in terminals with limited color support\n\n6. Performance Testing:\n - Measure rendering time for large projects\n - Ensure reasonable performance with 100+ interconnected tasks",
|
||||
"subtasks": [
|
||||
{
|
||||
"id": 1,
|
||||
"title": "CLI Command Setup",
|
||||
"description": "Design and implement the command-line interface for the dependency graph tool, including argument parsing and help documentation.",
|
||||
"dependencies": [],
|
||||
"details": "Define commands for input file specification, output options, filtering, and other user-configurable parameters.",
|
||||
"status": "pending"
|
||||
},
|
||||
{
|
||||
"id": 2,
|
||||
"title": "Graph Layout Algorithms",
|
||||
"description": "Develop or integrate algorithms to compute optimal node and edge placement for clear and readable graph layouts in a terminal environment.",
|
||||
"dependencies": [
|
||||
1
|
||||
],
|
||||
"details": "Consider topological sorting, hierarchical, and force-directed layouts suitable for ASCII/Unicode rendering.",
|
||||
"status": "pending"
|
||||
},
|
||||
{
|
||||
"id": 3,
|
||||
"title": "ASCII/Unicode Rendering Engine",
|
||||
"description": "Implement rendering logic to display the dependency graph using ASCII and Unicode characters in the terminal.",
|
||||
"dependencies": [
|
||||
2
|
||||
],
|
||||
"details": "Support for various node and edge styles, and ensure compatibility with different terminal types.",
|
||||
"status": "pending"
|
||||
},
|
||||
{
|
||||
"id": 4,
|
||||
"title": "Color Coding Support",
|
||||
"description": "Add color coding to nodes and edges to visually distinguish types, statuses, or other attributes in the graph.",
|
||||
"dependencies": [
|
||||
3
|
||||
],
|
||||
"details": "Use ANSI escape codes for color; provide options for colorblind-friendly palettes.",
|
||||
"status": "pending"
|
||||
},
|
||||
{
|
||||
"id": 5,
|
||||
"title": "Circular Dependency Detection",
|
||||
"description": "Implement algorithms to detect and highlight circular dependencies within the graph.",
|
||||
"dependencies": [
|
||||
2
|
||||
],
|
||||
"details": "Clearly mark cycles in the rendered output and provide warnings or errors as appropriate.",
|
||||
"status": "pending"
|
||||
},
|
||||
{
|
||||
"id": 6,
|
||||
"title": "Filtering and Search Functionality",
|
||||
"description": "Enable users to filter nodes and edges by criteria such as name, type, or dependency depth.",
|
||||
"dependencies": [
|
||||
1,
|
||||
2
|
||||
],
|
||||
"details": "Support command-line flags for filtering and interactive search if feasible.",
|
||||
"status": "pending"
|
||||
},
|
||||
{
|
||||
"id": 7,
|
||||
"title": "Accessibility Features",
|
||||
"description": "Ensure the tool is accessible, including support for screen readers, high-contrast modes, and keyboard navigation.",
|
||||
"dependencies": [
|
||||
3,
|
||||
4
|
||||
],
|
||||
"details": "Provide alternative text output and ensure color is not the sole means of conveying information.",
|
||||
"status": "pending"
|
||||
},
|
||||
{
|
||||
"id": 8,
|
||||
"title": "Performance Optimization",
|
||||
"description": "Profile and optimize the tool for large graphs to ensure responsive rendering and low memory usage.",
|
||||
"dependencies": [
|
||||
2,
|
||||
3,
|
||||
4,
|
||||
5,
|
||||
6
|
||||
],
|
||||
"details": "Implement lazy loading, efficient data structures, and parallel processing where appropriate.",
|
||||
"status": "pending"
|
||||
},
|
||||
{
|
||||
"id": 9,
|
||||
"title": "Documentation",
|
||||
"description": "Write comprehensive user and developer documentation covering installation, usage, configuration, and extension.",
|
||||
"dependencies": [
|
||||
1,
|
||||
2,
|
||||
3,
|
||||
4,
|
||||
5,
|
||||
6,
|
||||
7,
|
||||
8
|
||||
],
|
||||
"details": "Include examples, troubleshooting, and contribution guidelines.",
|
||||
"status": "pending"
|
||||
},
|
||||
{
|
||||
"id": 10,
|
||||
"title": "Testing and Validation",
|
||||
"description": "Develop automated tests for all major features, including CLI parsing, layout correctness, rendering, color coding, filtering, and cycle detection.",
|
||||
"dependencies": [
|
||||
1,
|
||||
2,
|
||||
3,
|
||||
4,
|
||||
5,
|
||||
6,
|
||||
7,
|
||||
8,
|
||||
9
|
||||
],
|
||||
"details": "Include unit, integration, and regression tests; validate accessibility and performance claims.",
|
||||
"status": "pending"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"id": 42,
|
||||
@@ -2665,7 +2786,67 @@
|
||||
"dependencies": [],
|
||||
"priority": "medium",
|
||||
"details": "Develop a new command `suggest-subtask <task-id>` that generates intelligent subtask suggestions for a specified parent task. The implementation should:\n\n1. Accept a parent task ID as input and validate it exists\n2. Gather a snapshot of all existing tasks in the system (titles only, with their statuses and dependencies)\n3. Retrieve the full details of the specified parent task\n4. Use this context to generate a relevant subtask suggestion that would logically help complete the parent task\n5. Present the suggestion to the user in the CLI with options to:\n - Accept (a): Add the subtask to the system under the parent task\n - Decline (d): Reject the suggestion without adding anything\n - Regenerate (r): Generate a new alternative subtask suggestion\n - Edit (e): Accept but allow editing the title/description before adding\n\nThe suggestion algorithm should consider:\n- The parent task's description and requirements\n- Current progress (% complete) of the parent task\n- Existing subtasks already created for this parent\n- Similar patterns from other tasks in the system\n- Logical next steps based on software development best practices\n\nWhen a subtask is accepted, it should be properly linked to the parent task and assigned appropriate default values for priority and status.",
|
||||
"testStrategy": "Testing should verify both the functionality and the quality of suggestions:\n\n1. Unit tests:\n - Test command parsing and validation of task IDs\n - Test snapshot creation of existing tasks\n - Test the suggestion generation with mocked data\n - Test the user interaction flow with simulated inputs\n\n2. Integration tests:\n - Create a test parent task and verify subtask suggestions are contextually relevant\n - Test the accept/decline/regenerate workflow end-to-end\n - Verify proper linking of accepted subtasks to parent tasks\n - Test with various types of parent tasks (frontend, backend, documentation, etc.)\n\n3. Quality assessment:\n - Create a benchmark set of 10 diverse parent tasks\n - Generate 3 subtask suggestions for each and have team members rate relevance on 1-5 scale\n - Ensure average relevance score exceeds 3.5/5\n - Verify suggestions don't duplicate existing subtasks\n\n4. Edge cases:\n - Test with a parent task that has no description\n - Test with a parent task that already has many subtasks\n - Test with a newly created system with minimal task history"
|
||||
"testStrategy": "Testing should verify both the functionality and the quality of suggestions:\n\n1. Unit tests:\n - Test command parsing and validation of task IDs\n - Test snapshot creation of existing tasks\n - Test the suggestion generation with mocked data\n - Test the user interaction flow with simulated inputs\n\n2. Integration tests:\n - Create a test parent task and verify subtask suggestions are contextually relevant\n - Test the accept/decline/regenerate workflow end-to-end\n - Verify proper linking of accepted subtasks to parent tasks\n - Test with various types of parent tasks (frontend, backend, documentation, etc.)\n\n3. Quality assessment:\n - Create a benchmark set of 10 diverse parent tasks\n - Generate 3 subtask suggestions for each and have team members rate relevance on 1-5 scale\n - Ensure average relevance score exceeds 3.5/5\n - Verify suggestions don't duplicate existing subtasks\n\n4. Edge cases:\n - Test with a parent task that has no description\n - Test with a parent task that already has many subtasks\n - Test with a newly created system with minimal task history",
|
||||
"subtasks": [
|
||||
{
|
||||
"id": 1,
|
||||
"title": "Implement parent task validation",
|
||||
"description": "Create validation logic to ensure subtasks are being added to valid parent tasks",
|
||||
"dependencies": [],
|
||||
"details": "Develop functions to verify that the parent task exists in the system before allowing subtask creation. Handle error cases gracefully with informative messages. Include validation for task ID format and existence in the database.",
|
||||
"status": "pending"
|
||||
},
|
||||
{
|
||||
"id": 2,
|
||||
"title": "Build context gathering mechanism",
|
||||
"description": "Develop a system to collect relevant context from parent task and existing subtasks",
|
||||
"dependencies": [
|
||||
1
|
||||
],
|
||||
"details": "Create functions to extract information from the parent task including title, description, and metadata. Also gather information about any existing subtasks to provide context for AI suggestions. Format this data appropriately for the AI prompt.",
|
||||
"status": "pending"
|
||||
},
|
||||
{
|
||||
"id": 3,
|
||||
"title": "Develop AI suggestion logic for subtasks",
|
||||
"description": "Create the core AI integration to generate relevant subtask suggestions",
|
||||
"dependencies": [
|
||||
2
|
||||
],
|
||||
"details": "Implement the AI prompt engineering and response handling for subtask generation. Ensure the AI provides structured output with appropriate fields for subtasks. Include error handling for API failures and malformed responses.",
|
||||
"status": "pending"
|
||||
},
|
||||
{
|
||||
"id": 4,
|
||||
"title": "Create interactive CLI interface",
|
||||
"description": "Build a user-friendly command-line interface for the subtask suggestion feature",
|
||||
"dependencies": [
|
||||
3
|
||||
],
|
||||
"details": "Develop CLI commands and options for requesting subtask suggestions. Include interactive elements for selecting, modifying, or rejecting suggested subtasks. Ensure clear user feedback throughout the process.",
|
||||
"status": "pending"
|
||||
},
|
||||
{
|
||||
"id": 5,
|
||||
"title": "Implement subtask linking functionality",
|
||||
"description": "Create system to properly link suggested subtasks to their parent task",
|
||||
"dependencies": [
|
||||
4
|
||||
],
|
||||
"details": "Develop the database operations to save accepted subtasks and link them to the parent task. Include functionality for setting dependencies between subtasks. Ensure proper transaction handling to maintain data integrity.",
|
||||
"status": "pending"
|
||||
},
|
||||
{
|
||||
"id": 6,
|
||||
"title": "Perform comprehensive testing",
|
||||
"description": "Test the subtask suggestion feature across various scenarios",
|
||||
"dependencies": [
|
||||
5
|
||||
],
|
||||
"details": "Create unit tests for each component. Develop integration tests for the full feature workflow. Test edge cases including invalid inputs, API failures, and unusual task structures. Document test results and fix any identified issues.",
|
||||
"status": "pending"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"id": 54,
|
||||
@@ -2725,7 +2906,8 @@
|
||||
"dependencies": [],
|
||||
"priority": "medium",
|
||||
"details": "Currently, the application is attempting to manually modify users' package.json files, which is not the recommended approach for npm packages. Instead:\n\n1. Review all code that directly manipulates package.json files in users' projects\n2. Remove these manual modifications\n3. Properly define all dependencies in the package.json of task-master-ai itself\n4. Ensure all peer dependencies are correctly specified\n5. For any scripts that need to be available to users, use proper npm bin linking or npx commands\n6. Update the installation process to leverage npm's built-in dependency management\n7. If configuration is needed in users' projects, implement a proper initialization command that creates config files rather than modifying package.json\n8. Document the new approach in the README and any other relevant documentation\n\nThis change will make the package more reliable, follow npm best practices, and prevent potential conflicts or errors when modifying users' project files.",
|
||||
"testStrategy": "1. Create a fresh test project directory\n2. Install the updated task-master-ai package using npm install task-master-ai\n3. Verify that no code attempts to modify the test project's package.json\n4. Confirm all dependencies are properly installed in node_modules\n5. Test all commands to ensure they work without the previous manual package.json modifications\n6. Try installing in projects with various existing configurations to ensure no conflicts occur\n7. Test the uninstall process to verify it cleanly removes the package without leaving unwanted modifications\n8. Verify the package works in different npm environments (npm 6, 7, 8) and with different Node.js versions\n9. Create an integration test that simulates a real user workflow from installation through usage"
|
||||
"testStrategy": "1. Create a fresh test project directory\n2. Install the updated task-master-ai package using npm install task-master-ai\n3. Verify that no code attempts to modify the test project's package.json\n4. Confirm all dependencies are properly installed in node_modules\n5. Test all commands to ensure they work without the previous manual package.json modifications\n6. Try installing in projects with various existing configurations to ensure no conflicts occur\n7. Test the uninstall process to verify it cleanly removes the package without leaving unwanted modifications\n8. Verify the package works in different npm environments (npm 6, 7, 8) and with different Node.js versions\n9. Create an integration test that simulates a real user workflow from installation through usage",
|
||||
"subtasks": []
|
||||
},
|
||||
{
|
||||
"id": 60,
|
||||
@@ -3135,7 +3317,7 @@
|
||||
"title": "Refactor expand-task.js for Unified AI Service & Config",
|
||||
"description": "Replace direct AI calls (old `ai-services.js` helpers like `generateSubtasksWithPerplexity`) with `generateObjectService` from `ai-services-unified.js`. Pass `role` and `session`. Remove direct config getter usage (from `config-manager.js`) for AI parameters; use unified service instead. Keep `getDefaultSubtasks` usage.",
|
||||
"details": "\n\n<info added on 2025-04-24T17:46:51.286Z>\n- In expand-task.js, ensure that all AI parameter configuration (such as model, temperature, max tokens) is passed via the unified generateObjectService interface, not fetched directly from config files or environment variables. This centralizes AI config management and supports future service changes without further refactoring.\n\n- When preparing the service call, construct the payload to include both the prompt and any schema or validation requirements expected by generateObjectService. For example, if subtasks must conform to a Zod schema, pass the schema definition or reference as part of the call.\n\n- For the CLI handler, ensure that the --research flag is mapped to the useResearch boolean and that this is explicitly passed to the core expand-task logic. Also, propagate any session or user context from CLI options to the core function for downstream auditing or personalization.\n\n- In the MCP tool definition, validate that all CLI-exposed parameters are reflected in the Zod schema, including optional ones like prompt overrides or force regeneration. This ensures strict input validation and prevents runtime errors.\n\n- In the direct function wrapper, implement a try/catch block around the core expandTask invocation. On error, log the error with context (task id, session id) and return a standardized error response object with error code and message fields.\n\n- Add unit tests or integration tests to verify that expand-task.js no longer imports or uses any direct AI client or config getter, and that all AI calls are routed through ai-services-unified.js.\n\n- Document the expected shape of the session object and any required fields for downstream service calls, so future maintainers know what context must be provided.\n</info added on 2025-04-24T17:46:51.286Z>",
|
||||
"status": "in-progress",
|
||||
"status": "done",
|
||||
"dependencies": [],
|
||||
"parentTaskId": 61
|
||||
},
|
||||
@@ -3363,12 +3545,56 @@
|
||||
"subtasks": [
|
||||
{
|
||||
"id": 1,
|
||||
"title": "Implement Yarn Support for Taskmaster",
|
||||
"description": "Add comprehensive support for installing and managing Taskmaster using Yarn package manager, ensuring feature parity with npm and pnpm installations.",
|
||||
"title": "Update package.json for Yarn Compatibility",
|
||||
"description": "Modify the package.json file to ensure all dependencies, scripts, and configurations are compatible with Yarn's installation and resolution methods.",
|
||||
"dependencies": [],
|
||||
"details": "1. Update package.json to ensure compatibility with Yarn by reviewing and adjusting dependencies, scripts, and configuration fields that might behave differently with Yarn.\n2. Create necessary Yarn configuration files (.yarnrc.yml) to handle Yarn-specific behaviors, especially if supporting both Yarn classic and Yarn Berry (v2+).\n3. Modify any post-install scripts to detect and handle Yarn-specific environments correctly.\n4. Update package manager detection logic in the codebase to properly identify when Taskmaster is installed via Yarn.\n5. Test and resolve any Yarn-specific package resolution or hoisting issues that might affect functionality.\n6. Update installation documentation to include clear instructions for installing with Yarn, including any version-specific considerations.\n7. Ensure proper lockfile generation by testing installation and updates with Yarn.\n8. Verify that all CLI commands function correctly when the package is installed via Yarn.",
|
||||
"details": "Review and update dependency declarations, script syntax, and any package manager-specific fields to avoid conflicts or unsupported features when using Yarn.",
|
||||
"status": "pending",
|
||||
"testStrategy": "1. Test installation using both Yarn classic and Yarn Berry (v2+) to ensure compatibility.\n2. Verify all CLI commands work correctly when installed via Yarn.\n3. Test the post-install process to ensure it completes successfully.\n4. Compare behavior between npm, pnpm, and Yarn installations to confirm feature parity.\n5. Test on different operating systems to ensure cross-platform compatibility with Yarn.\n6. Verify proper lockfile generation and dependency resolution."
|
||||
"testStrategy": "Run 'yarn install' and 'yarn run <script>' for all scripts to confirm successful execution and dependency resolution."
|
||||
},
|
||||
{
|
||||
"id": 2,
|
||||
"title": "Add Yarn-Specific Configuration Files",
|
||||
"description": "Introduce Yarn-specific configuration files such as .yarnrc.yml if needed to optimize Yarn behavior and ensure consistent installs.",
|
||||
"dependencies": [
|
||||
1
|
||||
],
|
||||
"details": "Determine if Yarn v2+ (Berry) or classic requires additional configuration for the project, and add or update .yarnrc.yml or .yarnrc files accordingly.",
|
||||
"status": "pending",
|
||||
"testStrategy": "Verify that Yarn respects the configuration by running installs and checking for expected behaviors (e.g., plug'n'play, nodeLinker settings)."
|
||||
},
|
||||
{
|
||||
"id": 3,
|
||||
"title": "Test and Fix Yarn Compatibility for Scripts and CLI",
|
||||
"description": "Ensure all scripts, post-install hooks, and CLI commands function correctly when Taskmaster is installed and managed via Yarn.",
|
||||
"dependencies": [
|
||||
2
|
||||
],
|
||||
"details": "Test all lifecycle scripts, post-install actions, and CLI commands using Yarn. Address any issues related to environment variables, script execution, or dependency hoisting.",
|
||||
"status": "pending",
|
||||
"testStrategy": "Install Taskmaster using Yarn and run all documented scripts and CLI commands, comparing results to npm/pnpm installations."
|
||||
},
|
||||
{
|
||||
"id": 4,
|
||||
"title": "Update Documentation for Yarn Installation and Usage",
|
||||
"description": "Revise installation and usage documentation to include clear instructions for installing and managing Taskmaster with Yarn.",
|
||||
"dependencies": [
|
||||
3
|
||||
],
|
||||
"details": "Add Yarn-specific installation commands, troubleshooting tips, and notes on version compatibility to the README and any relevant docs.",
|
||||
"status": "pending",
|
||||
"testStrategy": "Review documentation for accuracy and clarity; have a user follow the Yarn instructions to verify successful installation and usage."
|
||||
},
|
||||
{
|
||||
"id": 5,
|
||||
"title": "Implement and Test Package Manager Detection Logic",
|
||||
"description": "Update or add logic in the codebase to detect Yarn installations and handle Yarn-specific behaviors, ensuring feature parity across package managers.",
|
||||
"dependencies": [
|
||||
4
|
||||
],
|
||||
"details": "Modify detection logic to recognize Yarn (classic and berry), handle lockfile generation, and resolve any Yarn-specific package resolution or hoisting issues.",
|
||||
"status": "pending",
|
||||
"testStrategy": "Install Taskmaster using npm, pnpm, and Yarn (classic and berry), verifying that the application detects the package manager correctly and behaves consistently."
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user