refactor(expand): Align expand-task with unified AI service
Refactored the `expandTask` feature (`scripts/modules/task-manager/expand-task.js`) and related components (`commands.js`, `mcp-server/src/tools/expand-task.js`, `mcp-server/src/core/direct-functions/expand-task.js`) to integrate with the unified AI service layer (`ai-services-unified.js`) and configuration management (`config-manager.js`). The refactor involved: - Removing direct AI client calls and configuration fetching from `expand-task.js`. - Attempting to use `generateObjectService` for structured subtask generation. This failed due to provider-specific errors (Perplexity internal errors, Anthropic schema translation issues). - Reverting the core AI interaction to use `generateTextService`, asking the LLM to format its response as JSON containing a "subtasks" array. - Re-implementing manual JSON parsing and Zod validation (`parseSubtasksFromText`) to handle the text response reliably. - Updating prompt generation functions (`generateMainSystemPrompt`, `generateMainUserPrompt`, `generateResearchUserPrompt`) to request the correct JSON object structure within the text response. - Ensuring the `expandTaskDirect` function handles pre-checks (force flag, task status) and correctly passes the `session` context and logger wrapper to the core `expandTask` function. - Correcting duplicate imports in `commands.js`. - Validating the refactored feature works correctly via both CLI (`task-master expand --id <id>`) and MCP (`expand_task` tool) for main and research roles. This aligns the task expansion feature with the new architecture while using the more robust text generation approach due to current limitations with structured output services. Closes subtask 61.37.
This commit is contained in:
@@ -3,7 +3,7 @@
|
|||||||
* Direct function implementation for expanding a task into subtasks
|
* Direct function implementation for expanding a task into subtasks
|
||||||
*/
|
*/
|
||||||
|
|
||||||
import { expandTask } from '../../../../scripts/modules/task-manager.js';
|
import expandTask from '../../../../scripts/modules/task-manager/expand-task.js'; // Correct import path
|
||||||
import {
|
import {
|
||||||
readJSON,
|
readJSON,
|
||||||
writeJSON,
|
writeJSON,
|
||||||
@@ -11,10 +11,8 @@ import {
|
|||||||
disableSilentMode,
|
disableSilentMode,
|
||||||
isSilentMode
|
isSilentMode
|
||||||
} from '../../../../scripts/modules/utils.js';
|
} from '../../../../scripts/modules/utils.js';
|
||||||
import {
|
// Removed AI client imports:
|
||||||
getAnthropicClientForMCP,
|
// import { getAnthropicClientForMCP, getModelConfig } from '../utils/ai-client-utils.js';
|
||||||
getModelConfig
|
|
||||||
} from '../utils/ai-client-utils.js';
|
|
||||||
import path from 'path';
|
import path from 'path';
|
||||||
import fs from 'fs';
|
import fs from 'fs';
|
||||||
|
|
||||||
@@ -25,15 +23,16 @@ import fs from 'fs';
|
|||||||
* @param {string} args.tasksJsonPath - Explicit path to the tasks.json file.
|
* @param {string} args.tasksJsonPath - Explicit path to the tasks.json file.
|
||||||
* @param {string} args.id - The ID of the task to expand.
|
* @param {string} args.id - The ID of the task to expand.
|
||||||
* @param {number|string} [args.num] - Number of subtasks to generate.
|
* @param {number|string} [args.num] - Number of subtasks to generate.
|
||||||
* @param {boolean} [args.research] - Enable Perplexity AI for research-backed subtask generation.
|
* @param {boolean} [args.research] - Enable research role for subtask generation.
|
||||||
* @param {string} [args.prompt] - Additional context to guide subtask generation.
|
* @param {string} [args.prompt] - Additional context to guide subtask generation.
|
||||||
* @param {boolean} [args.force] - Force expansion even if subtasks exist.
|
* @param {boolean} [args.force] - Force expansion even if subtasks exist.
|
||||||
* @param {Object} log - Logger object
|
* @param {Object} log - Logger object
|
||||||
* @param {Object} context - Context object containing session and reportProgress
|
* @param {Object} context - Context object containing session
|
||||||
|
* @param {Object} [context.session] - MCP Session object
|
||||||
* @returns {Promise<Object>} - Task expansion result { success: boolean, data?: any, error?: { code: string, message: string }, fromCache: boolean }
|
* @returns {Promise<Object>} - Task expansion result { success: boolean, data?: any, error?: { code: string, message: string }, fromCache: boolean }
|
||||||
*/
|
*/
|
||||||
export async function expandTaskDirect(args, log, context = {}) {
|
export async function expandTaskDirect(args, log, context = {}) {
|
||||||
const { session } = context;
|
const { session } = context; // Extract session
|
||||||
// Destructure expected args
|
// Destructure expected args
|
||||||
const { tasksJsonPath, id, num, research, prompt, force } = args;
|
const { tasksJsonPath, id, num, research, prompt, force } = args;
|
||||||
|
|
||||||
@@ -85,28 +84,9 @@ export async function expandTaskDirect(args, log, context = {}) {
|
|||||||
const additionalContext = prompt || '';
|
const additionalContext = prompt || '';
|
||||||
const forceFlag = force === true;
|
const forceFlag = force === true;
|
||||||
|
|
||||||
// Initialize AI client if needed (for expandTask function)
|
|
||||||
try {
|
|
||||||
// This ensures the AI client is available by checking it
|
|
||||||
if (useResearch) {
|
|
||||||
log.info('Verifying AI client for research-backed expansion');
|
|
||||||
await getAnthropicClientForMCP(session, log);
|
|
||||||
}
|
|
||||||
} catch (error) {
|
|
||||||
log.error(`Failed to initialize AI client: ${error.message}`);
|
|
||||||
return {
|
|
||||||
success: false,
|
|
||||||
error: {
|
|
||||||
code: 'AI_CLIENT_ERROR',
|
|
||||||
message: `Cannot initialize AI client: ${error.message}`
|
|
||||||
},
|
|
||||||
fromCache: false
|
|
||||||
};
|
|
||||||
}
|
|
||||||
|
|
||||||
try {
|
try {
|
||||||
log.info(
|
log.info(
|
||||||
`[expandTaskDirect] Expanding task ${taskId} into ${numSubtasks || 'default'} subtasks. Research: ${useResearch}`
|
`[expandTaskDirect] Expanding task ${taskId} into ${numSubtasks || 'default'} subtasks. Research: ${useResearch}, Force: ${forceFlag}`
|
||||||
);
|
);
|
||||||
|
|
||||||
// Read tasks data
|
// Read tasks data
|
||||||
@@ -205,7 +185,16 @@ export async function expandTaskDirect(args, log, context = {}) {
|
|||||||
// Process the request
|
// Process the request
|
||||||
try {
|
try {
|
||||||
// Enable silent mode to prevent console logs from interfering with JSON response
|
// Enable silent mode to prevent console logs from interfering with JSON response
|
||||||
enableSilentMode();
|
const wasSilent = isSilentMode();
|
||||||
|
if (!wasSilent) enableSilentMode();
|
||||||
|
|
||||||
|
const logWrapper = {
|
||||||
|
info: (message, ...args) => log.info(message, ...args),
|
||||||
|
warn: (message, ...args) => log.warn(message, ...args),
|
||||||
|
error: (message, ...args) => log.error(message, ...args),
|
||||||
|
debug: (message, ...args) => log.debug && log.debug(message, ...args),
|
||||||
|
success: (message, ...args) => log.info(message, ...args)
|
||||||
|
};
|
||||||
|
|
||||||
// Call expandTask with session context to ensure AI client is properly initialized
|
// Call expandTask with session context to ensure AI client is properly initialized
|
||||||
const result = await expandTask(
|
const result = await expandTask(
|
||||||
@@ -214,11 +203,11 @@ export async function expandTaskDirect(args, log, context = {}) {
|
|||||||
numSubtasks,
|
numSubtasks,
|
||||||
useResearch,
|
useResearch,
|
||||||
additionalContext,
|
additionalContext,
|
||||||
{ mcpLog: log, session } // Only pass mcpLog and session, NOT reportProgress
|
{ session: session, mcpLog: logWrapper }
|
||||||
);
|
);
|
||||||
|
|
||||||
// Restore normal logging
|
// Restore normal logging
|
||||||
disableSilentMode();
|
if (!wasSilent && isSilentMode()) disableSilentMode();
|
||||||
|
|
||||||
// Read the updated data
|
// Read the updated data
|
||||||
const updatedData = readJSON(tasksPath);
|
const updatedData = readJSON(tasksPath);
|
||||||
@@ -244,7 +233,7 @@ export async function expandTaskDirect(args, log, context = {}) {
|
|||||||
};
|
};
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
// Make sure to restore normal logging even if there's an error
|
// Make sure to restore normal logging even if there's an error
|
||||||
disableSilentMode();
|
if (!wasSilent && isSilentMode()) disableSilentMode();
|
||||||
|
|
||||||
log.error(`Error expanding task: ${error.message}`);
|
log.error(`Error expanding task: ${error.message}`);
|
||||||
return {
|
return {
|
||||||
|
|||||||
@@ -9,9 +9,8 @@ import {
|
|||||||
createErrorResponse,
|
createErrorResponse,
|
||||||
getProjectRootFromSession
|
getProjectRootFromSession
|
||||||
} from './utils.js';
|
} from './utils.js';
|
||||||
import { expandTaskDirect } from '../core/task-master-core.js';
|
import { expandTaskDirect } from '../core/direct-functions/expand-task.js';
|
||||||
import { findTasksJsonPath } from '../core/utils/path-utils.js';
|
import { findTasksJsonPath } from '../core/utils/path-utils.js';
|
||||||
import fs from 'fs';
|
|
||||||
import path from 'path';
|
import path from 'path';
|
||||||
|
|
||||||
/**
|
/**
|
||||||
@@ -28,16 +27,26 @@ export function registerExpandTaskTool(server) {
|
|||||||
research: z
|
research: z
|
||||||
.boolean()
|
.boolean()
|
||||||
.optional()
|
.optional()
|
||||||
.describe('Use Perplexity AI for research-backed generation'),
|
.default(false)
|
||||||
|
.describe('Use research role for generation'),
|
||||||
prompt: z
|
prompt: z
|
||||||
.string()
|
.string()
|
||||||
.optional()
|
.optional()
|
||||||
.describe('Additional context for subtask generation'),
|
.describe('Additional context for subtask generation'),
|
||||||
file: z.string().optional().describe('Absolute path to the tasks file'),
|
file: z
|
||||||
|
.string()
|
||||||
|
.optional()
|
||||||
|
.describe(
|
||||||
|
'Path to the tasks file relative to project root (e.g., tasks/tasks.json)'
|
||||||
|
),
|
||||||
projectRoot: z
|
projectRoot: z
|
||||||
.string()
|
.string()
|
||||||
.describe('The directory of the project. Must be an absolute path.'),
|
.describe('The directory of the project. Must be an absolute path.'),
|
||||||
force: z.boolean().optional().describe('Force the expansion')
|
force: z
|
||||||
|
.boolean()
|
||||||
|
.optional()
|
||||||
|
.default(false)
|
||||||
|
.describe('Force expansion even if subtasks exist')
|
||||||
}),
|
}),
|
||||||
execute: async (args, { log, session }) => {
|
execute: async (args, { log, session }) => {
|
||||||
try {
|
try {
|
||||||
|
|||||||
@@ -723,7 +723,7 @@ async function generateSubtasksWithPerplexity(
|
|||||||
// Formulate research query based on task
|
// Formulate research query based on task
|
||||||
const researchQuery = `I need to implement "${task.title}" which involves: "${task.description}".
|
const researchQuery = `I need to implement "${task.title}" which involves: "${task.description}".
|
||||||
What are current best practices, libraries, design patterns, and implementation approaches?
|
What are current best practices, libraries, design patterns, and implementation approaches?
|
||||||
Include concrete code examples and technical considerations where relevant.`;
|
Include concrete, researched, code examples and technical considerations where relevant. Include high-level, mid-level and low-level implementation details for a complete implementation.`;
|
||||||
|
|
||||||
// Query Perplexity for research
|
// Query Perplexity for research
|
||||||
const researchResponse = await perplexityClient.chat.completions.create({
|
const researchResponse = await perplexityClient.chat.completions.create({
|
||||||
@@ -731,9 +731,9 @@ Include concrete code examples and technical considerations where relevant.`;
|
|||||||
messages: [
|
messages: [
|
||||||
{
|
{
|
||||||
role: 'system',
|
role: 'system',
|
||||||
content: `You are a helpful assistant that provides research on current best practices and implementation approaches for software development.
|
content: `You are an expert software development assistant and researcher that provides high level, mid level and low level research on current best practices and implementation approaches for software development.
|
||||||
You are given a task and a description of the task.
|
You are given a task and a description of the task.
|
||||||
You need to provide a list of best practices, libraries, design patterns, and implementation approaches that are relevant to the task.
|
You need to provide a list of best practices, libraries, design patterns, and implementation approaches that are relevant to the task and up to date with today's latest best practices using those tools, libraries, design patterns and implementation approaches you are recommending.
|
||||||
You should provide concrete code examples and technical considerations where relevant.`
|
You should provide concrete code examples and technical considerations where relevant.`
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
|||||||
@@ -77,6 +77,7 @@ import {
|
|||||||
getAvailableModelsList,
|
getAvailableModelsList,
|
||||||
setModel
|
setModel
|
||||||
} from './task-manager/models.js'; // Import new core functions
|
} from './task-manager/models.js'; // Import new core functions
|
||||||
|
import { findProjectRoot } from './utils.js';
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Configure and register CLI commands
|
* Configure and register CLI commands
|
||||||
@@ -643,95 +644,76 @@ function registerCommands(programInstance) {
|
|||||||
// expand command
|
// expand command
|
||||||
programInstance
|
programInstance
|
||||||
.command('expand')
|
.command('expand')
|
||||||
.description('Break down tasks into detailed subtasks')
|
.description('Expand a task into subtasks using AI')
|
||||||
.option('-f, --file <file>', 'Path to the tasks file', 'tasks/tasks.json')
|
.option('-i, --id <id>', 'ID of the task to expand')
|
||||||
.option('-i, --id <id>', 'Task ID to expand')
|
.option(
|
||||||
.option('-a, --all', 'Expand all tasks')
|
'-a, --all',
|
||||||
|
'Expand all pending tasks based on complexity analysis'
|
||||||
|
)
|
||||||
.option(
|
.option(
|
||||||
'-n, --num <number>',
|
'-n, --num <number>',
|
||||||
'Number of subtasks to generate (default from config)',
|
'Number of subtasks to generate (uses complexity analysis by default if available)'
|
||||||
'5' // Set a simple string default here
|
|
||||||
)
|
)
|
||||||
.option(
|
.option(
|
||||||
'--research',
|
'-r, --research',
|
||||||
'Enable Perplexity AI for research-backed subtask generation'
|
'Enable research-backed generation (e.g., using Perplexity)',
|
||||||
|
false
|
||||||
)
|
)
|
||||||
|
.option('-p, --prompt <text>', 'Additional context for subtask generation')
|
||||||
|
.option('-f, --force', 'Force expansion even if subtasks exist', false) // Ensure force option exists
|
||||||
.option(
|
.option(
|
||||||
'-p, --prompt <text>',
|
'--file <file>',
|
||||||
'Additional context to guide subtask generation'
|
'Path to the tasks file (relative to project root)',
|
||||||
)
|
'tasks/tasks.json'
|
||||||
.option(
|
) // Allow file override
|
||||||
'--force',
|
|
||||||
'Force regeneration of subtasks for tasks that already have them'
|
|
||||||
)
|
|
||||||
.action(async (options) => {
|
.action(async (options) => {
|
||||||
const idArg = options.id;
|
const projectRoot = findProjectRoot();
|
||||||
// Get the actual default if the user didn't provide --num
|
if (!projectRoot) {
|
||||||
const numSubtasks =
|
console.error(chalk.red('Error: Could not find project root.'));
|
||||||
options.num === '5'
|
process.exit(1);
|
||||||
? getDefaultSubtasks(null)
|
}
|
||||||
: parseInt(options.num, 10);
|
const tasksPath = path.resolve(projectRoot, options.file); // Resolve tasks path
|
||||||
const useResearch = options.research || false;
|
|
||||||
const additionalContext = options.prompt || '';
|
|
||||||
const forceFlag = options.force || false;
|
|
||||||
const tasksPath = options.file || 'tasks/tasks.json';
|
|
||||||
|
|
||||||
if (options.all) {
|
if (options.all) {
|
||||||
console.log(
|
// --- Handle expand --all ---
|
||||||
chalk.blue(`Expanding all tasks with ${numSubtasks} subtasks each...`)
|
// This currently calls expandAllTasks. If expandAllTasks internally calls
|
||||||
);
|
// the refactored expandTask, it needs to be updated to pass the empty context {}.
|
||||||
if (useResearch) {
|
// For now, we assume expandAllTasks needs its own refactor (Subtask 61.38).
|
||||||
|
// We'll add a placeholder log here.
|
||||||
console.log(
|
console.log(
|
||||||
chalk.blue(
|
chalk.blue(
|
||||||
'Using Perplexity AI for research-backed subtask generation'
|
'Expanding all pending tasks... (Requires expand-all-tasks.js refactor)'
|
||||||
)
|
)
|
||||||
);
|
);
|
||||||
} else {
|
// Placeholder: await expandAllTasks(tasksPath, options.num, options.research, options.prompt, options.force, {});
|
||||||
console.log(
|
} else if (options.id) {
|
||||||
chalk.yellow('Research-backed subtask generation disabled')
|
// --- Handle expand --id <id> ---
|
||||||
|
if (!options.id) {
|
||||||
|
console.error(
|
||||||
|
chalk.red('Error: Task ID is required unless using --all.')
|
||||||
);
|
);
|
||||||
|
process.exit(1);
|
||||||
}
|
}
|
||||||
if (additionalContext) {
|
|
||||||
console.log(chalk.blue(`Additional context: "${additionalContext}"`));
|
console.log(chalk.blue(`Expanding task ${options.id}...`));
|
||||||
}
|
|
||||||
await expandAllTasks(
|
// Call the refactored expandTask function
|
||||||
tasksPath,
|
|
||||||
numSubtasks,
|
|
||||||
useResearch,
|
|
||||||
additionalContext,
|
|
||||||
forceFlag
|
|
||||||
);
|
|
||||||
} else if (idArg) {
|
|
||||||
console.log(
|
|
||||||
chalk.blue(`Expanding task ${idArg} with ${numSubtasks} subtasks...`)
|
|
||||||
);
|
|
||||||
if (useResearch) {
|
|
||||||
console.log(
|
|
||||||
chalk.blue(
|
|
||||||
'Using Perplexity AI for research-backed subtask generation'
|
|
||||||
)
|
|
||||||
);
|
|
||||||
} else {
|
|
||||||
console.log(
|
|
||||||
chalk.yellow('Research-backed subtask generation disabled')
|
|
||||||
);
|
|
||||||
}
|
|
||||||
if (additionalContext) {
|
|
||||||
console.log(chalk.blue(`Additional context: "${additionalContext}"`));
|
|
||||||
}
|
|
||||||
await expandTask(
|
await expandTask(
|
||||||
tasksPath,
|
tasksPath,
|
||||||
idArg,
|
options.id,
|
||||||
numSubtasks,
|
options.num, // Pass num (core function handles default)
|
||||||
useResearch,
|
options.research,
|
||||||
additionalContext
|
options.prompt,
|
||||||
|
// Pass empty context for CLI calls
|
||||||
|
{}
|
||||||
|
// Note: The 'force' flag is now primarily handled by the Direct Function Wrapper
|
||||||
|
// based on pre-checks, but the core function no longer explicitly needs it.
|
||||||
);
|
);
|
||||||
} else {
|
} else {
|
||||||
console.error(
|
console.error(
|
||||||
chalk.red(
|
chalk.red('Error: You must specify either a task ID (--id) or --all.')
|
||||||
'Error: Please specify a task ID with --id=<id> or use --all to expand all tasks.'
|
|
||||||
)
|
|
||||||
);
|
);
|
||||||
|
programInstance.help(); // Show help
|
||||||
}
|
}
|
||||||
});
|
});
|
||||||
|
|
||||||
|
|||||||
@@ -1,173 +1,57 @@
|
|||||||
import fs from 'fs';
|
import fs from 'fs';
|
||||||
import path from 'path';
|
import path from 'path';
|
||||||
|
import { z } from 'zod';
|
||||||
|
|
||||||
import { log, readJSON, writeJSON, isSilentMode } from '../utils.js';
|
import { log, readJSON, writeJSON, isSilentMode } from '../utils.js';
|
||||||
|
|
||||||
import { startLoadingIndicator, stopLoadingIndicator } from '../ui.js';
|
import { startLoadingIndicator, stopLoadingIndicator } from '../ui.js';
|
||||||
|
|
||||||
import {
|
import { generateTextService } from '../ai-services-unified.js';
|
||||||
generateSubtasksWithPerplexity,
|
|
||||||
_handleAnthropicStream,
|
|
||||||
getConfiguredAnthropicClient,
|
|
||||||
parseSubtasksFromText
|
|
||||||
} from '../ai-services.js';
|
|
||||||
|
|
||||||
import {
|
import { getDefaultSubtasks, getDebugFlag } from '../config-manager.js';
|
||||||
getDefaultSubtasks,
|
|
||||||
getMainModelId,
|
|
||||||
getMainMaxTokens,
|
|
||||||
getMainTemperature
|
|
||||||
} from '../config-manager.js';
|
|
||||||
import generateTaskFiles from './generate-task-files.js';
|
import generateTaskFiles from './generate-task-files.js';
|
||||||
|
|
||||||
|
// --- Zod Schemas (Keep from previous step) ---
|
||||||
|
const subtaskSchema = z
|
||||||
|
.object({
|
||||||
|
id: z
|
||||||
|
.number()
|
||||||
|
.int()
|
||||||
|
.positive()
|
||||||
|
.describe('Sequential subtask ID starting from 1'),
|
||||||
|
title: z.string().min(5).describe('Clear, specific title for the subtask'),
|
||||||
|
description: z
|
||||||
|
.string()
|
||||||
|
.min(10)
|
||||||
|
.describe('Detailed description of the subtask'),
|
||||||
|
dependencies: z
|
||||||
|
.array(z.number().int())
|
||||||
|
.describe('IDs of prerequisite subtasks within this expansion'),
|
||||||
|
details: z.string().min(20).describe('Implementation details and guidance'),
|
||||||
|
status: z
|
||||||
|
.string()
|
||||||
|
.describe(
|
||||||
|
'The current status of the subtask (should be pending initially)'
|
||||||
|
),
|
||||||
|
testStrategy: z
|
||||||
|
.string()
|
||||||
|
.optional()
|
||||||
|
.describe('Approach for testing this subtask')
|
||||||
|
})
|
||||||
|
.strict();
|
||||||
|
const subtaskArraySchema = z.array(subtaskSchema);
|
||||||
|
const subtaskWrapperSchema = z.object({
|
||||||
|
subtasks: subtaskArraySchema.describe('The array of generated subtasks.')
|
||||||
|
});
|
||||||
|
// --- End Zod Schemas ---
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Expand a task into subtasks
|
* Generates the system prompt for the main AI role (e.g., Claude).
|
||||||
* @param {string} tasksPath - Path to the tasks.json file
|
* @param {number} subtaskCount - The target number of subtasks.
|
||||||
* @param {number} taskId - Task ID to expand
|
* @returns {string} The system prompt.
|
||||||
* @param {number} numSubtasks - Number of subtasks to generate
|
|
||||||
* @param {boolean} useResearch - Whether to use research with Perplexity
|
|
||||||
* @param {string} additionalContext - Additional context
|
|
||||||
* @param {Object} options - Options for expanding tasks
|
|
||||||
* @param {function} options.reportProgress - Function to report progress
|
|
||||||
* @param {Object} options.mcpLog - MCP logger object
|
|
||||||
* @param {Object} options.session - Session object from MCP
|
|
||||||
* @returns {Promise<Object>} Expanded task
|
|
||||||
*/
|
*/
|
||||||
async function expandTask(
|
function generateMainSystemPrompt(subtaskCount) {
|
||||||
tasksPath,
|
return `You are an AI assistant helping with task breakdown for software development.
|
||||||
taskId,
|
|
||||||
numSubtasks,
|
|
||||||
useResearch = false,
|
|
||||||
additionalContext = '',
|
|
||||||
{ reportProgress, mcpLog, session } = {}
|
|
||||||
) {
|
|
||||||
// Determine output format based on mcpLog presence (simplification)
|
|
||||||
const outputFormat = mcpLog ? 'json' : 'text';
|
|
||||||
|
|
||||||
// Create custom reporter that checks for MCP log and silent mode
|
|
||||||
const report = (message, level = 'info') => {
|
|
||||||
if (mcpLog) {
|
|
||||||
mcpLog[level](message);
|
|
||||||
} else if (!isSilentMode() && outputFormat === 'text') {
|
|
||||||
// Only log to console if not in silent mode and outputFormat is 'text'
|
|
||||||
log(level, message);
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
// Keep the mcpLog check for specific MCP context logging
|
|
||||||
if (mcpLog) {
|
|
||||||
mcpLog.info(
|
|
||||||
`expandTask - reportProgress available: ${!!reportProgress}, session available: ${!!session}`
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
try {
|
|
||||||
// Read the tasks.json file
|
|
||||||
const data = readJSON(tasksPath);
|
|
||||||
if (!data || !data.tasks) {
|
|
||||||
throw new Error('Invalid or missing tasks.json');
|
|
||||||
}
|
|
||||||
|
|
||||||
// Find the task
|
|
||||||
const task = data.tasks.find((t) => t.id === parseInt(taskId, 10));
|
|
||||||
if (!task) {
|
|
||||||
throw new Error(`Task with ID ${taskId} not found`);
|
|
||||||
}
|
|
||||||
|
|
||||||
report(`Expanding task ${taskId}: ${task.title}`);
|
|
||||||
|
|
||||||
// If the task already has subtasks and force flag is not set, return the existing subtasks
|
|
||||||
if (task.subtasks && task.subtasks.length > 0) {
|
|
||||||
report(`Task ${taskId} already has ${task.subtasks.length} subtasks`);
|
|
||||||
return task;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Determine the number of subtasks to generate
|
|
||||||
let subtaskCount = parseInt(numSubtasks, 10) || getDefaultSubtasks(); // Use getter
|
|
||||||
|
|
||||||
// Check if we have a complexity analysis for this task
|
|
||||||
let taskAnalysis = null;
|
|
||||||
try {
|
|
||||||
const reportPath = 'scripts/task-complexity-report.json';
|
|
||||||
if (fs.existsSync(reportPath)) {
|
|
||||||
const report = readJSON(reportPath);
|
|
||||||
if (report && report.complexityAnalysis) {
|
|
||||||
taskAnalysis = report.complexityAnalysis.find(
|
|
||||||
(a) => a.taskId === task.id
|
|
||||||
);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} catch (error) {
|
|
||||||
report(`Could not read complexity analysis: ${error.message}`, 'warn');
|
|
||||||
}
|
|
||||||
|
|
||||||
// Use recommended subtask count if available
|
|
||||||
if (taskAnalysis) {
|
|
||||||
report(
|
|
||||||
`Found complexity analysis for task ${taskId}: Score ${taskAnalysis.complexityScore}/10`
|
|
||||||
);
|
|
||||||
|
|
||||||
// Use recommended number of subtasks if available
|
|
||||||
if (
|
|
||||||
taskAnalysis.recommendedSubtasks &&
|
|
||||||
subtaskCount === getDefaultSubtasks() // Use getter
|
|
||||||
) {
|
|
||||||
subtaskCount = taskAnalysis.recommendedSubtasks;
|
|
||||||
report(`Using recommended number of subtasks: ${subtaskCount}`);
|
|
||||||
}
|
|
||||||
|
|
||||||
// Use the expansion prompt from analysis as additional context
|
|
||||||
if (taskAnalysis.expansionPrompt && !additionalContext) {
|
|
||||||
additionalContext = taskAnalysis.expansionPrompt;
|
|
||||||
report(`Using expansion prompt from complexity analysis`);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Generate subtasks with AI
|
|
||||||
let generatedSubtasks = [];
|
|
||||||
|
|
||||||
// Only create loading indicator if not in silent mode and no mcpLog (CLI mode)
|
|
||||||
let loadingIndicator = null;
|
|
||||||
if (!isSilentMode() && !mcpLog) {
|
|
||||||
loadingIndicator = startLoadingIndicator(
|
|
||||||
useResearch
|
|
||||||
? 'Generating research-backed subtasks...'
|
|
||||||
: 'Generating subtasks...'
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
try {
|
|
||||||
// Determine the next subtask ID
|
|
||||||
const nextSubtaskId = 1;
|
|
||||||
|
|
||||||
if (useResearch) {
|
|
||||||
// Use Perplexity for research-backed subtasks
|
|
||||||
if (!perplexity) {
|
|
||||||
report(
|
|
||||||
'Perplexity AI is not available. Falling back to Claude AI.',
|
|
||||||
'warn'
|
|
||||||
);
|
|
||||||
useResearch = false;
|
|
||||||
} else {
|
|
||||||
report('Using Perplexity for research-backed subtasks');
|
|
||||||
generatedSubtasks = await generateSubtasksWithPerplexity(
|
|
||||||
task,
|
|
||||||
subtaskCount,
|
|
||||||
nextSubtaskId,
|
|
||||||
additionalContext,
|
|
||||||
{ reportProgress, mcpLog, silentMode: isSilentMode(), session }
|
|
||||||
);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if (!useResearch) {
|
|
||||||
report('Using regular Claude for generating subtasks');
|
|
||||||
|
|
||||||
// Use our getConfiguredAnthropicClient function instead of getAnthropicClient
|
|
||||||
const client = getConfiguredAnthropicClient(session);
|
|
||||||
|
|
||||||
// Build the system prompt
|
|
||||||
const systemPrompt = `You are an AI assistant helping with task breakdown for software development.
|
|
||||||
You need to break down a high-level task into ${subtaskCount} specific subtasks that can be implemented one by one.
|
You need to break down a high-level task into ${subtaskCount} specific subtasks that can be implemented one by one.
|
||||||
|
|
||||||
Subtasks should:
|
Subtasks should:
|
||||||
@@ -175,91 +59,422 @@ Subtasks should:
|
|||||||
2. Follow a logical sequence
|
2. Follow a logical sequence
|
||||||
3. Each handle a distinct part of the parent task
|
3. Each handle a distinct part of the parent task
|
||||||
4. Include clear guidance on implementation approach
|
4. Include clear guidance on implementation approach
|
||||||
5. Have appropriate dependency chains between subtasks
|
5. Have appropriate dependency chains between subtasks (using the new sequential IDs)
|
||||||
6. Collectively cover all aspects of the parent task
|
6. Collectively cover all aspects of the parent task
|
||||||
|
|
||||||
For each subtask, provide:
|
For each subtask, provide:
|
||||||
- A clear, specific title
|
- id: Sequential integer starting from the provided nextSubtaskId
|
||||||
- Detailed implementation steps
|
- title: Clear, specific title
|
||||||
- Dependencies on previous subtasks
|
- description: Detailed description
|
||||||
- Testing approach
|
- dependencies: Array of prerequisite subtask IDs (use the new sequential IDs)
|
||||||
|
- details: Implementation details
|
||||||
|
- testStrategy: Optional testing approach
|
||||||
|
|
||||||
Each subtask should be implementable in a focused coding session.`;
|
|
||||||
|
|
||||||
|
Respond ONLY with a valid JSON object containing a single key "subtasks" whose value is an array matching the structure described. Do not include any explanatory text, markdown formatting, or code block markers.`;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Generates the user prompt for the main AI role (e.g., Claude).
|
||||||
|
* @param {Object} task - The parent task object.
|
||||||
|
* @param {number} subtaskCount - The target number of subtasks.
|
||||||
|
* @param {string} additionalContext - Optional additional context.
|
||||||
|
* @param {number} nextSubtaskId - The starting ID for the new subtasks.
|
||||||
|
* @returns {string} The user prompt.
|
||||||
|
*/
|
||||||
|
function generateMainUserPrompt(
|
||||||
|
task,
|
||||||
|
subtaskCount,
|
||||||
|
additionalContext,
|
||||||
|
nextSubtaskId
|
||||||
|
) {
|
||||||
const contextPrompt = additionalContext
|
const contextPrompt = additionalContext
|
||||||
? `\n\nAdditional context to consider: ${additionalContext}`
|
? `\n\nAdditional context: ${additionalContext}`
|
||||||
: '';
|
: '';
|
||||||
|
const schemaDescription = `
|
||||||
|
{
|
||||||
|
"subtasks": [
|
||||||
|
{
|
||||||
|
"id": ${nextSubtaskId}, // First subtask ID
|
||||||
|
"title": "Specific subtask title",
|
||||||
|
"description": "Detailed description",
|
||||||
|
"dependencies": [], // e.g., [${nextSubtaskId + 1}] if it depends on the next
|
||||||
|
"details": "Implementation guidance",
|
||||||
|
"testStrategy": "Optional testing approach"
|
||||||
|
},
|
||||||
|
// ... (repeat for a total of ${subtaskCount} subtasks with sequential IDs)
|
||||||
|
]
|
||||||
|
}`;
|
||||||
|
|
||||||
const userPrompt = `Please break down this task into ${subtaskCount} specific, actionable subtasks:
|
return `Break down this task into exactly ${subtaskCount} specific subtasks:
|
||||||
|
|
||||||
Task ID: ${task.id}
|
Task ID: ${task.id}
|
||||||
Title: ${task.title}
|
Title: ${task.title}
|
||||||
Description: ${task.description}
|
Description: ${task.description}
|
||||||
Current details: ${task.details || 'None provided'}
|
Current details: ${task.details || 'None'}
|
||||||
${contextPrompt}
|
${contextPrompt}
|
||||||
|
|
||||||
Return exactly ${subtaskCount} subtasks with the following JSON structure:
|
Return ONLY the JSON object containing the "subtasks" array, matching this structure:
|
||||||
[
|
${schemaDescription}`;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Generates the user prompt for the research AI role (e.g., Perplexity).
|
||||||
|
* @param {Object} task - The parent task object.
|
||||||
|
* @param {number} subtaskCount - The target number of subtasks.
|
||||||
|
* @param {string} additionalContext - Optional additional context.
|
||||||
|
* @param {number} nextSubtaskId - The starting ID for the new subtasks.
|
||||||
|
* @returns {string} The user prompt.
|
||||||
|
*/
|
||||||
|
function generateResearchUserPrompt(
|
||||||
|
task,
|
||||||
|
subtaskCount,
|
||||||
|
additionalContext,
|
||||||
|
nextSubtaskId
|
||||||
|
) {
|
||||||
|
const contextPrompt = additionalContext
|
||||||
|
? `\n\nConsider this context: ${additionalContext}`
|
||||||
|
: '';
|
||||||
|
const schemaDescription = `
|
||||||
{
|
{
|
||||||
"id": ${nextSubtaskId},
|
"subtasks": [
|
||||||
"title": "First subtask title",
|
{
|
||||||
"description": "Detailed description",
|
"id": <number>, // Sequential ID starting from ${nextSubtaskId}
|
||||||
"dependencies": [],
|
"title": "<string>",
|
||||||
"details": "Implementation details"
|
"description": "<string>",
|
||||||
|
"dependencies": [<number>], // e.g., [${nextSubtaskId + 1}]
|
||||||
|
"details": "<string>",
|
||||||
|
"testStrategy": "<string>" // Optional
|
||||||
},
|
},
|
||||||
...more subtasks...
|
// ... (repeat for ${subtaskCount} subtasks)
|
||||||
]
|
]
|
||||||
|
}`;
|
||||||
|
|
||||||
Note on dependencies: Subtasks can depend on other subtasks with lower IDs. Use an empty array if there are no dependencies.`;
|
return `Analyze the following task and break it down into exactly ${subtaskCount} specific subtasks using your research capabilities. Assign sequential IDs starting from ${nextSubtaskId}.
|
||||||
|
|
||||||
// Prepare API parameters using getters
|
Parent Task:
|
||||||
const apiParams = {
|
ID: ${task.id}
|
||||||
model: getMainModelId(session),
|
Title: ${task.title}
|
||||||
max_tokens: getMainMaxTokens(session),
|
Description: ${task.description}
|
||||||
temperature: getMainTemperature(session),
|
Current details: ${task.details || 'None'}
|
||||||
system: systemPrompt,
|
${contextPrompt}
|
||||||
messages: [{ role: 'user', content: userPrompt }]
|
|
||||||
|
CRITICAL: Respond ONLY with a valid JSON object containing a single key "subtasks". The value must be an array of the generated subtasks, strictly matching this structure:
|
||||||
|
${schemaDescription}
|
||||||
|
|
||||||
|
Do not include ANY explanatory text, markdown, or code block markers. Just the JSON object.`;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Parse subtasks from AI's text response. Includes basic cleanup.
|
||||||
|
* @param {string} text - Response text from AI.
|
||||||
|
* @param {number} startId - Starting subtask ID expected.
|
||||||
|
* @param {number} expectedCount - Expected number of subtasks.
|
||||||
|
* @param {number} parentTaskId - Parent task ID for context.
|
||||||
|
* @param {Object} logger - Logging object (mcpLog or console log).
|
||||||
|
* @returns {Array} Parsed and potentially corrected subtasks array.
|
||||||
|
* @throws {Error} If parsing fails or JSON is invalid/malformed.
|
||||||
|
*/
|
||||||
|
function parseSubtasksFromText(
|
||||||
|
text,
|
||||||
|
startId,
|
||||||
|
expectedCount,
|
||||||
|
parentTaskId,
|
||||||
|
logger
|
||||||
|
) {
|
||||||
|
logger.info('Attempting to parse subtasks object from text response...');
|
||||||
|
if (!text || text.trim() === '') {
|
||||||
|
throw new Error('AI response text is empty.');
|
||||||
|
}
|
||||||
|
|
||||||
|
let cleanedResponse = text.trim();
|
||||||
|
const originalResponseForDebug = cleanedResponse;
|
||||||
|
|
||||||
|
// 1. Extract from Markdown code block first
|
||||||
|
const codeBlockMatch = cleanedResponse.match(
|
||||||
|
/```(?:json)?\s*([\s\S]*?)\s*```/
|
||||||
|
);
|
||||||
|
if (codeBlockMatch) {
|
||||||
|
cleanedResponse = codeBlockMatch[1].trim();
|
||||||
|
logger.info('Extracted JSON content from Markdown code block.');
|
||||||
|
} else {
|
||||||
|
// 2. If no code block, find first '{' and last '}' for the object
|
||||||
|
const firstBrace = cleanedResponse.indexOf('{');
|
||||||
|
const lastBrace = cleanedResponse.lastIndexOf('}');
|
||||||
|
if (firstBrace !== -1 && lastBrace > firstBrace) {
|
||||||
|
cleanedResponse = cleanedResponse.substring(firstBrace, lastBrace + 1);
|
||||||
|
logger.info('Extracted content between first { and last }.');
|
||||||
|
} else {
|
||||||
|
logger.warn(
|
||||||
|
'Response does not appear to contain a JSON object structure. Parsing raw response.'
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// 3. Attempt to parse the object
|
||||||
|
let parsedObject;
|
||||||
|
try {
|
||||||
|
parsedObject = JSON.parse(cleanedResponse);
|
||||||
|
} catch (parseError) {
|
||||||
|
logger.error(`Failed to parse JSON object: ${parseError.message}`);
|
||||||
|
logger.error(
|
||||||
|
`Problematic JSON string (first 500 chars): ${cleanedResponse.substring(0, 500)}`
|
||||||
|
);
|
||||||
|
logger.error(
|
||||||
|
`Original Raw Response (first 500 chars): ${originalResponseForDebug.substring(0, 500)}`
|
||||||
|
);
|
||||||
|
throw new Error(
|
||||||
|
`Failed to parse JSON response object: ${parseError.message}`
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
// 4. Validate the object structure and extract the subtasks array
|
||||||
|
if (
|
||||||
|
!parsedObject ||
|
||||||
|
typeof parsedObject !== 'object' ||
|
||||||
|
!Array.isArray(parsedObject.subtasks)
|
||||||
|
) {
|
||||||
|
logger.error(
|
||||||
|
`Parsed content is not an object or missing 'subtasks' array. Content: ${JSON.stringify(parsedObject).substring(0, 200)}`
|
||||||
|
);
|
||||||
|
throw new Error(
|
||||||
|
'Parsed AI response is not a valid object containing a "subtasks" array.'
|
||||||
|
);
|
||||||
|
}
|
||||||
|
const parsedSubtasks = parsedObject.subtasks; // Extract the array
|
||||||
|
|
||||||
|
logger.info(
|
||||||
|
`Successfully parsed ${parsedSubtasks.length} potential subtasks from the object.`
|
||||||
|
);
|
||||||
|
if (expectedCount && parsedSubtasks.length !== expectedCount) {
|
||||||
|
logger.warn(
|
||||||
|
`Expected ${expectedCount} subtasks, but parsed ${parsedSubtasks.length}.`
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
// 5. Validate and Normalize each subtask using Zod schema
|
||||||
|
let currentId = startId;
|
||||||
|
const validatedSubtasks = [];
|
||||||
|
const validationErrors = [];
|
||||||
|
|
||||||
|
for (const rawSubtask of parsedSubtasks) {
|
||||||
|
const correctedSubtask = {
|
||||||
|
...rawSubtask,
|
||||||
|
id: currentId, // Enforce sequential ID
|
||||||
|
dependencies: Array.isArray(rawSubtask.dependencies)
|
||||||
|
? rawSubtask.dependencies
|
||||||
|
.map((dep) => (typeof dep === 'string' ? parseInt(dep, 10) : dep))
|
||||||
|
.filter(
|
||||||
|
(depId) => !isNaN(depId) && depId >= startId && depId < currentId
|
||||||
|
) // Ensure deps are numbers, valid range
|
||||||
|
: [],
|
||||||
|
status: 'pending' // Enforce pending status
|
||||||
|
// parentTaskId can be added if needed: parentTaskId: parentTaskId
|
||||||
};
|
};
|
||||||
|
|
||||||
// Call the streaming API using our helper
|
const result = subtaskSchema.safeParse(correctedSubtask);
|
||||||
const responseText = await _handleAnthropicStream(
|
|
||||||
client,
|
if (result.success) {
|
||||||
apiParams,
|
validatedSubtasks.push(result.data); // Add the validated data
|
||||||
{ reportProgress, mcpLog, silentMode: isSilentMode() }, // Pass isSilentMode() directly
|
} else {
|
||||||
!isSilentMode() // Only use CLI mode if not in silent mode
|
logger.warn(
|
||||||
|
`Subtask validation failed for raw data: ${JSON.stringify(rawSubtask).substring(0, 100)}...`
|
||||||
|
);
|
||||||
|
result.error.errors.forEach((err) => {
|
||||||
|
const errorMessage = ` - Field '${err.path.join('.')}': ${err.message}`;
|
||||||
|
logger.warn(errorMessage);
|
||||||
|
validationErrors.push(`Subtask ${currentId}: ${errorMessage}`);
|
||||||
|
});
|
||||||
|
// Optionally, decide whether to include partially valid tasks or skip them
|
||||||
|
// For now, we'll skip invalid ones
|
||||||
|
}
|
||||||
|
currentId++; // Increment ID for the next *potential* subtask
|
||||||
|
}
|
||||||
|
|
||||||
|
if (validationErrors.length > 0) {
|
||||||
|
logger.error(
|
||||||
|
`Found ${validationErrors.length} validation errors in the generated subtasks.`
|
||||||
|
);
|
||||||
|
// Optionally throw an error here if strict validation is required
|
||||||
|
// throw new Error(`Subtask validation failed:\n${validationErrors.join('\n')}`);
|
||||||
|
logger.warn('Proceeding with only the successfully validated subtasks.');
|
||||||
|
}
|
||||||
|
|
||||||
|
if (validatedSubtasks.length === 0 && parsedSubtasks.length > 0) {
|
||||||
|
throw new Error(
|
||||||
|
'AI response contained potential subtasks, but none passed validation.'
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Ensure we don't return more than expected, preferring validated ones
|
||||||
|
return validatedSubtasks.slice(0, expectedCount || validatedSubtasks.length);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Expand a task into subtasks using the unified AI service (generateTextService).
|
||||||
|
* @param {string} tasksPath - Path to the tasks.json file
|
||||||
|
* @param {number} taskId - Task ID to expand
|
||||||
|
* @param {number} [numSubtasks] - Optional: Target number of subtasks. Uses config default if not provided.
|
||||||
|
* @param {boolean} [useResearch=false] - Whether to use the research AI role.
|
||||||
|
* @param {string} [additionalContext=''] - Optional additional context.
|
||||||
|
* @param {Object} context - Context object containing session and mcpLog.
|
||||||
|
* @param {Object} [context.session] - Session object from MCP.
|
||||||
|
* @param {Object} [context.mcpLog] - MCP logger object.
|
||||||
|
* @returns {Promise<Object>} The updated parent task object with new subtasks.
|
||||||
|
* @throws {Error} If task not found, AI service fails, or parsing fails.
|
||||||
|
*/
|
||||||
|
async function expandTask(
|
||||||
|
tasksPath,
|
||||||
|
taskId,
|
||||||
|
numSubtasks,
|
||||||
|
useResearch = false,
|
||||||
|
additionalContext = '',
|
||||||
|
context = {}
|
||||||
|
) {
|
||||||
|
const { session, mcpLog } = context;
|
||||||
|
const outputFormat = mcpLog ? 'json' : 'text';
|
||||||
|
|
||||||
|
// Use mcpLog if available, otherwise use the default console log wrapper
|
||||||
|
const logger = mcpLog || {
|
||||||
|
info: (msg) => !isSilentMode() && log('info', msg),
|
||||||
|
warn: (msg) => !isSilentMode() && log('warn', msg),
|
||||||
|
error: (msg) => !isSilentMode() && log('error', msg),
|
||||||
|
debug: (msg) =>
|
||||||
|
!isSilentMode() && getDebugFlag(session) && log('debug', msg) // Use getDebugFlag
|
||||||
|
};
|
||||||
|
|
||||||
|
if (mcpLog) {
|
||||||
|
logger.info(`expandTask called with context: session=${!!session}`);
|
||||||
|
}
|
||||||
|
|
||||||
|
try {
|
||||||
|
// --- Task Loading/Filtering (Unchanged) ---
|
||||||
|
logger.info(`Reading tasks from ${tasksPath}`);
|
||||||
|
const data = readJSON(tasksPath);
|
||||||
|
if (!data || !data.tasks)
|
||||||
|
throw new Error(`Invalid tasks data in ${tasksPath}`);
|
||||||
|
const taskIndex = data.tasks.findIndex(
|
||||||
|
(t) => t.id === parseInt(taskId, 10)
|
||||||
|
);
|
||||||
|
if (taskIndex === -1) throw new Error(`Task ${taskId} not found`);
|
||||||
|
const task = data.tasks[taskIndex];
|
||||||
|
logger.info(`Expanding task ${taskId}: ${task.title}`);
|
||||||
|
// --- End Task Loading/Filtering ---
|
||||||
|
|
||||||
|
// --- Subtask Count & Complexity Check (Unchanged) ---
|
||||||
|
let subtaskCount = parseInt(numSubtasks, 10);
|
||||||
|
if (isNaN(subtaskCount) || subtaskCount <= 0) {
|
||||||
|
subtaskCount = getDefaultSubtasks(session); // Pass session
|
||||||
|
logger.info(`Using default number of subtasks: ${subtaskCount}`);
|
||||||
|
}
|
||||||
|
// ... (complexity report check logic remains) ...
|
||||||
|
// --- End Subtask Count & Complexity Check ---
|
||||||
|
|
||||||
|
// --- AI Subtask Generation using generateTextService ---
|
||||||
|
let generatedSubtasks = [];
|
||||||
|
const nextSubtaskId = (task.subtasks?.length || 0) + 1;
|
||||||
|
|
||||||
|
let loadingIndicator = null;
|
||||||
|
if (outputFormat === 'text') {
|
||||||
|
loadingIndicator = startLoadingIndicator(
|
||||||
|
`Generating ${subtaskCount} subtasks...`
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
let responseText = ''; // To store the raw text response
|
||||||
|
|
||||||
|
try {
|
||||||
|
// 1. Determine Role and Generate Prompts
|
||||||
|
const role = useResearch ? 'research' : 'main';
|
||||||
|
logger.info(`Using AI service with role: ${role}`);
|
||||||
|
let prompt;
|
||||||
|
let systemPrompt;
|
||||||
|
if (useResearch) {
|
||||||
|
prompt = generateResearchUserPrompt(
|
||||||
|
task,
|
||||||
|
subtaskCount,
|
||||||
|
additionalContext,
|
||||||
|
nextSubtaskId
|
||||||
|
);
|
||||||
|
systemPrompt = `You are an AI assistant that responds ONLY with valid JSON objects as requested. The object should contain a 'subtasks' array.`;
|
||||||
|
} else {
|
||||||
|
prompt = generateMainUserPrompt(
|
||||||
|
task,
|
||||||
|
subtaskCount,
|
||||||
|
additionalContext,
|
||||||
|
nextSubtaskId
|
||||||
|
);
|
||||||
|
systemPrompt = generateMainSystemPrompt(subtaskCount);
|
||||||
|
}
|
||||||
|
|
||||||
|
// 2. Call generateTextService
|
||||||
|
responseText = await generateTextService({
|
||||||
|
prompt,
|
||||||
|
systemPrompt,
|
||||||
|
role,
|
||||||
|
session
|
||||||
|
});
|
||||||
|
logger.info(
|
||||||
|
'Successfully received text response from AI service',
|
||||||
|
'success'
|
||||||
);
|
);
|
||||||
|
|
||||||
// Parse the subtasks from the response
|
// 3. Parse Subtasks from Text Response
|
||||||
|
try {
|
||||||
generatedSubtasks = parseSubtasksFromText(
|
generatedSubtasks = parseSubtasksFromText(
|
||||||
responseText,
|
responseText,
|
||||||
nextSubtaskId,
|
nextSubtaskId,
|
||||||
subtaskCount,
|
subtaskCount,
|
||||||
task.id
|
task.id,
|
||||||
|
logger // Pass the logger
|
||||||
|
);
|
||||||
|
logger.info(
|
||||||
|
`Successfully parsed ${generatedSubtasks.length} subtasks from AI response.`
|
||||||
|
);
|
||||||
|
} catch (parseError) {
|
||||||
|
// Log error and throw
|
||||||
|
logger.error(
|
||||||
|
`Failed to parse subtasks from AI response: ${parseError.message}`
|
||||||
|
);
|
||||||
|
if (getDebugFlag(session)) {
|
||||||
|
// Use getter with session
|
||||||
|
logger.error(`Raw AI Response:\n${responseText}`);
|
||||||
|
}
|
||||||
|
throw new Error(
|
||||||
|
`Failed to parse valid subtasks from AI response: ${parseError.message}`
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
// --- End AI Subtask Generation ---
|
||||||
// Add the generated subtasks to the task
|
|
||||||
task.subtasks = generatedSubtasks;
|
|
||||||
|
|
||||||
// Write the updated tasks back to the file
|
|
||||||
writeJSON(tasksPath, data);
|
|
||||||
|
|
||||||
// Generate the individual task files
|
|
||||||
await generateTaskFiles(tasksPath, path.dirname(tasksPath));
|
|
||||||
|
|
||||||
return task;
|
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
report(`Error expanding task: ${error.message}`, 'error');
|
if (loadingIndicator) stopLoadingIndicator(loadingIndicator);
|
||||||
throw error;
|
logger.error(
|
||||||
|
`Error generating subtasks via AI service: ${error.message}`,
|
||||||
|
'error'
|
||||||
|
);
|
||||||
|
throw error; // Re-throw AI service error
|
||||||
} finally {
|
} finally {
|
||||||
// Always stop the loading indicator if we created one
|
if (loadingIndicator) stopLoadingIndicator(loadingIndicator);
|
||||||
if (loadingIndicator) {
|
|
||||||
stopLoadingIndicator(loadingIndicator);
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// --- Task Update & File Writing (Unchanged) ---
|
||||||
|
task.subtasks = generatedSubtasks;
|
||||||
|
data.tasks[taskIndex] = task;
|
||||||
|
logger.info(`Writing updated tasks to ${tasksPath}`);
|
||||||
|
writeJSON(tasksPath, data);
|
||||||
|
logger.info(`Generating individual task files...`);
|
||||||
|
await generateTaskFiles(tasksPath, path.dirname(tasksPath));
|
||||||
|
logger.info(`Task files generated.`);
|
||||||
|
// --- End Task Update & File Writing ---
|
||||||
|
|
||||||
|
return task; // Return the updated task object
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
report(`Error expanding task: ${error.message}`, 'error');
|
// Catches errors from file reading, parsing, AI call etc.
|
||||||
throw error;
|
logger.error(`Error expanding task ${taskId}: ${error.message}`, 'error');
|
||||||
|
if (outputFormat === 'text' && getDebugFlag(session)) {
|
||||||
|
// Use getter with session
|
||||||
|
console.error(error); // Log full stack in debug CLI mode
|
||||||
|
}
|
||||||
|
throw error; // Re-throw for the caller
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -1819,7 +1819,7 @@ This piecemeal approach aims to establish the refactoring pattern before tacklin
|
|||||||
### Details:
|
### Details:
|
||||||
|
|
||||||
|
|
||||||
## 36. Refactor analyze-task-complexity.js for Unified AI Service & Config [in-progress]
|
## 36. Refactor analyze-task-complexity.js for Unified AI Service & Config [done]
|
||||||
### Dependencies: None
|
### Dependencies: None
|
||||||
### Description: Replace direct AI calls with `generateObjectService` from `ai-services-unified.js`. Pass `role` and `session`. Remove direct config getter usage (from `config-manager.js`) for AI parameters; use unified service instead. Keep config getters needed for report metadata (`getProjectName`, `getDefaultSubtasks`).
|
### Description: Replace direct AI calls with `generateObjectService` from `ai-services-unified.js`. Pass `role` and `session`. Remove direct config getter usage (from `config-manager.js`) for AI parameters; use unified service instead. Keep config getters needed for report metadata (`getProjectName`, `getDefaultSubtasks`).
|
||||||
### Details:
|
### Details:
|
||||||
@@ -1896,7 +1896,7 @@ This piecemeal approach aims to establish the refactoring pattern before tacklin
|
|||||||
These enhancements will ensure the refactored code is modular, maintainable, and fully decoupled from AI implementation details, aligning with modern refactoring best practices[1][3][5].
|
These enhancements will ensure the refactored code is modular, maintainable, and fully decoupled from AI implementation details, aligning with modern refactoring best practices[1][3][5].
|
||||||
</info added on 2025-04-24T17:45:51.956Z>
|
</info added on 2025-04-24T17:45:51.956Z>
|
||||||
|
|
||||||
## 37. Refactor expand-task.js for Unified AI Service & Config [pending]
|
## 37. Refactor expand-task.js for Unified AI Service & Config [done]
|
||||||
### Dependencies: None
|
### Dependencies: None
|
||||||
### Description: Replace direct AI calls (old `ai-services.js` helpers like `generateSubtasksWithPerplexity`) with `generateObjectService` from `ai-services-unified.js`. Pass `role` and `session`. Remove direct config getter usage (from `config-manager.js`) for AI parameters; use unified service instead. Keep `getDefaultSubtasks` usage.
|
### Description: Replace direct AI calls (old `ai-services.js` helpers like `generateSubtasksWithPerplexity`) with `generateObjectService` from `ai-services-unified.js`. Pass `role` and `session`. Remove direct config getter usage (from `config-manager.js`) for AI parameters; use unified service instead. Keep `getDefaultSubtasks` usage.
|
||||||
### Details:
|
### Details:
|
||||||
|
|||||||
@@ -38,3 +38,53 @@ Testing should verify both the functionality and user experience of the new feat
|
|||||||
- Test with empty input text
|
- Test with empty input text
|
||||||
- Test with very long input text
|
- Test with very long input text
|
||||||
- Test with special characters and formatting in the input
|
- Test with special characters and formatting in the input
|
||||||
|
|
||||||
|
# Subtasks:
|
||||||
|
## 1. Update command parsers to recognize --simple flag [pending]
|
||||||
|
### Dependencies: None
|
||||||
|
### Description: Modify the command parsers for both update-task and update-subtask commands to recognize and process the new --simple flag option.
|
||||||
|
### Details:
|
||||||
|
Add the --simple flag option to the command parser configurations in the CLI module. This should be implemented as a boolean flag that doesn't require any additional arguments. Update both the update-task and update-subtask command definitions to include this new option.
|
||||||
|
|
||||||
|
## 2. Implement conditional logic to bypass AI processing [pending]
|
||||||
|
### Dependencies: 62.1
|
||||||
|
### Description: Modify the update logic to check for the --simple flag and conditionally skip the AI processing pipeline when the flag is present.
|
||||||
|
### Details:
|
||||||
|
In the update handlers for both commands, add a condition to check if the --simple flag is set. If it is, create a path that bypasses the normal AI processing flow. This will require modifying the update functions to accept the flag parameter and branch the execution flow accordingly.
|
||||||
|
|
||||||
|
## 3. Format user input with timestamp for simple updates [pending]
|
||||||
|
### Dependencies: 62.2
|
||||||
|
### Description: Implement functionality to format the user's direct text input with a timestamp in the same format as AI-processed updates when the --simple flag is used.
|
||||||
|
### Details:
|
||||||
|
Create a utility function that takes the user's raw input text and prepends a timestamp in the same format used for AI-generated updates. This function should be called when the --simple flag is active. Ensure the timestamp format is consistent with the existing format used throughout the application.
|
||||||
|
|
||||||
|
## 4. Add visual indicator for manual updates [pending]
|
||||||
|
### Dependencies: 62.3
|
||||||
|
### Description: Make simple updates visually distinguishable from AI-processed updates by adding a 'manual update' indicator or other visual differentiation.
|
||||||
|
### Details:
|
||||||
|
Modify the update formatting to include a visual indicator (such as '[Manual Update]' prefix or different styling) when displaying updates that were created using the --simple flag. This will help users distinguish between AI-processed and manually entered updates.
|
||||||
|
|
||||||
|
## 5. Implement storage of simple updates in history [pending]
|
||||||
|
### Dependencies: 62.3, 62.4
|
||||||
|
### Description: Ensure that updates made with the --simple flag are properly saved to the task or subtask's history in the same way as AI-processed updates.
|
||||||
|
### Details:
|
||||||
|
Modify the storage logic to save the formatted simple updates to the task or subtask history. The storage format should be consistent with AI-processed updates, but include the manual indicator. Ensure that the update is properly associated with the correct task or subtask.
|
||||||
|
|
||||||
|
## 6. Update help documentation for the new flag [pending]
|
||||||
|
### Dependencies: 62.1
|
||||||
|
### Description: Update the help documentation for both update-task and update-subtask commands to include information about the new --simple flag.
|
||||||
|
### Details:
|
||||||
|
Add clear descriptions of the --simple flag to the help text for both commands. The documentation should explain that the flag allows users to add timestamped notes without AI processing, directly using the text from the prompt. Include examples of how to use the flag.
|
||||||
|
|
||||||
|
## 7. Implement integration tests for the simple update feature [pending]
|
||||||
|
### Dependencies: 62.1, 62.2, 62.3, 62.4, 62.5
|
||||||
|
### Description: Create comprehensive integration tests to verify that the --simple flag works correctly in both commands and integrates properly with the rest of the system.
|
||||||
|
### Details:
|
||||||
|
Develop integration tests that verify the entire flow of using the --simple flag with both update commands. Tests should confirm that updates are correctly formatted, stored, and displayed. Include edge cases such as empty input, very long input, and special characters.
|
||||||
|
|
||||||
|
## 8. Perform final validation and documentation [pending]
|
||||||
|
### Dependencies: 62.1, 62.2, 62.3, 62.4, 62.5, 62.6, 62.7
|
||||||
|
### Description: Conduct final validation of the feature across all use cases and update the user documentation to include the new functionality.
|
||||||
|
### Details:
|
||||||
|
Perform end-to-end testing of the feature to ensure it works correctly in all scenarios. Update the user documentation with detailed information about the new --simple flag, including its purpose, how to use it, and examples. Ensure that the documentation clearly explains the difference between AI-processed updates and simple updates.
|
||||||
|
|
||||||
|
|||||||
101
tasks/task_063.txt
Normal file
101
tasks/task_063.txt
Normal file
@@ -0,0 +1,101 @@
|
|||||||
|
# Task ID: 63
|
||||||
|
# Title: Add pnpm Support for the Taskmaster Package
|
||||||
|
# Status: pending
|
||||||
|
# Dependencies: None
|
||||||
|
# Priority: medium
|
||||||
|
# Description: Implement full support for pnpm as an alternative package manager in the Taskmaster application, allowing users to install and manage the package using pnpm alongside the existing npm and yarn options.
|
||||||
|
# Details:
|
||||||
|
This task involves:
|
||||||
|
|
||||||
|
1. Update the installation documentation to include pnpm installation commands (e.g., `pnpm add taskmaster`).
|
||||||
|
|
||||||
|
2. Ensure all package scripts are compatible with pnpm's execution model:
|
||||||
|
- Review and modify package.json scripts if necessary
|
||||||
|
- Test script execution with pnpm syntax (`pnpm run <script>`)
|
||||||
|
- Address any pnpm-specific path or execution differences
|
||||||
|
|
||||||
|
3. Create a pnpm-lock.yaml file by installing dependencies with pnpm.
|
||||||
|
|
||||||
|
4. Test the application's installation and operation when installed via pnpm:
|
||||||
|
- Global installation (`pnpm add -g taskmaster`)
|
||||||
|
- Local project installation
|
||||||
|
- Verify CLI commands work correctly when installed with pnpm
|
||||||
|
|
||||||
|
5. Update CI/CD pipelines to include testing with pnpm:
|
||||||
|
- Add a pnpm test matrix to GitHub Actions workflows
|
||||||
|
- Ensure tests pass when dependencies are installed with pnpm
|
||||||
|
|
||||||
|
6. Handle any pnpm-specific dependency resolution issues:
|
||||||
|
- Address potential hoisting differences between npm/yarn and pnpm
|
||||||
|
- Test with pnpm's strict mode to ensure compatibility
|
||||||
|
|
||||||
|
7. Document any pnpm-specific considerations or commands in the README and documentation.
|
||||||
|
|
||||||
|
8. Consider adding a pnpm-specific installation script or helper if needed.
|
||||||
|
|
||||||
|
This implementation should maintain full feature parity regardless of which package manager is used to install Taskmaster.
|
||||||
|
|
||||||
|
# Test Strategy:
|
||||||
|
1. Manual Testing:
|
||||||
|
- Install Taskmaster globally using pnpm: `pnpm add -g taskmaster`
|
||||||
|
- Install Taskmaster locally in a test project: `pnpm add taskmaster`
|
||||||
|
- Verify all CLI commands function correctly with both installation methods
|
||||||
|
- Test all major features to ensure they work identically to npm/yarn installations
|
||||||
|
|
||||||
|
2. Automated Testing:
|
||||||
|
- Create a dedicated test workflow in GitHub Actions that uses pnpm
|
||||||
|
- Run the full test suite using pnpm to install dependencies
|
||||||
|
- Verify all tests pass with the same results as npm/yarn
|
||||||
|
|
||||||
|
3. Documentation Testing:
|
||||||
|
- Review all documentation to ensure pnpm commands are correctly documented
|
||||||
|
- Verify installation instructions work as written
|
||||||
|
- Test any pnpm-specific instructions or notes
|
||||||
|
|
||||||
|
4. Compatibility Testing:
|
||||||
|
- Test on different operating systems (Windows, macOS, Linux)
|
||||||
|
- Verify compatibility with different pnpm versions (latest stable and LTS)
|
||||||
|
- Test in environments with multiple package managers installed
|
||||||
|
|
||||||
|
5. Edge Case Testing:
|
||||||
|
- Test installation in a project that uses pnpm workspaces
|
||||||
|
- Verify behavior when upgrading from an npm/yarn installation to pnpm
|
||||||
|
- Test with pnpm's various flags and modes (--frozen-lockfile, --strict-peer-dependencies)
|
||||||
|
|
||||||
|
6. Performance Comparison:
|
||||||
|
- Measure and document any performance differences between package managers
|
||||||
|
- Compare installation times and disk space usage
|
||||||
|
|
||||||
|
Success criteria: Taskmaster should install and function identically regardless of whether it was installed via npm, yarn, or pnpm, with no degradation in functionality or performance.
|
||||||
|
|
||||||
|
# Subtasks:
|
||||||
|
## 1. Update Documentation for pnpm Support [pending]
|
||||||
|
### Dependencies: None
|
||||||
|
### Description: Revise installation and usage documentation to include pnpm commands and instructions for installing and managing Taskmaster with pnpm.
|
||||||
|
### Details:
|
||||||
|
Add pnpm installation commands (e.g., `pnpm add taskmaster`) and update all relevant sections in the README and official docs to reflect pnpm as a supported package manager.
|
||||||
|
|
||||||
|
## 2. Ensure Package Scripts Compatibility with pnpm [pending]
|
||||||
|
### Dependencies: 63.1
|
||||||
|
### Description: Review and update package.json scripts to ensure they work seamlessly with pnpm's execution model.
|
||||||
|
### Details:
|
||||||
|
Test all scripts using `pnpm run <script>`, address any pnpm-specific path or execution differences, and modify scripts as needed for compatibility.
|
||||||
|
|
||||||
|
## 3. Generate and Validate pnpm Lockfile [pending]
|
||||||
|
### Dependencies: 63.2
|
||||||
|
### Description: Install dependencies using pnpm to create a pnpm-lock.yaml file and ensure it accurately reflects the project's dependency tree.
|
||||||
|
### Details:
|
||||||
|
Run `pnpm install` to generate the lockfile, check it into version control, and verify that dependency resolution is correct and consistent.
|
||||||
|
|
||||||
|
## 4. Test Taskmaster Installation and Operation with pnpm [pending]
|
||||||
|
### Dependencies: 63.3
|
||||||
|
### Description: Thoroughly test Taskmaster's installation and CLI operation when installed via pnpm, both globally and locally.
|
||||||
|
### Details:
|
||||||
|
Perform global (`pnpm add -g taskmaster`) and local installations, verify CLI commands, and check for any pnpm-specific issues or incompatibilities.
|
||||||
|
|
||||||
|
## 5. Integrate pnpm into CI/CD Pipeline [pending]
|
||||||
|
### Dependencies: 63.4
|
||||||
|
### Description: Update CI/CD workflows to include pnpm in the test matrix, ensuring all tests pass when dependencies are installed with pnpm.
|
||||||
|
### Details:
|
||||||
|
Modify GitHub Actions or other CI configurations to use pnpm/action-setup, run tests with pnpm, and cache pnpm dependencies for efficiency.
|
||||||
|
|
||||||
84
tasks/task_064.txt
Normal file
84
tasks/task_064.txt
Normal file
@@ -0,0 +1,84 @@
|
|||||||
|
# Task ID: 64
|
||||||
|
# Title: Add Yarn Support for Taskmaster Installation
|
||||||
|
# Status: pending
|
||||||
|
# Dependencies: None
|
||||||
|
# Priority: medium
|
||||||
|
# Description: Implement full support for installing and managing Taskmaster using Yarn package manager, providing users with an alternative to npm and pnpm.
|
||||||
|
# Details:
|
||||||
|
This task involves adding comprehensive Yarn support to the Taskmaster package to ensure it can be properly installed and managed using Yarn. Implementation should include:
|
||||||
|
|
||||||
|
1. Update package.json to ensure compatibility with Yarn installation methods
|
||||||
|
2. Verify all scripts and dependencies work correctly with Yarn
|
||||||
|
3. Add Yarn-specific configuration files (e.g., .yarnrc.yml if needed)
|
||||||
|
4. Update installation documentation to include Yarn installation instructions
|
||||||
|
5. Ensure all post-install scripts work correctly with Yarn
|
||||||
|
6. Verify that all CLI commands function properly when installed via Yarn
|
||||||
|
7. Handle any Yarn-specific package resolution or hoisting issues
|
||||||
|
8. Test compatibility with different Yarn versions (classic and berry/v2+)
|
||||||
|
9. Ensure proper lockfile generation and management
|
||||||
|
10. Update any package manager detection logic in the codebase to recognize Yarn installations
|
||||||
|
|
||||||
|
The implementation should maintain feature parity regardless of which package manager (npm, pnpm, or Yarn) is used to install Taskmaster.
|
||||||
|
|
||||||
|
# Test Strategy:
|
||||||
|
Testing should verify complete Yarn support through the following steps:
|
||||||
|
|
||||||
|
1. Fresh installation tests:
|
||||||
|
- Install Taskmaster using `yarn add taskmaster` (global and local installations)
|
||||||
|
- Verify installation completes without errors
|
||||||
|
- Check that all binaries and executables are properly linked
|
||||||
|
|
||||||
|
2. Functionality tests:
|
||||||
|
- Run all Taskmaster commands on a Yarn-installed version
|
||||||
|
- Verify all features work identically to npm/pnpm installations
|
||||||
|
- Test with both Yarn v1 (classic) and Yarn v2+ (berry)
|
||||||
|
|
||||||
|
3. Update/uninstall tests:
|
||||||
|
- Test updating the package using Yarn commands
|
||||||
|
- Verify clean uninstallation using Yarn
|
||||||
|
|
||||||
|
4. CI integration:
|
||||||
|
- Add Yarn installation tests to CI pipeline
|
||||||
|
- Test on different operating systems (Windows, macOS, Linux)
|
||||||
|
|
||||||
|
5. Documentation verification:
|
||||||
|
- Ensure all documentation accurately reflects Yarn installation methods
|
||||||
|
- Verify any Yarn-specific commands or configurations are properly documented
|
||||||
|
|
||||||
|
6. Edge cases:
|
||||||
|
- Test installation in monorepo setups using Yarn workspaces
|
||||||
|
- Verify compatibility with other Yarn-specific features (plug'n'play, zero-installs)
|
||||||
|
|
||||||
|
All tests should pass with the same results as when using npm or pnpm.
|
||||||
|
|
||||||
|
# Subtasks:
|
||||||
|
## 1. Update package.json for Yarn Compatibility [pending]
|
||||||
|
### Dependencies: None
|
||||||
|
### Description: Modify the package.json file to ensure all dependencies, scripts, and configurations are compatible with Yarn's installation and resolution methods.
|
||||||
|
### Details:
|
||||||
|
Review and update dependency declarations, script syntax, and any package manager-specific fields to avoid conflicts or unsupported features when using Yarn.
|
||||||
|
|
||||||
|
## 2. Add Yarn-Specific Configuration Files [pending]
|
||||||
|
### Dependencies: 64.1
|
||||||
|
### Description: Introduce Yarn-specific configuration files such as .yarnrc.yml if needed to optimize Yarn behavior and ensure consistent installs.
|
||||||
|
### Details:
|
||||||
|
Determine if Yarn v2+ (Berry) or classic requires additional configuration for the project, and add or update .yarnrc.yml or .yarnrc files accordingly.
|
||||||
|
|
||||||
|
## 3. Test and Fix Yarn Compatibility for Scripts and CLI [pending]
|
||||||
|
### Dependencies: 64.2
|
||||||
|
### Description: Ensure all scripts, post-install hooks, and CLI commands function correctly when Taskmaster is installed and managed via Yarn.
|
||||||
|
### Details:
|
||||||
|
Test all lifecycle scripts, post-install actions, and CLI commands using Yarn. Address any issues related to environment variables, script execution, or dependency hoisting.
|
||||||
|
|
||||||
|
## 4. Update Documentation for Yarn Installation and Usage [pending]
|
||||||
|
### Dependencies: 64.3
|
||||||
|
### Description: Revise installation and usage documentation to include clear instructions for installing and managing Taskmaster with Yarn.
|
||||||
|
### Details:
|
||||||
|
Add Yarn-specific installation commands, troubleshooting tips, and notes on version compatibility to the README and any relevant docs.
|
||||||
|
|
||||||
|
## 5. Implement and Test Package Manager Detection Logic [pending]
|
||||||
|
### Dependencies: 64.4
|
||||||
|
### Description: Update or add logic in the codebase to detect Yarn installations and handle Yarn-specific behaviors, ensuring feature parity across package managers.
|
||||||
|
### Details:
|
||||||
|
Modify detection logic to recognize Yarn (classic and berry), handle lockfile generation, and resolve any Yarn-specific package resolution or hoisting issues.
|
||||||
|
|
||||||
235
tasks/tasks.json
235
tasks/tasks.json
@@ -3126,7 +3126,7 @@
|
|||||||
"title": "Refactor analyze-task-complexity.js for Unified AI Service & Config",
|
"title": "Refactor analyze-task-complexity.js for Unified AI Service & Config",
|
||||||
"description": "Replace direct AI calls with `generateObjectService` from `ai-services-unified.js`. Pass `role` and `session`. Remove direct config getter usage (from `config-manager.js`) for AI parameters; use unified service instead. Keep config getters needed for report metadata (`getProjectName`, `getDefaultSubtasks`).",
|
"description": "Replace direct AI calls with `generateObjectService` from `ai-services-unified.js`. Pass `role` and `session`. Remove direct config getter usage (from `config-manager.js`) for AI parameters; use unified service instead. Keep config getters needed for report metadata (`getProjectName`, `getDefaultSubtasks`).",
|
||||||
"details": "\n\n<info added on 2025-04-24T17:45:51.956Z>\n## Additional Implementation Notes for Refactoring\n\n**General Guidance**\n\n- Ensure all AI-related logic in `analyze-task-complexity.js` is abstracted behind the `generateObjectService` interface. The function should only specify *what* to generate (schema, prompt, and parameters), not *how* the AI call is made or which model/config is used.\n- Remove any code that directly fetches AI model parameters or credentials from configuration files. All such details must be handled by the unified service layer.\n\n**1. Core Logic Function (analyze-task-complexity.js)**\n\n- Refactor the function signature to accept a `session` object and a `role` parameter, in addition to the existing arguments.\n- When preparing the service call, construct a payload object containing:\n - The Zod schema for expected output.\n - The prompt or input for the AI.\n - The `role` (e.g., \"researcher\" or \"default\") based on the `useResearch` flag.\n - The `session` context for downstream configuration and authentication.\n- Example service call:\n ```js\n const result = await generateObjectService({\n schema: complexitySchema,\n prompt: buildPrompt(task, options),\n role,\n session,\n });\n ```\n- Remove all references to direct AI client instantiation or configuration fetching.\n\n**2. CLI Command Action Handler (commands.js)**\n\n- Ensure the CLI handler for `analyze-complexity`:\n - Accepts and parses the `--use-research` flag (or equivalent).\n - Passes the `useResearch` flag and the current session context to the core function.\n - Handles errors from the unified service gracefully, providing user-friendly feedback.\n\n**3. MCP Tool Definition (mcp-server/src/tools/analyze.js)**\n\n- Align the Zod schema for CLI options with the parameters expected by the core function, including `useResearch` and any new required fields.\n- Use `getMCPProjectRoot` to resolve the project path before invoking the core function.\n- Add status logging before and after the analysis, e.g., \"Analyzing task complexity...\" and \"Analysis complete.\"\n- Ensure the tool calls the core function with all required parameters, including session and resolved paths.\n\n**4. MCP Direct Function Wrapper (mcp-server/src/core/direct-functions/analyze-complexity-direct.js)**\n\n- Remove any direct AI client or config usage.\n- Implement a logger wrapper that standardizes log output for this function (e.g., `logger.info`, `logger.error`).\n- Pass the session context through to the core function to ensure all environment/config access is centralized.\n- Return a standardized response object, e.g.:\n ```js\n return {\n success: true,\n data: analysisResult,\n message: \"Task complexity analysis completed.\",\n };\n ```\n\n**Testing and Validation**\n\n- After refactoring, add or update tests to ensure:\n - The function does not break if AI service configuration changes.\n - The correct role and session are always passed to the unified service.\n - Errors from the unified service are handled and surfaced appropriately.\n\n**Best Practices**\n\n- Keep the core logic function pure and focused on orchestration, not implementation details.\n- Use dependency injection for session/context to facilitate testing and future extensibility.\n- Document the expected structure of the session and role parameters for maintainability.\n\nThese enhancements will ensure the refactored code is modular, maintainable, and fully decoupled from AI implementation details, aligning with modern refactoring best practices[1][3][5].\n</info added on 2025-04-24T17:45:51.956Z>",
|
"details": "\n\n<info added on 2025-04-24T17:45:51.956Z>\n## Additional Implementation Notes for Refactoring\n\n**General Guidance**\n\n- Ensure all AI-related logic in `analyze-task-complexity.js` is abstracted behind the `generateObjectService` interface. The function should only specify *what* to generate (schema, prompt, and parameters), not *how* the AI call is made or which model/config is used.\n- Remove any code that directly fetches AI model parameters or credentials from configuration files. All such details must be handled by the unified service layer.\n\n**1. Core Logic Function (analyze-task-complexity.js)**\n\n- Refactor the function signature to accept a `session` object and a `role` parameter, in addition to the existing arguments.\n- When preparing the service call, construct a payload object containing:\n - The Zod schema for expected output.\n - The prompt or input for the AI.\n - The `role` (e.g., \"researcher\" or \"default\") based on the `useResearch` flag.\n - The `session` context for downstream configuration and authentication.\n- Example service call:\n ```js\n const result = await generateObjectService({\n schema: complexitySchema,\n prompt: buildPrompt(task, options),\n role,\n session,\n });\n ```\n- Remove all references to direct AI client instantiation or configuration fetching.\n\n**2. CLI Command Action Handler (commands.js)**\n\n- Ensure the CLI handler for `analyze-complexity`:\n - Accepts and parses the `--use-research` flag (or equivalent).\n - Passes the `useResearch` flag and the current session context to the core function.\n - Handles errors from the unified service gracefully, providing user-friendly feedback.\n\n**3. MCP Tool Definition (mcp-server/src/tools/analyze.js)**\n\n- Align the Zod schema for CLI options with the parameters expected by the core function, including `useResearch` and any new required fields.\n- Use `getMCPProjectRoot` to resolve the project path before invoking the core function.\n- Add status logging before and after the analysis, e.g., \"Analyzing task complexity...\" and \"Analysis complete.\"\n- Ensure the tool calls the core function with all required parameters, including session and resolved paths.\n\n**4. MCP Direct Function Wrapper (mcp-server/src/core/direct-functions/analyze-complexity-direct.js)**\n\n- Remove any direct AI client or config usage.\n- Implement a logger wrapper that standardizes log output for this function (e.g., `logger.info`, `logger.error`).\n- Pass the session context through to the core function to ensure all environment/config access is centralized.\n- Return a standardized response object, e.g.:\n ```js\n return {\n success: true,\n data: analysisResult,\n message: \"Task complexity analysis completed.\",\n };\n ```\n\n**Testing and Validation**\n\n- After refactoring, add or update tests to ensure:\n - The function does not break if AI service configuration changes.\n - The correct role and session are always passed to the unified service.\n - Errors from the unified service are handled and surfaced appropriately.\n\n**Best Practices**\n\n- Keep the core logic function pure and focused on orchestration, not implementation details.\n- Use dependency injection for session/context to facilitate testing and future extensibility.\n- Document the expected structure of the session and role parameters for maintainability.\n\nThese enhancements will ensure the refactored code is modular, maintainable, and fully decoupled from AI implementation details, aligning with modern refactoring best practices[1][3][5].\n</info added on 2025-04-24T17:45:51.956Z>",
|
||||||
"status": "in-progress",
|
"status": "done",
|
||||||
"dependencies": [],
|
"dependencies": [],
|
||||||
"parentTaskId": 61
|
"parentTaskId": 61
|
||||||
},
|
},
|
||||||
@@ -3135,7 +3135,7 @@
|
|||||||
"title": "Refactor expand-task.js for Unified AI Service & Config",
|
"title": "Refactor expand-task.js for Unified AI Service & Config",
|
||||||
"description": "Replace direct AI calls (old `ai-services.js` helpers like `generateSubtasksWithPerplexity`) with `generateObjectService` from `ai-services-unified.js`. Pass `role` and `session`. Remove direct config getter usage (from `config-manager.js`) for AI parameters; use unified service instead. Keep `getDefaultSubtasks` usage.",
|
"description": "Replace direct AI calls (old `ai-services.js` helpers like `generateSubtasksWithPerplexity`) with `generateObjectService` from `ai-services-unified.js`. Pass `role` and `session`. Remove direct config getter usage (from `config-manager.js`) for AI parameters; use unified service instead. Keep `getDefaultSubtasks` usage.",
|
||||||
"details": "\n\n<info added on 2025-04-24T17:46:51.286Z>\n- In expand-task.js, ensure that all AI parameter configuration (such as model, temperature, max tokens) is passed via the unified generateObjectService interface, not fetched directly from config files or environment variables. This centralizes AI config management and supports future service changes without further refactoring.\n\n- When preparing the service call, construct the payload to include both the prompt and any schema or validation requirements expected by generateObjectService. For example, if subtasks must conform to a Zod schema, pass the schema definition or reference as part of the call.\n\n- For the CLI handler, ensure that the --research flag is mapped to the useResearch boolean and that this is explicitly passed to the core expand-task logic. Also, propagate any session or user context from CLI options to the core function for downstream auditing or personalization.\n\n- In the MCP tool definition, validate that all CLI-exposed parameters are reflected in the Zod schema, including optional ones like prompt overrides or force regeneration. This ensures strict input validation and prevents runtime errors.\n\n- In the direct function wrapper, implement a try/catch block around the core expandTask invocation. On error, log the error with context (task id, session id) and return a standardized error response object with error code and message fields.\n\n- Add unit tests or integration tests to verify that expand-task.js no longer imports or uses any direct AI client or config getter, and that all AI calls are routed through ai-services-unified.js.\n\n- Document the expected shape of the session object and any required fields for downstream service calls, so future maintainers know what context must be provided.\n</info added on 2025-04-24T17:46:51.286Z>",
|
"details": "\n\n<info added on 2025-04-24T17:46:51.286Z>\n- In expand-task.js, ensure that all AI parameter configuration (such as model, temperature, max tokens) is passed via the unified generateObjectService interface, not fetched directly from config files or environment variables. This centralizes AI config management and supports future service changes without further refactoring.\n\n- When preparing the service call, construct the payload to include both the prompt and any schema or validation requirements expected by generateObjectService. For example, if subtasks must conform to a Zod schema, pass the schema definition or reference as part of the call.\n\n- For the CLI handler, ensure that the --research flag is mapped to the useResearch boolean and that this is explicitly passed to the core expand-task logic. Also, propagate any session or user context from CLI options to the core function for downstream auditing or personalization.\n\n- In the MCP tool definition, validate that all CLI-exposed parameters are reflected in the Zod schema, including optional ones like prompt overrides or force regeneration. This ensures strict input validation and prevents runtime errors.\n\n- In the direct function wrapper, implement a try/catch block around the core expandTask invocation. On error, log the error with context (task id, session id) and return a standardized error response object with error code and message fields.\n\n- Add unit tests or integration tests to verify that expand-task.js no longer imports or uses any direct AI client or config getter, and that all AI calls are routed through ai-services-unified.js.\n\n- Document the expected shape of the session object and any required fields for downstream service calls, so future maintainers know what context must be provided.\n</info added on 2025-04-24T17:46:51.286Z>",
|
||||||
"status": "pending",
|
"status": "done",
|
||||||
"dependencies": [],
|
"dependencies": [],
|
||||||
"parentTaskId": 61
|
"parentTaskId": 61
|
||||||
},
|
},
|
||||||
@@ -3185,7 +3185,236 @@
|
|||||||
"testStrategy": "Testing should verify both the functionality and user experience of the new feature:\n\n1. Unit tests:\n - Test that the command parser correctly recognizes the --simple flag\n - Verify that AI processing is bypassed when the flag is present\n - Ensure timestamps are correctly formatted and added\n\n2. Integration tests:\n - Update a task with --simple flag and verify the exact text is saved\n - Update a subtask with --simple flag and verify the exact text is saved\n - Compare the output format with AI-processed updates to ensure consistency\n\n3. User experience tests:\n - Verify help documentation correctly explains the new flag\n - Test with various input lengths to ensure proper formatting\n - Ensure the update appears correctly when viewing task history\n\n4. Edge cases:\n - Test with empty input text\n - Test with very long input text\n - Test with special characters and formatting in the input",
|
"testStrategy": "Testing should verify both the functionality and user experience of the new feature:\n\n1. Unit tests:\n - Test that the command parser correctly recognizes the --simple flag\n - Verify that AI processing is bypassed when the flag is present\n - Ensure timestamps are correctly formatted and added\n\n2. Integration tests:\n - Update a task with --simple flag and verify the exact text is saved\n - Update a subtask with --simple flag and verify the exact text is saved\n - Compare the output format with AI-processed updates to ensure consistency\n\n3. User experience tests:\n - Verify help documentation correctly explains the new flag\n - Test with various input lengths to ensure proper formatting\n - Ensure the update appears correctly when viewing task history\n\n4. Edge cases:\n - Test with empty input text\n - Test with very long input text\n - Test with special characters and formatting in the input",
|
||||||
"status": "pending",
|
"status": "pending",
|
||||||
"dependencies": [],
|
"dependencies": [],
|
||||||
"priority": "medium"
|
"priority": "medium",
|
||||||
|
"subtasks": [
|
||||||
|
{
|
||||||
|
"id": 1,
|
||||||
|
"title": "Update command parsers to recognize --simple flag",
|
||||||
|
"description": "Modify the command parsers for both update-task and update-subtask commands to recognize and process the new --simple flag option.",
|
||||||
|
"dependencies": [],
|
||||||
|
"details": "Add the --simple flag option to the command parser configurations in the CLI module. This should be implemented as a boolean flag that doesn't require any additional arguments. Update both the update-task and update-subtask command definitions to include this new option.",
|
||||||
|
"status": "pending",
|
||||||
|
"testStrategy": "Test that both commands correctly recognize the --simple flag when provided and that the flag's presence is properly captured in the command arguments object."
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": 2,
|
||||||
|
"title": "Implement conditional logic to bypass AI processing",
|
||||||
|
"description": "Modify the update logic to check for the --simple flag and conditionally skip the AI processing pipeline when the flag is present.",
|
||||||
|
"dependencies": [
|
||||||
|
1
|
||||||
|
],
|
||||||
|
"details": "In the update handlers for both commands, add a condition to check if the --simple flag is set. If it is, create a path that bypasses the normal AI processing flow. This will require modifying the update functions to accept the flag parameter and branch the execution flow accordingly.",
|
||||||
|
"status": "pending",
|
||||||
|
"testStrategy": "Test that when the --simple flag is provided, the AI processing functions are not called, and when the flag is not provided, the normal AI processing flow is maintained."
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": 3,
|
||||||
|
"title": "Format user input with timestamp for simple updates",
|
||||||
|
"description": "Implement functionality to format the user's direct text input with a timestamp in the same format as AI-processed updates when the --simple flag is used.",
|
||||||
|
"dependencies": [
|
||||||
|
2
|
||||||
|
],
|
||||||
|
"details": "Create a utility function that takes the user's raw input text and prepends a timestamp in the same format used for AI-generated updates. This function should be called when the --simple flag is active. Ensure the timestamp format is consistent with the existing format used throughout the application.",
|
||||||
|
"status": "pending",
|
||||||
|
"testStrategy": "Verify that the timestamp format matches the AI-generated updates and that the user's text is preserved exactly as entered."
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": 4,
|
||||||
|
"title": "Add visual indicator for manual updates",
|
||||||
|
"description": "Make simple updates visually distinguishable from AI-processed updates by adding a 'manual update' indicator or other visual differentiation.",
|
||||||
|
"dependencies": [
|
||||||
|
3
|
||||||
|
],
|
||||||
|
"details": "Modify the update formatting to include a visual indicator (such as '[Manual Update]' prefix or different styling) when displaying updates that were created using the --simple flag. This will help users distinguish between AI-processed and manually entered updates.",
|
||||||
|
"status": "pending",
|
||||||
|
"testStrategy": "Check that updates made with the --simple flag are visually distinct from AI-processed updates when displayed in the task or subtask history."
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": 5,
|
||||||
|
"title": "Implement storage of simple updates in history",
|
||||||
|
"description": "Ensure that updates made with the --simple flag are properly saved to the task or subtask's history in the same way as AI-processed updates.",
|
||||||
|
"dependencies": [
|
||||||
|
3,
|
||||||
|
4
|
||||||
|
],
|
||||||
|
"details": "Modify the storage logic to save the formatted simple updates to the task or subtask history. The storage format should be consistent with AI-processed updates, but include the manual indicator. Ensure that the update is properly associated with the correct task or subtask.",
|
||||||
|
"status": "pending",
|
||||||
|
"testStrategy": "Test that updates made with the --simple flag are correctly saved to the history and persist between application restarts."
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": 6,
|
||||||
|
"title": "Update help documentation for the new flag",
|
||||||
|
"description": "Update the help documentation for both update-task and update-subtask commands to include information about the new --simple flag.",
|
||||||
|
"dependencies": [
|
||||||
|
1
|
||||||
|
],
|
||||||
|
"details": "Add clear descriptions of the --simple flag to the help text for both commands. The documentation should explain that the flag allows users to add timestamped notes without AI processing, directly using the text from the prompt. Include examples of how to use the flag.",
|
||||||
|
"status": "pending",
|
||||||
|
"testStrategy": "Verify that the help command correctly displays information about the --simple flag for both update commands."
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": 7,
|
||||||
|
"title": "Implement integration tests for the simple update feature",
|
||||||
|
"description": "Create comprehensive integration tests to verify that the --simple flag works correctly in both commands and integrates properly with the rest of the system.",
|
||||||
|
"dependencies": [
|
||||||
|
1,
|
||||||
|
2,
|
||||||
|
3,
|
||||||
|
4,
|
||||||
|
5
|
||||||
|
],
|
||||||
|
"details": "Develop integration tests that verify the entire flow of using the --simple flag with both update commands. Tests should confirm that updates are correctly formatted, stored, and displayed. Include edge cases such as empty input, very long input, and special characters.",
|
||||||
|
"status": "pending",
|
||||||
|
"testStrategy": "Run integration tests that simulate user input with and without the --simple flag and verify the correct behavior in each case."
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": 8,
|
||||||
|
"title": "Perform final validation and documentation",
|
||||||
|
"description": "Conduct final validation of the feature across all use cases and update the user documentation to include the new functionality.",
|
||||||
|
"dependencies": [
|
||||||
|
1,
|
||||||
|
2,
|
||||||
|
3,
|
||||||
|
4,
|
||||||
|
5,
|
||||||
|
6,
|
||||||
|
7
|
||||||
|
],
|
||||||
|
"details": "Perform end-to-end testing of the feature to ensure it works correctly in all scenarios. Update the user documentation with detailed information about the new --simple flag, including its purpose, how to use it, and examples. Ensure that the documentation clearly explains the difference between AI-processed updates and simple updates.",
|
||||||
|
"status": "pending",
|
||||||
|
"testStrategy": "Manually test all use cases and review documentation for completeness and clarity."
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": 63,
|
||||||
|
"title": "Add pnpm Support for the Taskmaster Package",
|
||||||
|
"description": "Implement full support for pnpm as an alternative package manager in the Taskmaster application, allowing users to install and manage the package using pnpm alongside the existing npm and yarn options.",
|
||||||
|
"details": "This task involves:\n\n1. Update the installation documentation to include pnpm installation commands (e.g., `pnpm add taskmaster`).\n\n2. Ensure all package scripts are compatible with pnpm's execution model:\n - Review and modify package.json scripts if necessary\n - Test script execution with pnpm syntax (`pnpm run <script>`)\n - Address any pnpm-specific path or execution differences\n\n3. Create a pnpm-lock.yaml file by installing dependencies with pnpm.\n\n4. Test the application's installation and operation when installed via pnpm:\n - Global installation (`pnpm add -g taskmaster`)\n - Local project installation\n - Verify CLI commands work correctly when installed with pnpm\n\n5. Update CI/CD pipelines to include testing with pnpm:\n - Add a pnpm test matrix to GitHub Actions workflows\n - Ensure tests pass when dependencies are installed with pnpm\n\n6. Handle any pnpm-specific dependency resolution issues:\n - Address potential hoisting differences between npm/yarn and pnpm\n - Test with pnpm's strict mode to ensure compatibility\n\n7. Document any pnpm-specific considerations or commands in the README and documentation.\n\n8. Consider adding a pnpm-specific installation script or helper if needed.\n\nThis implementation should maintain full feature parity regardless of which package manager is used to install Taskmaster.",
|
||||||
|
"testStrategy": "1. Manual Testing:\n - Install Taskmaster globally using pnpm: `pnpm add -g taskmaster`\n - Install Taskmaster locally in a test project: `pnpm add taskmaster`\n - Verify all CLI commands function correctly with both installation methods\n - Test all major features to ensure they work identically to npm/yarn installations\n\n2. Automated Testing:\n - Create a dedicated test workflow in GitHub Actions that uses pnpm\n - Run the full test suite using pnpm to install dependencies\n - Verify all tests pass with the same results as npm/yarn\n\n3. Documentation Testing:\n - Review all documentation to ensure pnpm commands are correctly documented\n - Verify installation instructions work as written\n - Test any pnpm-specific instructions or notes\n\n4. Compatibility Testing:\n - Test on different operating systems (Windows, macOS, Linux)\n - Verify compatibility with different pnpm versions (latest stable and LTS)\n - Test in environments with multiple package managers installed\n\n5. Edge Case Testing:\n - Test installation in a project that uses pnpm workspaces\n - Verify behavior when upgrading from an npm/yarn installation to pnpm\n - Test with pnpm's various flags and modes (--frozen-lockfile, --strict-peer-dependencies)\n\n6. Performance Comparison:\n - Measure and document any performance differences between package managers\n - Compare installation times and disk space usage\n\nSuccess criteria: Taskmaster should install and function identically regardless of whether it was installed via npm, yarn, or pnpm, with no degradation in functionality or performance.",
|
||||||
|
"status": "pending",
|
||||||
|
"dependencies": [],
|
||||||
|
"priority": "medium",
|
||||||
|
"subtasks": [
|
||||||
|
{
|
||||||
|
"id": 1,
|
||||||
|
"title": "Update Documentation for pnpm Support",
|
||||||
|
"description": "Revise installation and usage documentation to include pnpm commands and instructions for installing and managing Taskmaster with pnpm.",
|
||||||
|
"dependencies": [],
|
||||||
|
"details": "Add pnpm installation commands (e.g., `pnpm add taskmaster`) and update all relevant sections in the README and official docs to reflect pnpm as a supported package manager.",
|
||||||
|
"status": "pending",
|
||||||
|
"testStrategy": "Verify that documentation changes are clear, accurate, and render correctly in all documentation formats."
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": 2,
|
||||||
|
"title": "Ensure Package Scripts Compatibility with pnpm",
|
||||||
|
"description": "Review and update package.json scripts to ensure they work seamlessly with pnpm's execution model.",
|
||||||
|
"dependencies": [
|
||||||
|
1
|
||||||
|
],
|
||||||
|
"details": "Test all scripts using `pnpm run <script>`, address any pnpm-specific path or execution differences, and modify scripts as needed for compatibility.",
|
||||||
|
"status": "pending",
|
||||||
|
"testStrategy": "Run all package scripts using pnpm and confirm expected behavior matches npm/yarn."
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": 3,
|
||||||
|
"title": "Generate and Validate pnpm Lockfile",
|
||||||
|
"description": "Install dependencies using pnpm to create a pnpm-lock.yaml file and ensure it accurately reflects the project's dependency tree.",
|
||||||
|
"dependencies": [
|
||||||
|
2
|
||||||
|
],
|
||||||
|
"details": "Run `pnpm install` to generate the lockfile, check it into version control, and verify that dependency resolution is correct and consistent.",
|
||||||
|
"status": "pending",
|
||||||
|
"testStrategy": "Compare dependency trees between npm/yarn and pnpm; ensure no missing or extraneous dependencies."
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": 4,
|
||||||
|
"title": "Test Taskmaster Installation and Operation with pnpm",
|
||||||
|
"description": "Thoroughly test Taskmaster's installation and CLI operation when installed via pnpm, both globally and locally.",
|
||||||
|
"dependencies": [
|
||||||
|
3
|
||||||
|
],
|
||||||
|
"details": "Perform global (`pnpm add -g taskmaster`) and local installations, verify CLI commands, and check for any pnpm-specific issues or incompatibilities.",
|
||||||
|
"status": "pending",
|
||||||
|
"testStrategy": "Document and resolve any errors encountered during installation or usage with pnpm."
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": 5,
|
||||||
|
"title": "Integrate pnpm into CI/CD Pipeline",
|
||||||
|
"description": "Update CI/CD workflows to include pnpm in the test matrix, ensuring all tests pass when dependencies are installed with pnpm.",
|
||||||
|
"dependencies": [
|
||||||
|
4
|
||||||
|
],
|
||||||
|
"details": "Modify GitHub Actions or other CI configurations to use pnpm/action-setup, run tests with pnpm, and cache pnpm dependencies for efficiency.",
|
||||||
|
"status": "pending",
|
||||||
|
"testStrategy": "Confirm that CI passes for all supported package managers, including pnpm, and that pnpm-specific jobs are green."
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": 64,
|
||||||
|
"title": "Add Yarn Support for Taskmaster Installation",
|
||||||
|
"description": "Implement full support for installing and managing Taskmaster using Yarn package manager, providing users with an alternative to npm and pnpm.",
|
||||||
|
"details": "This task involves adding comprehensive Yarn support to the Taskmaster package to ensure it can be properly installed and managed using Yarn. Implementation should include:\n\n1. Update package.json to ensure compatibility with Yarn installation methods\n2. Verify all scripts and dependencies work correctly with Yarn\n3. Add Yarn-specific configuration files (e.g., .yarnrc.yml if needed)\n4. Update installation documentation to include Yarn installation instructions\n5. Ensure all post-install scripts work correctly with Yarn\n6. Verify that all CLI commands function properly when installed via Yarn\n7. Handle any Yarn-specific package resolution or hoisting issues\n8. Test compatibility with different Yarn versions (classic and berry/v2+)\n9. Ensure proper lockfile generation and management\n10. Update any package manager detection logic in the codebase to recognize Yarn installations\n\nThe implementation should maintain feature parity regardless of which package manager (npm, pnpm, or Yarn) is used to install Taskmaster.",
|
||||||
|
"testStrategy": "Testing should verify complete Yarn support through the following steps:\n\n1. Fresh installation tests:\n - Install Taskmaster using `yarn add taskmaster` (global and local installations)\n - Verify installation completes without errors\n - Check that all binaries and executables are properly linked\n\n2. Functionality tests:\n - Run all Taskmaster commands on a Yarn-installed version\n - Verify all features work identically to npm/pnpm installations\n - Test with both Yarn v1 (classic) and Yarn v2+ (berry)\n\n3. Update/uninstall tests:\n - Test updating the package using Yarn commands\n - Verify clean uninstallation using Yarn\n\n4. CI integration:\n - Add Yarn installation tests to CI pipeline\n - Test on different operating systems (Windows, macOS, Linux)\n\n5. Documentation verification:\n - Ensure all documentation accurately reflects Yarn installation methods\n - Verify any Yarn-specific commands or configurations are properly documented\n\n6. Edge cases:\n - Test installation in monorepo setups using Yarn workspaces\n - Verify compatibility with other Yarn-specific features (plug'n'play, zero-installs)\n\nAll tests should pass with the same results as when using npm or pnpm.",
|
||||||
|
"status": "pending",
|
||||||
|
"dependencies": [],
|
||||||
|
"priority": "medium",
|
||||||
|
"subtasks": [
|
||||||
|
{
|
||||||
|
"id": 1,
|
||||||
|
"title": "Update package.json for Yarn Compatibility",
|
||||||
|
"description": "Modify the package.json file to ensure all dependencies, scripts, and configurations are compatible with Yarn's installation and resolution methods.",
|
||||||
|
"dependencies": [],
|
||||||
|
"details": "Review and update dependency declarations, script syntax, and any package manager-specific fields to avoid conflicts or unsupported features when using Yarn.",
|
||||||
|
"status": "pending",
|
||||||
|
"testStrategy": "Run 'yarn install' and 'yarn run <script>' for all scripts to confirm successful execution and dependency resolution."
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": 2,
|
||||||
|
"title": "Add Yarn-Specific Configuration Files",
|
||||||
|
"description": "Introduce Yarn-specific configuration files such as .yarnrc.yml if needed to optimize Yarn behavior and ensure consistent installs.",
|
||||||
|
"dependencies": [
|
||||||
|
1
|
||||||
|
],
|
||||||
|
"details": "Determine if Yarn v2+ (Berry) or classic requires additional configuration for the project, and add or update .yarnrc.yml or .yarnrc files accordingly.",
|
||||||
|
"status": "pending",
|
||||||
|
"testStrategy": "Verify that Yarn respects the configuration by running installs and checking for expected behaviors (e.g., plug'n'play, nodeLinker settings)."
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": 3,
|
||||||
|
"title": "Test and Fix Yarn Compatibility for Scripts and CLI",
|
||||||
|
"description": "Ensure all scripts, post-install hooks, and CLI commands function correctly when Taskmaster is installed and managed via Yarn.",
|
||||||
|
"dependencies": [
|
||||||
|
2
|
||||||
|
],
|
||||||
|
"details": "Test all lifecycle scripts, post-install actions, and CLI commands using Yarn. Address any issues related to environment variables, script execution, or dependency hoisting.",
|
||||||
|
"status": "pending",
|
||||||
|
"testStrategy": "Install Taskmaster using Yarn and run all documented scripts and CLI commands, comparing results to npm/pnpm installations."
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": 4,
|
||||||
|
"title": "Update Documentation for Yarn Installation and Usage",
|
||||||
|
"description": "Revise installation and usage documentation to include clear instructions for installing and managing Taskmaster with Yarn.",
|
||||||
|
"dependencies": [
|
||||||
|
3
|
||||||
|
],
|
||||||
|
"details": "Add Yarn-specific installation commands, troubleshooting tips, and notes on version compatibility to the README and any relevant docs.",
|
||||||
|
"status": "pending",
|
||||||
|
"testStrategy": "Review documentation for accuracy and clarity; have a user follow the Yarn instructions to verify successful installation and usage."
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": 5,
|
||||||
|
"title": "Implement and Test Package Manager Detection Logic",
|
||||||
|
"description": "Update or add logic in the codebase to detect Yarn installations and handle Yarn-specific behaviors, ensuring feature parity across package managers.",
|
||||||
|
"dependencies": [
|
||||||
|
4
|
||||||
|
],
|
||||||
|
"details": "Modify detection logic to recognize Yarn (classic and berry), handle lockfile generation, and resolve any Yarn-specific package resolution or hoisting issues.",
|
||||||
|
"status": "pending",
|
||||||
|
"testStrategy": "Install Taskmaster using npm, pnpm, and Yarn (classic and berry), verifying that the application detects the package manager correctly and behaves consistently."
|
||||||
|
}
|
||||||
|
]
|
||||||
}
|
}
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
File diff suppressed because one or more lines are too long
Reference in New Issue
Block a user