refactor(tasks): Align update-tasks with unified AI service and remove obsolete helpers

Completes the refactoring of the AI-facing task management functions by aligning `update-tasks.js` with the unified service architecture and removing now-unused helper files.

Key Changes:

- **`update-tasks.js` Refactoring:**

    - Replaced direct AI client calls and AI-specific config fetching with a call to `generateTextService` from `ai-services-unified.js`.

    - Preserved the original system and user prompts requesting a JSON array output.

    - Implemented manual JSON parsing (`parseUpdatedTasksFromText`) with Zod validation to handle the text response reliably (see the sketch below).

    - Updated the core function signature to accept the standard `context` object (`{ session, mcpLog }`).

    - Corrected logger implementation to handle both MCP (`mcpLog`) and CLI (`consoleLog`) contexts appropriately.

- **Related Component Updates:**

    - Refactored `mcp-server/src/core/direct-functions/update-tasks.js` to use the standard direct function pattern (logger wrapper, silent mode, call core function with context).

    - Verified `mcp-server/src/tools/update.js` correctly passes arguments and context.

    - Verified `scripts/modules/commands.js` (update command) correctly calls the refactored core function.

- **Obsolete File Cleanup:**

    - Removed the now-unused `scripts/modules/task-manager/get-subtasks-from-ai.js` file and its export, as its functionality was integrated into `expand-task.js`.

    - Removed the now-unused `scripts/modules/task-manager/generate-subtask-prompt.js` file and its export for the same reason.

- **Task Management:**

    - Marked subtasks 61.39, 61.40, and 61.41 as complete.

This commit finalizes the alignment of `updateTasks`, `updateTaskById`, `expandTask`, `expandAllTasks`, `analyzeTaskComplexity`, `addTask`, and `parsePRD` with the unified AI service and configuration management patterns.
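
The essence of the new flow in `update-tasks.js` is a single `generateTextService` call followed by manual JSON extraction and Zod validation. The sketch below condenses the code from the diff further down; it is illustrative rather than a drop-in replacement: the schema is abbreviated to required fields and `updateTasksSketch` is a made-up wrapper name.

```js
import { z } from 'zod';
import { generateTextService } from '../ai-services-unified.js'; // path as used from scripts/modules/task-manager/

// Abbreviated version of the Zod schema from the diff (optional fields omitted).
const updatedTaskSchema = z
	.object({
		id: z.number().int(),
		title: z.string(),
		description: z.string(),
		status: z.string(),
		dependencies: z.array(z.union([z.number().int(), z.string()]))
	})
	.strip();

async function updateTasksSketch(tasksToUpdate, prompt, useResearch, session) {
	const systemPrompt = 'You are an AI assistant helping to update software development tasks...'; // abbreviated
	const userPrompt = `Here are the tasks to update:\n${JSON.stringify(tasksToUpdate, null, 2)}\n\nPlease update these tasks based on the following new context:\n${prompt}\n\nReturn only the updated tasks as a valid JSON array.`;

	// One call to the unified service; no per-provider clients, streaming, or fallback loops here.
	const responseText = await generateTextService({
		prompt: userPrompt,
		systemPrompt,
		role: useResearch ? 'research' : 'main',
		session
	});

	// Manual extraction: prefer a fenced code block, otherwise take first '[' .. last ']'.
	let jsonText = responseText.trim();
	const fenced = jsonText.match(/`{3}(?:json)?\s*([\s\S]*?)\s*`{3}/);
	if (fenced) {
		jsonText = fenced[1].trim();
	} else {
		const start = jsonText.indexOf('[');
		const end = jsonText.lastIndexOf(']');
		if (start !== -1 && end > start) jsonText = jsonText.slice(start, end + 1);
	}

	// Throws (SyntaxError / ZodError) on malformed output, mirroring parseUpdatedTasksFromText.
	return z.array(updatedTaskSchema).parse(JSON.parse(jsonText));
}
```
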
Author: Eyal Toledano
Date: 2025-04-25 04:09:14 -04:00
parent 443824a35e
commit 60363be0fe
12 changed files with 676 additions and 804 deletions

View File

@@ -6,182 +6,122 @@
import { updateTasks } from '../../../../scripts/modules/task-manager.js';
import {
enableSilentMode,
disableSilentMode
disableSilentMode,
isSilentMode
} from '../../../../scripts/modules/utils.js';
import {
getAnthropicClientForMCP,
getPerplexityClientForMCP
} from '../utils/ai-client-utils.js';
/**
* Direct function wrapper for updating tasks based on new context/prompt.
*
* @param {Object} args - Command arguments containing fromId, prompt, useResearch and tasksJsonPath.
* @param {Object} args - Command arguments containing from, prompt, research and tasksJsonPath.
* @param {Object} log - Logger object.
* @param {Object} context - Context object containing session data.
* @returns {Promise<Object>} - Result object with success status and data/error information.
*/
export async function updateTasksDirect(args, log, context = {}) {
const { session } = context; // Only extract session, not reportProgress
const { session } = context; // Extract session
const { tasksJsonPath, from, prompt, research } = args;
try {
log.info(`Updating tasks with args: ${JSON.stringify(args)}`);
// Create the standard logger wrapper
const logWrapper = {
info: (message, ...args) => log.info(message, ...args),
warn: (message, ...args) => log.warn(message, ...args),
error: (message, ...args) => log.error(message, ...args),
debug: (message, ...args) => log.debug && log.debug(message, ...args),
success: (message, ...args) => log.info(message, ...args)
};
// Check if tasksJsonPath was provided
if (!tasksJsonPath) {
const errorMessage = 'tasksJsonPath is required but was not provided.';
log.error(errorMessage);
return {
success: false,
error: { code: 'MISSING_ARGUMENT', message: errorMessage },
fromCache: false
};
}
// Check for the common mistake of using 'id' instead of 'from'
if (args.id !== undefined && from === undefined) {
const errorMessage =
"You specified 'id' parameter but 'update' requires 'from' parameter. Use 'from' for this tool or use 'update_task' tool if you want to update a single task.";
log.error(errorMessage);
return {
success: false,
error: {
code: 'PARAMETER_MISMATCH',
message: errorMessage,
suggestion:
"Use 'from' parameter instead of 'id', or use the 'update_task' tool for single task updates"
},
fromCache: false
};
}
// Check required parameters
if (!from) {
const errorMessage =
'No from ID specified. Please provide a task ID to start updating from.';
log.error(errorMessage);
return {
success: false,
error: { code: 'MISSING_FROM_ID', message: errorMessage },
fromCache: false
};
}
if (!prompt) {
const errorMessage =
'No prompt specified. Please provide a prompt with new context for task updates.';
log.error(errorMessage);
return {
success: false,
error: { code: 'MISSING_PROMPT', message: errorMessage },
fromCache: false
};
}
// Parse fromId - handle both string and number values
let fromId;
if (typeof from === 'string') {
fromId = parseInt(from, 10);
if (isNaN(fromId)) {
const errorMessage = `Invalid from ID: ${from}. Task ID must be a positive integer.`;
log.error(errorMessage);
return {
success: false,
error: { code: 'INVALID_FROM_ID', message: errorMessage },
fromCache: false
};
}
} else {
fromId = from;
}
// Get research flag
const useResearch = research === true;
// Initialize appropriate AI client based on research flag
let aiClient;
try {
if (useResearch) {
log.info('Using Perplexity AI for research-backed task updates');
aiClient = await getPerplexityClientForMCP(session, log);
} else {
log.info('Using Claude AI for task updates');
aiClient = getAnthropicClientForMCP(session, log);
}
} catch (error) {
log.error(`Failed to initialize AI client: ${error.message}`);
return {
success: false,
error: {
code: 'AI_CLIENT_ERROR',
message: `Cannot initialize AI client: ${error.message}`
},
fromCache: false
};
}
log.info(
`Updating tasks from ID ${fromId} with prompt "${prompt}" and research: ${useResearch}`
);
// Create the logger wrapper to ensure compatibility with core functions
const logWrapper = {
info: (message, ...args) => log.info(message, ...args),
warn: (message, ...args) => log.warn(message, ...args),
error: (message, ...args) => log.error(message, ...args),
debug: (message, ...args) => log.debug && log.debug(message, ...args), // Handle optional debug
success: (message, ...args) => log.info(message, ...args) // Map success to info if needed
// --- Input Validation (Keep existing checks) ---
if (!tasksJsonPath) {
log.error('updateTasksDirect called without tasksJsonPath');
return {
success: false,
error: { code: 'MISSING_ARGUMENT', message: 'tasksJsonPath is required' },
fromCache: false
};
try {
// Enable silent mode to prevent console logs from interfering with JSON response
enableSilentMode();
// Execute core updateTasks function, passing the AI client and session
await updateTasks(tasksJsonPath, fromId, prompt, useResearch, {
mcpLog: logWrapper, // Pass the wrapper instead of the raw log object
session
});
// Since updateTasks doesn't return a value but modifies the tasks file,
// we'll return a success message
return {
success: true,
data: {
message: `Successfully updated tasks from ID ${fromId} based on the prompt`,
fromId,
tasksPath: tasksJsonPath,
useResearch
},
fromCache: false // This operation always modifies state and should never be cached
};
} catch (error) {
log.error(`Error updating tasks: ${error.message}`);
return {
success: false,
error: {
code: 'UPDATE_TASKS_ERROR',
message: error.message || 'Unknown error updating tasks'
},
fromCache: false
};
} finally {
// Make sure to restore normal logging even if there's an error
disableSilentMode();
}
} catch (error) {
// Ensure silent mode is disabled
disableSilentMode();
log.error(`Error updating tasks: ${error.message}`);
}
if (args.id !== undefined && from === undefined) {
// Keep 'from' vs 'id' check
const errorMessage =
"Use 'from' parameter, not 'id', or use 'update_task' tool.";
log.error(errorMessage);
return {
success: false,
error: { code: 'PARAMETER_MISMATCH', message: errorMessage },
fromCache: false
};
}
if (!from) {
log.error('Missing from ID.');
return {
success: false,
error: { code: 'MISSING_FROM_ID', message: 'No from ID specified.' },
fromCache: false
};
}
if (!prompt) {
log.error('Missing prompt.');
return {
success: false,
error: { code: 'MISSING_PROMPT', message: 'No prompt specified.' },
fromCache: false
};
}
let fromId;
try {
fromId = parseInt(from, 10);
if (isNaN(fromId) || fromId <= 0) throw new Error();
} catch {
log.error(`Invalid from ID: ${from}`);
return {
success: false,
error: {
code: 'UPDATE_TASKS_ERROR',
message: error.message || 'Unknown error updating tasks'
code: 'INVALID_FROM_ID',
message: `Invalid from ID: ${from}. Must be a positive integer.`
},
fromCache: false
};
}
const useResearch = research === true;
// --- End Input Validation ---
log.info(`Updating tasks from ID ${fromId}. Research: ${useResearch}`);
enableSilentMode(); // Enable silent mode
try {
// Execute core updateTasks function, passing session context
await updateTasks(
tasksJsonPath,
fromId,
prompt,
useResearch,
// Pass context with logger wrapper and session
{ mcpLog: logWrapper, session },
'json' // Explicitly request JSON format for MCP
);
// Since updateTasks modifies file and doesn't return data, create success message
return {
success: true,
data: {
message: `Successfully initiated update for tasks from ID ${fromId} based on the prompt.`,
fromId,
tasksPath: tasksJsonPath,
useResearch
},
fromCache: false // Modifies state
};
} catch (error) {
log.error(`Error executing core updateTasks: ${error.message}`);
return {
success: false,
error: {
code: 'UPDATE_TASKS_CORE_ERROR',
message: error.message || 'Unknown error updating tasks'
},
fromCache: false
};
} finally {
disableSilentMode(); // Ensure silent mode is disabled
}
}
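
For readers skimming the interleaved diff above, the standard direct function pattern it implements can be distilled into the following skeleton. This is a minimal sketch assuming the same `utils.js` helpers; `exampleDirect`, `someCoreFunction`, and the error code are placeholders, and input validation is elided.

```js
import {
	enableSilentMode,
	disableSilentMode
} from '../../../../scripts/modules/utils.js'; // same import as the file above

// Placeholder standing in for a core task-manager function (e.g. updateTasks).
async function someCoreFunction(tasksJsonPath, context) {
	context.mcpLog.info(`Would operate on ${tasksJsonPath}`);
}

export async function exampleDirect(args, log, context = {}) {
	const { session } = context;
	// Logger wrapper: gives core functions the .success() method MCP loggers lack.
	const logWrapper = {
		info: (msg, ...rest) => log.info(msg, ...rest),
		warn: (msg, ...rest) => log.warn(msg, ...rest),
		error: (msg, ...rest) => log.error(msg, ...rest),
		debug: (msg, ...rest) => log.debug && log.debug(msg, ...rest),
		success: (msg, ...rest) => log.info(msg, ...rest)
	};

	enableSilentMode(); // keep console output from corrupting the JSON tool response
	try {
		await someCoreFunction(args.tasksJsonPath, { mcpLog: logWrapper, session });
		return { success: true, data: { message: 'ok' }, fromCache: false };
	} catch (error) {
		return {
			success: false,
			error: { code: 'CORE_ERROR', message: error.message }, // placeholder code
			fromCache: false
		};
	} finally {
		disableSilentMode(); // always restore normal logging
	}
}
```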

View File

@@ -11,6 +11,7 @@ import {
} from './utils.js';
import { updateTasksDirect } from '../core/task-master-core.js';
import { findTasksJsonPath } from '../core/utils/path-utils.js';
import path from 'path';
/**
* Register the update tool with the MCP server
@@ -41,26 +42,25 @@ export function registerUpdateTool(server) {
}),
execute: async (args, { log, session }) => {
try {
log.info(`Updating tasks with args: ${JSON.stringify(args)}`);
log.info(`Executing update tool with args: ${JSON.stringify(args)}`);
// Get project root from args or session
const rootFolder =
args.projectRoot || getProjectRootFromSession(session, log);
// Ensure project root was determined
if (!rootFolder) {
// 1. Get Project Root
const rootFolder = args.projectRoot;
if (!rootFolder || !path.isAbsolute(rootFolder)) {
return createErrorResponse(
'Could not determine project root. Please provide it explicitly or ensure your session contains valid root information.'
'projectRoot is required and must be absolute.'
);
}
log.info(`Project root: ${rootFolder}`);
// Resolve the path to tasks.json
// 2. Resolve Path
let tasksJsonPath;
try {
tasksJsonPath = findTasksJsonPath(
{ projectRoot: rootFolder, file: args.file },
log
);
log.info(`Resolved tasks path: ${tasksJsonPath}`);
} catch (error) {
log.error(`Error finding tasks.json: ${error.message}`);
return createErrorResponse(
@@ -68,6 +68,7 @@ export function registerUpdateTool(server) {
);
}
// 3. Call Direct Function
const result = await updateTasksDirect(
{
tasksJsonPath: tasksJsonPath,
@@ -79,20 +80,12 @@ export function registerUpdateTool(server) {
{ session }
);
if (result.success) {
log.info(
`Successfully updated tasks from ID ${args.from}: ${result.data.message}`
);
} else {
log.error(
`Failed to update tasks: ${result.error?.message || 'Unknown error'}`
);
}
// 4. Handle Result
log.info(`updateTasksDirect result: success=${result.success}`);
return handleApiResult(result, log, 'Error updating tasks');
} catch (error) {
log.error(`Error in update tool: ${error.message}`);
return createErrorResponse(error.message);
log.error(`Critical error in update tool execute: ${error.message}`);
return createErrorResponse(`Internal tool error: ${error.message}`);
}
}
});

View File

@@ -211,7 +211,7 @@ function registerCommands(programInstance) {
)
.action(async (options) => {
const tasksPath = options.file;
const fromId = parseInt(options.from, 10);
const fromId = parseInt(options.from, 10); // Validation happens here
const prompt = options.prompt;
const useResearch = options.research || false;
@@ -260,7 +260,14 @@ function registerCommands(programInstance) {
);
}
await updateTasks(tasksPath, fromId, prompt, useResearch);
// Call core updateTasks, passing empty context for CLI
await updateTasks(
tasksPath,
fromId,
prompt,
useResearch,
{} // Pass empty context
);
});
// update-task command

View File

@@ -22,8 +22,6 @@ import removeSubtask from './task-manager/remove-subtask.js';
import updateSubtaskById from './task-manager/update-subtask-by-id.js';
import removeTask from './task-manager/remove-task.js';
import taskExists from './task-manager/task-exists.js';
import generateSubtaskPrompt from './task-manager/generate-subtask-prompt.js';
import getSubtasksFromAI from './task-manager/get-subtasks-from-ai.js';
import isTaskDependentOn from './task-manager/is-task-dependent.js';
// Export task manager functions
@@ -47,7 +45,5 @@ export {
removeTask,
findTaskById,
taskExists,
generateSubtaskPrompt,
getSubtasksFromAI,
isTaskDependentOn
};

View File

@@ -1,51 +0,0 @@
/**
* Generate a prompt for creating subtasks from a task
* @param {Object} task - The task to generate subtasks for
* @param {number} numSubtasks - Number of subtasks to generate
* @param {string} additionalContext - Additional context to include in the prompt
* @param {Object} taskAnalysis - Optional complexity analysis for the task
* @returns {string} - The generated prompt
*/
function generateSubtaskPrompt(
task,
numSubtasks,
additionalContext = '',
taskAnalysis = null
) {
// Build the system prompt
const basePrompt = `You need to break down the following task into ${numSubtasks} specific subtasks that can be implemented one by one.
Task ID: ${task.id}
Title: ${task.title}
Description: ${task.description || 'No description provided'}
Current details: ${task.details || 'No details provided'}
${additionalContext ? `\nAdditional context to consider: ${additionalContext}` : ''}
${taskAnalysis ? `\nComplexity analysis: This task has a complexity score of ${taskAnalysis.complexityScore}/10.` : ''}
${taskAnalysis && taskAnalysis.reasoning ? `\nReasoning for complexity: ${taskAnalysis.reasoning}` : ''}
Subtasks should:
1. Be specific and actionable implementation steps
2. Follow a logical sequence
3. Each handle a distinct part of the parent task
4. Include clear guidance on implementation approach
5. Have appropriate dependency chains between subtasks
6. Collectively cover all aspects of the parent task
Return exactly ${numSubtasks} subtasks with the following JSON structure:
[
{
"id": 1,
"title": "First subtask title",
"description": "Detailed description",
"dependencies": [],
"details": "Implementation details"
},
...more subtasks...
]
Note on dependencies: Subtasks can depend on other subtasks with lower IDs. Use an empty array if there are no dependencies.`;
return basePrompt;
}
export default generateSubtaskPrompt;

View File

@@ -19,7 +19,7 @@ function generateTaskFiles(tasksPath, outputDir, options = {}) {
// Determine if we're in MCP mode by checking for mcpLog
const isMcpMode = !!options?.mcpLog;
log('info', `Reading tasks from ${tasksPath}...`);
log('info', `Preparing to regenerate task files in ${tasksPath}`);
const data = readJSON(tasksPath);
if (!data || !data.tasks) {
@@ -31,13 +31,10 @@ function generateTaskFiles(tasksPath, outputDir, options = {}) {
fs.mkdirSync(outputDir, { recursive: true });
}
log('info', `Found ${data.tasks.length} tasks to generate files for.`);
log('info', `Found ${data.tasks.length} tasks to regenerate`);
// Validate and fix dependencies before generating files
log(
'info',
`Validating and fixing dependencies before generating files...`
);
log('info', `Validating and fixing dependencies`);
validateAndFixDependencies(data, tasksPath);
// Generate task files
@@ -120,7 +117,7 @@ function generateTaskFiles(tasksPath, outputDir, options = {}) {
// Write the file
fs.writeFileSync(taskPath, content);
log('info', `Generated: task_${task.id.toString().padStart(3, '0')}.txt`);
// log('info', `Generated: task_${task.id.toString().padStart(3, '0')}.txt`); // Pollutes the CLI output
});
log(

View File

@@ -1,139 +0,0 @@
import { log, isSilentMode } from '../utils.js';
import {
_handleAnthropicStream,
getConfiguredAnthropicClient,
parseSubtasksFromText
} from '../ai-services.js';
// Import necessary config getters
import {
getMainModelId,
getMainMaxTokens,
getMainTemperature,
getResearchModelId,
getResearchMaxTokens,
getResearchTemperature
} from '../config-manager.js';
/**
* Call AI to generate subtasks based on a prompt
* @param {string} prompt - The prompt to send to the AI
* @param {boolean} useResearch - Whether to use Perplexity for research
* @param {Object} session - Session object from MCP
* @param {Object} mcpLog - MCP logger object
* @returns {Object} - Object containing generated subtasks
*/
async function getSubtasksFromAI(
prompt,
useResearch = false,
session = null,
mcpLog = null
) {
try {
// Get the configured client
const client = getConfiguredAnthropicClient(session);
// Prepare API parameters
const apiParams = {
model: getMainModelId(session),
max_tokens: getMainMaxTokens(session),
temperature: getMainTemperature(session),
system:
'You are an AI assistant helping with task breakdown for software development.',
messages: [{ role: 'user', content: prompt }]
};
if (mcpLog) {
mcpLog.info('Calling AI to generate subtasks');
}
let responseText;
// Call the AI - with research if requested
if (useResearch && perplexity) {
if (mcpLog) {
mcpLog.info('Using Perplexity AI for research-backed subtasks');
}
const perplexityModel = getResearchModelId(session);
const result = await perplexity.chat.completions.create({
model: perplexityModel,
messages: [
{
role: 'system',
content:
'You are an AI assistant helping with task breakdown for software development. Research implementation details and provide comprehensive subtasks.'
},
{ role: 'user', content: prompt }
],
temperature: getResearchTemperature(session),
max_tokens: getResearchMaxTokens(session)
});
responseText = result.choices[0].message.content;
} else {
// Use regular Claude
if (mcpLog) {
mcpLog.info('Using Claude for generating subtasks');
}
// Call the streaming API
responseText = await _handleAnthropicStream(
client,
apiParams,
{ mcpLog, silentMode: isSilentMode() },
!isSilentMode()
);
}
// Ensure we have a valid response
if (!responseText) {
throw new Error('Empty response from AI');
}
// Try to parse the subtasks
try {
const parsedSubtasks = parseSubtasksFromText(responseText);
if (
!parsedSubtasks ||
!Array.isArray(parsedSubtasks) ||
parsedSubtasks.length === 0
) {
throw new Error(
'Failed to parse valid subtasks array from AI response'
);
}
return { subtasks: parsedSubtasks };
} catch (parseError) {
if (mcpLog) {
mcpLog.error(`Error parsing subtasks: ${parseError.message}`);
mcpLog.error(`Response start: ${responseText.substring(0, 200)}...`);
} else {
log('error', `Error parsing subtasks: ${parseError.message}`);
}
// Return error information instead of fallback subtasks
return {
error: parseError.message,
taskId: null, // This will be filled in by the calling function
suggestion:
'Use \'task-master update-task --id=<id> --prompt="Generate subtasks for this task"\' to manually create subtasks.'
};
}
} catch (error) {
if (mcpLog) {
mcpLog.error(`Error generating subtasks: ${error.message}`);
} else {
log('error', `Error generating subtasks: ${error.message}`);
}
// Return error information instead of fallback subtasks
return {
error: error.message,
taskId: null, // This will be filled in by the calling function
suggestion:
'Use \'task-master update-task --id=<id> --prompt="Generate subtasks for this task"\' to manually create subtasks.'
};
}
}
export default getSubtasksFromAI;

View File

@@ -2,8 +2,15 @@ import path from 'path';
import chalk from 'chalk';
import boxen from 'boxen';
import Table from 'cli-table3';
import { z } from 'zod'; // Keep Zod for post-parsing validation
import { log, readJSON, writeJSON, truncate, isSilentMode } from '../utils.js';
import {
log as consoleLog,
readJSON,
writeJSON,
truncate,
isSilentMode
} from '../utils.js';
import {
getStatusWithColor,
@@ -21,68 +28,195 @@ import {
getMainTemperature
} from '../config-manager.js';
import generateTaskFiles from './generate-task-files.js';
import { generateTextService } from '../ai-services-unified.js';
// Zod schema for validating the structure of tasks AFTER parsing
const updatedTaskSchema = z
.object({
id: z.number().int(),
title: z.string(),
description: z.string(),
status: z.string(),
dependencies: z.array(z.union([z.number().int(), z.string()])),
priority: z.string().optional(),
details: z.string().optional(),
testStrategy: z.string().optional(),
subtasks: z.array(z.any()).optional() // Keep subtasks flexible for now
})
.strip(); // Allow potential extra fields during parsing if needed, then validate structure
const updatedTaskArraySchema = z.array(updatedTaskSchema);
/**
* Update tasks based on new context
* Parses an array of task objects from AI's text response.
* @param {string} text - Response text from AI.
* @param {number} expectedCount - Expected number of tasks.
* @param {Function | Object} logFn - The logging function (consoleLog) or MCP log object.
* @param {boolean} isMCP - Flag indicating if logFn is MCP logger.
* @returns {Array} Parsed and validated tasks array.
* @throws {Error} If parsing or validation fails.
*/
function parseUpdatedTasksFromText(text, expectedCount, logFn, isMCP) {
// Helper for consistent logging inside parser
const report = (level, ...args) => {
if (isMCP) {
if (typeof logFn[level] === 'function') logFn[level](...args);
else logFn.info(...args);
} else if (!isSilentMode()) {
// Check silent mode for consoleLog
consoleLog(level, ...args);
}
};
report(
'info',
'Attempting to parse updated tasks array from text response...'
);
if (!text || text.trim() === '')
throw new Error('AI response text is empty.');
let cleanedResponse = text.trim();
const originalResponseForDebug = cleanedResponse;
// Extract from Markdown code block first
const codeBlockMatch = cleanedResponse.match(
/```(?:json)?\s*([\s\S]*?)\s*```/
);
if (codeBlockMatch) {
cleanedResponse = codeBlockMatch[1].trim();
report('info', 'Extracted JSON content from Markdown code block.');
} else {
// If no code block, find first '[' and last ']' for the array
const firstBracket = cleanedResponse.indexOf('[');
const lastBracket = cleanedResponse.lastIndexOf(']');
if (firstBracket !== -1 && lastBracket > firstBracket) {
cleanedResponse = cleanedResponse.substring(
firstBracket,
lastBracket + 1
);
report('info', 'Extracted content between first [ and last ].');
} else {
report(
'warn',
'Response does not appear to contain a JSON array structure. Parsing raw response.'
);
}
}
// Attempt to parse the array
let parsedTasks;
try {
parsedTasks = JSON.parse(cleanedResponse);
} catch (parseError) {
report('error', `Failed to parse JSON array: ${parseError.message}`);
report(
'error',
`Problematic JSON string (first 500 chars): ${cleanedResponse.substring(0, 500)}`
);
report(
'error',
`Original Raw Response (first 500 chars): ${originalResponseForDebug.substring(0, 500)}`
);
throw new Error(
`Failed to parse JSON response array: ${parseError.message}`
);
}
// Validate Array structure
if (!Array.isArray(parsedTasks)) {
report(
'error',
`Parsed content is not an array. Type: ${typeof parsedTasks}`
);
report(
'error',
`Parsed content sample: ${JSON.stringify(parsedTasks).substring(0, 200)}`
);
throw new Error('Parsed AI response is not a valid JSON array.');
}
report('info', `Successfully parsed ${parsedTasks.length} potential tasks.`);
if (expectedCount && parsedTasks.length !== expectedCount) {
report(
'warn',
`Expected ${expectedCount} tasks, but parsed ${parsedTasks.length}.`
);
}
// Validate each task object using Zod
const validationResult = updatedTaskArraySchema.safeParse(parsedTasks);
if (!validationResult.success) {
report('error', 'Parsed task array failed Zod validation.');
validationResult.error.errors.forEach((err) => {
report('error', ` - Path '${err.path.join('.')}': ${err.message}`);
});
throw new Error(
`AI response failed task structure validation: ${validationResult.error.message}`
);
}
report('info', 'Successfully validated task structure.');
// Return the validated data, potentially filtering/adjusting length if needed
return validationResult.data.slice(
0,
expectedCount || validationResult.data.length
);
}
/**
* Update tasks based on new context using the unified AI service.
* @param {string} tasksPath - Path to the tasks.json file
* @param {number} fromId - Task ID to start updating from
* @param {string} prompt - Prompt with new context
* @param {boolean} useResearch - Whether to use Perplexity AI for research
* @param {function} reportProgress - Function to report progress to MCP server (optional)
* @param {Object} mcpLog - MCP logger object (optional)
* @param {Object} session - Session object from MCP server (optional)
* @param {boolean} [useResearch=false] - Whether to use the research AI role.
* @param {Object} context - Context object containing session and mcpLog.
* @param {Object} [context.session] - Session object from MCP server.
* @param {Object} [context.mcpLog] - MCP logger object.
* @param {string} [outputFormat='text'] - Output format ('text' or 'json').
*/
async function updateTasks(
tasksPath,
fromId,
prompt,
useResearch = false,
{ reportProgress, mcpLog, session } = {}
context = {},
outputFormat = 'text' // Default to text for CLI
) {
// Determine output format based on mcpLog presence (simplification)
const outputFormat = mcpLog ? 'json' : 'text';
const { session, mcpLog } = context;
// Use mcpLog if available, otherwise use the imported consoleLog function
const logFn = mcpLog || consoleLog;
// Flag to easily check which logger type we have
const isMCP = !!mcpLog;
// Create custom reporter that checks for MCP log and silent mode
const report = (message, level = 'info') => {
if (mcpLog) {
mcpLog[level](message);
} else if (!isSilentMode() && outputFormat === 'text') {
// Only log to console if not in silent mode and outputFormat is 'text'
log(level, message);
}
};
if (isMCP)
logFn.info(`updateTasks called with context: session=${!!session}`);
else logFn('info', `updateTasks called`); // CLI log
try {
report(`Updating tasks from ID ${fromId} with prompt: "${prompt}"`);
if (isMCP) logFn.info(`Updating tasks from ID ${fromId}`);
else
logFn(
'info',
`Updating tasks from ID ${fromId} with prompt: "${prompt}"`
);
// Read the tasks file
// --- Task Loading/Filtering (Unchanged) ---
const data = readJSON(tasksPath);
if (!data || !data.tasks) {
if (!data || !data.tasks)
throw new Error(`No valid tasks found in ${tasksPath}`);
}
// Find tasks to update (ID >= fromId and not 'done')
const tasksToUpdate = data.tasks.filter(
(task) => task.id >= fromId && task.status !== 'done'
);
if (tasksToUpdate.length === 0) {
report(
`No tasks to update (all tasks with ID >= ${fromId} are already marked as done)`,
'info'
);
// Only show UI elements for text output (CLI)
if (outputFormat === 'text') {
console.log(
chalk.yellow(
`No tasks to update (all tasks with ID >= ${fromId} are already marked as done)`
)
);
}
return;
if (isMCP)
logFn.info(`No tasks to update (ID >= ${fromId} and not 'done').`);
else
logFn('info', `No tasks to update (ID >= ${fromId} and not 'done').`);
if (outputFormat === 'text') console.log(/* yellow message */);
return; // Nothing to do
}
// --- End Task Loading/Filtering ---
// Only show UI elements for text output (CLI)
// --- Display Tasks to Update (CLI Only - Unchanged) ---
if (outputFormat === 'text') {
// Show the tasks that will be updated
const table = new Table({
@@ -139,8 +273,10 @@ async function updateTasks(
)
);
}
// --- End Display Tasks ---
// Build the system prompt
// --- Build Prompts (Unchanged Core Logic) ---
// Keep the original system prompt logic
const systemPrompt = `You are an AI assistant helping to update software development tasks based on new context.
You will be given a set of tasks and a prompt describing changes or new implementation details.
Your job is to update the tasks to reflect these changes, while preserving their basic structure.
@@ -159,331 +295,158 @@ Guidelines:
The changes described in the prompt should be applied to ALL tasks in the list.`;
const taskData = JSON.stringify(tasksToUpdate, null, 2);
// Keep the original user prompt logic
const taskDataString = JSON.stringify(tasksToUpdate, null, 2);
const userPrompt = `Here are the tasks to update:\n${taskDataString}\n\nPlease update these tasks based on the following new context:\n${prompt}\n\nIMPORTANT: In the tasks JSON above, any subtasks with "status": "done" or "status": "completed" should be preserved exactly as is. Build your changes around these completed items.\n\nReturn only the updated tasks as a valid JSON array.`;
// --- End Build Prompts ---
// Initialize variables for model selection and fallback
let updatedTasks;
let loadingIndicator = null;
let claudeOverloaded = false;
let modelAttempts = 0;
const maxModelAttempts = 2; // Try up to 2 models before giving up
// Only create loading indicator for text output (CLI) initially
if (outputFormat === 'text') {
loadingIndicator = startLoadingIndicator(
useResearch
? 'Updating tasks with Perplexity AI research...'
: 'Updating tasks with Claude AI...'
'Calling AI service to update tasks...'
);
}
let responseText = '';
let updatedTasks;
try {
// Import the getAvailableAIModel function
const { getAvailableAIModel } = await import('./ai-services.js');
// --- Call Unified AI Service ---
const role = useResearch ? 'research' : 'main';
if (isMCP) logFn.info(`Using AI service with role: ${role}`);
else logFn('info', `Using AI service with role: ${role}`);
// Try different models with fallback
while (modelAttempts < maxModelAttempts && !updatedTasks) {
modelAttempts++;
const isLastAttempt = modelAttempts >= maxModelAttempts;
let modelType = null;
try {
// Get the appropriate model based on current state
const result = getAvailableAIModel({
claudeOverloaded,
requiresResearch: useResearch
});
modelType = result.type;
const client = result.client;
report(
`Attempt ${modelAttempts}/${maxModelAttempts}: Updating tasks using ${modelType}`,
'info'
);
// Update loading indicator - only for text output
if (outputFormat === 'text') {
if (loadingIndicator) {
stopLoadingIndicator(loadingIndicator);
}
loadingIndicator = startLoadingIndicator(
`Attempt ${modelAttempts}: Using ${modelType.toUpperCase()}...`
);
}
if (modelType === 'perplexity') {
// Call Perplexity AI using proper format and getters
const result = await client.chat.completions.create({
model: getResearchModelId(session),
messages: [
{
role: 'system',
content: `${systemPrompt}\n\nAdditionally, please research the latest best practices, implementation details, and considerations when updating these tasks. Use your online search capabilities to gather relevant information. Remember to strictly follow the guidelines about preserving completed subtasks and building upon what has already been done rather than modifying or replacing it.`
},
{
role: 'user',
content: `Here are the tasks to update:\n${taskData}\n\nPlease update these tasks based on the following new context:\n${prompt}\n\nIMPORTANT: In the tasks JSON above, any subtasks with "status": "done" or "status": "completed" should be preserved exactly as is. Build your changes around these completed items.\n\nReturn only the updated tasks as a valid JSON array.`
}
],
temperature: getResearchTemperature(session),
max_tokens: getResearchMaxTokens(session)
});
const responseText = result.choices[0].message.content;
// Extract JSON from response
const jsonStart = responseText.indexOf('[');
const jsonEnd = responseText.lastIndexOf(']');
if (jsonStart === -1 || jsonEnd === -1) {
throw new Error(
`Could not find valid JSON array in ${modelType}'s response`
);
}
const jsonText = responseText.substring(jsonStart, jsonEnd + 1);
updatedTasks = JSON.parse(jsonText);
} else {
// Call Claude to update the tasks with streaming
let responseText = '';
let streamingInterval = null;
try {
// Update loading indicator to show streaming progress - only for text output
if (outputFormat === 'text') {
let dotCount = 0;
const readline = await import('readline');
streamingInterval = setInterval(() => {
readline.cursorTo(process.stdout, 0);
process.stdout.write(
`Receiving streaming response from Claude${'.'.repeat(dotCount)}`
);
dotCount = (dotCount + 1) % 4;
}, 500);
}
// Use streaming API call with getters
const stream = await client.messages.create({
model: getMainModelId(session),
max_tokens: getMainMaxTokens(session),
temperature: getMainTemperature(session),
system: systemPrompt,
messages: [
{
role: 'user',
content: `Here is the task to update:
${taskData}
Please update this task based on the following new context:
${prompt}
IMPORTANT: In the task JSON above, any subtasks with "status": "done" or "status": "completed" should be preserved exactly as is. Build your changes around these completed items.
Return only the updated task as a valid JSON object.`
}
],
stream: true
});
// Process the stream
for await (const chunk of stream) {
if (chunk.type === 'content_block_delta' && chunk.delta.text) {
responseText += chunk.delta.text;
}
if (reportProgress) {
await reportProgress({
progress:
(responseText.length / getMainMaxTokens(session)) * 100
});
}
if (mcpLog) {
mcpLog.info(
`Progress: ${(responseText.length / getMainMaxTokens(session)) * 100}%`
);
}
}
if (streamingInterval) clearInterval(streamingInterval);
report(
`Completed streaming response from ${modelType} API (Attempt ${modelAttempts})`,
'info'
);
// Extract JSON from response
const jsonStart = responseText.indexOf('[');
const jsonEnd = responseText.lastIndexOf(']');
if (jsonStart === -1 || jsonEnd === -1) {
throw new Error(
`Could not find valid JSON array in ${modelType}'s response`
);
}
const jsonText = responseText.substring(jsonStart, jsonEnd + 1);
updatedTasks = JSON.parse(jsonText);
} catch (streamError) {
if (streamingInterval) clearInterval(streamingInterval);
// Process stream errors explicitly
report(`Stream error: ${streamError.message}`, 'error');
// Check if this is an overload error
let isOverload = false;
// Check 1: SDK specific property
if (streamError.type === 'overloaded_error') {
isOverload = true;
}
// Check 2: Check nested error property
else if (streamError.error?.type === 'overloaded_error') {
isOverload = true;
}
// Check 3: Check status code
else if (
streamError.status === 429 ||
streamError.status === 529
) {
isOverload = true;
}
// Check 4: Check message string
else if (
streamError.message?.toLowerCase().includes('overloaded')
) {
isOverload = true;
}
if (isOverload) {
claudeOverloaded = true;
report(
'Claude overloaded. Will attempt fallback model if available.',
'warn'
);
// Let the loop continue to try the next model
throw new Error('Claude overloaded');
} else {
// Re-throw non-overload errors
throw streamError;
}
}
}
// If we got here successfully, break out of the loop
if (updatedTasks) {
report(
`Successfully updated tasks using ${modelType} on attempt ${modelAttempts}`,
'success'
);
break;
}
} catch (modelError) {
const failedModel = modelType || 'unknown model';
report(
`Attempt ${modelAttempts} failed using ${failedModel}: ${modelError.message}`,
'warn'
);
// Continue to next attempt if we have more attempts and this was an overload error
const wasOverload = modelError.message
?.toLowerCase()
.includes('overload');
if (wasOverload && !isLastAttempt) {
if (modelType === 'claude') {
claudeOverloaded = true;
report('Will attempt with Perplexity AI next', 'info');
}
continue; // Continue to next attempt
} else if (isLastAttempt) {
report(
`Final attempt (${modelAttempts}/${maxModelAttempts}) failed. No fallback possible.`,
'error'
);
throw modelError; // Re-throw on last attempt
} else {
throw modelError; // Re-throw for non-overload errors
}
}
}
// If we don't have updated tasks after all attempts, throw an error
if (!updatedTasks) {
throw new Error(
'Failed to generate updated tasks after all model attempts'
);
}
// Replace the tasks in the original data
updatedTasks.forEach((updatedTask) => {
const index = data.tasks.findIndex((t) => t.id === updatedTask.id);
if (index !== -1) {
data.tasks[index] = updatedTask;
}
responseText = await generateTextService({
prompt: userPrompt,
systemPrompt: systemPrompt,
role,
session
});
// Write the updated tasks to the file
writeJSON(tasksPath, data);
report(`Successfully updated ${updatedTasks.length} tasks`, 'success');
// Generate individual task files
await generateTaskFiles(tasksPath, path.dirname(tasksPath));
// Only show success box for text output (CLI)
if (outputFormat === 'text') {
console.log(
boxen(
chalk.green(`Successfully updated ${updatedTasks.length} tasks`),
{ padding: 1, borderColor: 'green', borderStyle: 'round' }
)
);
if (isMCP) logFn.info('Successfully received text response');
else
logFn('success', 'Successfully received text response via AI service');
// --- End AI Service Call ---
} catch (error) {
if (loadingIndicator) stopLoadingIndicator(loadingIndicator);
if (isMCP) logFn.error(`Error during AI service call: ${error.message}`);
else logFn('error', `Error during AI service call: ${error.message}`);
if (error.message.includes('API key')) {
if (isMCP)
logFn.error(
'Please ensure API keys are configured correctly in .env or mcp.json.'
);
else
logFn(
'error',
'Please ensure API keys are configured correctly in .env or mcp.json.'
);
}
throw error; // Re-throw error
} finally {
// Stop the loading indicator if it was created
if (loadingIndicator) {
stopLoadingIndicator(loadingIndicator);
loadingIndicator = null;
}
if (loadingIndicator) stopLoadingIndicator(loadingIndicator);
}
} catch (error) {
report(`Error updating tasks: ${error.message}`, 'error');
// Only show error box for text output (CLI)
// --- Parse and Validate Response ---
try {
updatedTasks = parseUpdatedTasksFromText(
responseText,
tasksToUpdate.length,
logFn,
isMCP
);
} catch (parseError) {
if (isMCP)
logFn.error(
`Failed to parse updated tasks from AI response: ${parseError.message}`
);
else
logFn(
'error',
`Failed to parse updated tasks from AI response: ${parseError.message}`
);
if (getDebugFlag(session)) {
if (isMCP) logFn.error(`Raw AI Response:\n${responseText}`);
else logFn('error', `Raw AI Response:\n${responseText}`);
}
throw new Error(
`Failed to parse valid updated tasks from AI response: ${parseError.message}`
);
}
// --- End Parse/Validate ---
// --- Update Tasks Data (Unchanged) ---
if (!Array.isArray(updatedTasks)) {
// Should be caught by parser, but extra check
throw new Error('Parsed AI response for updated tasks was not an array.');
}
if (isMCP)
logFn.info(`Received ${updatedTasks.length} updated tasks from AI.`);
else
logFn('info', `Received ${updatedTasks.length} updated tasks from AI.`);
// Create a map for efficient lookup
const updatedTasksMap = new Map(
updatedTasks.map((task) => [task.id, task])
);
// Iterate through the original data and update based on the map
let actualUpdateCount = 0;
data.tasks.forEach((task, index) => {
if (updatedTasksMap.has(task.id)) {
// Only update if the task was part of the set sent to AI
data.tasks[index] = updatedTasksMap.get(task.id);
actualUpdateCount++;
}
});
if (isMCP)
logFn.info(
`Applied updates to ${actualUpdateCount} tasks in the dataset.`
);
else
logFn(
'info',
`Applied updates to ${actualUpdateCount} tasks in the dataset.`
);
// --- End Update Tasks Data ---
// --- Write File and Generate (Unchanged) ---
writeJSON(tasksPath, data);
if (isMCP)
logFn.info(
`Successfully updated ${actualUpdateCount} tasks in ${tasksPath}`
);
else
logFn(
'success',
`Successfully updated ${actualUpdateCount} tasks in ${tasksPath}`
);
await generateTaskFiles(tasksPath, path.dirname(tasksPath));
// --- End Write File ---
// --- Final CLI Output (Unchanged) ---
if (outputFormat === 'text') {
console.log(
boxen(chalk.green(`Successfully updated ${actualUpdateCount} tasks`), {
padding: 1,
borderColor: 'green',
borderStyle: 'round'
})
);
}
// --- End Final CLI Output ---
} catch (error) {
// --- General Error Handling (Unchanged) ---
if (isMCP) logFn.error(`Error updating tasks: ${error.message}`);
else logFn('error', `Error updating tasks: ${error.message}`);
if (outputFormat === 'text') {
console.error(chalk.red(`Error: ${error.message}`));
// Provide helpful error messages based on error type
if (error.message?.includes('ANTHROPIC_API_KEY')) {
console.log(
chalk.yellow('\nTo fix this issue, set your Anthropic API key:')
);
console.log(' export ANTHROPIC_API_KEY=your_api_key_here');
} else if (error.message?.includes('PERPLEXITY_API_KEY') && useResearch) {
console.log(chalk.yellow('\nTo fix this issue:'));
console.log(
' 1. Set your Perplexity API key: export PERPLEXITY_API_KEY=your_api_key_here'
);
console.log(
' 2. Or run without the research flag: task-master update --from=<id> --prompt="..."'
);
} else if (error.message?.includes('overloaded')) {
console.log(
chalk.yellow(
'\nAI model overloaded, and fallback failed or was unavailable:'
)
);
console.log(' 1. Try again in a few minutes.');
console.log(' 2. Ensure PERPLEXITY_API_KEY is set for fallback.');
}
if (getDebugFlag()) {
// Use getter
if (getDebugFlag(session)) {
console.error(error);
}
process.exit(1);
} else {
throw error; // Re-throw for JSON output
throw error; // Re-throw for MCP/programmatic callers
}
// --- End General Error Handling ---
}
}
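
A hypothetical call into the refactored `updateTasks` from an MCP direct function might look like this (all argument values illustrative):

```js
// Inside an MCP direct function, after path validation (logWrapper, session in scope):
await updateTasks(
	'/abs/path/to/tasks.json',                       // tasksPath
	5,                                               // fromId: tasks with id >= 5 and not 'done'
	'Switch the HTTP layer from Express to Fastify', // prompt (illustrative)
	false,                                           // useResearch: use the 'main' role
	{ mcpLog: logWrapper, session },                 // context
	'json'                                           // outputFormat: suppress CLI-only UI
);
```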

View File

@@ -1957,7 +1957,7 @@ for (const task of pendingTasks) {
- Add or update JSDoc comments to clarify that this module is now a pure orchestrator and does not perform AI or config operations directly.
</info added on 2025-04-24T17:48:09.354Z>
## 39. Refactor get-subtasks-from-ai.js for Unified AI Service & Config [pending]
## 39. Refactor get-subtasks-from-ai.js for Unified AI Service & Config [done]
### Dependencies: None
### Description: Replace direct AI calls (old `ai-services.js` helpers) with `generateObjectService` or `generateTextService` from `ai-services-unified.js`. Pass `role` and `session`. Remove direct config getter usage (from `config-manager.js`) for AI parameters; use unified service instead.
### Details:
@@ -2043,7 +2043,7 @@ for (const task of pendingTasks) {
These enhancements ensure the refactored file is robust, maintainable, and aligned with the unified AI service architecture, leveraging Zod for strict runtime validation and clear error boundaries[5][1][3].
</info added on 2025-04-24T17:48:35.005Z>
## 40. Refactor update-task-by-id.js for Unified AI Service & Config [pending]
## 40. Refactor update-task-by-id.js for Unified AI Service & Config [done]
### Dependencies: None
### Description: Replace direct AI calls (old `ai-services.js` helpers) with `generateObjectService` or `generateTextService` from `ai-services-unified.js`. Pass `role` and `session`. Remove direct config getter usage (from `config-manager.js`) for AI parameters and fallback logic; use unified service instead. Keep `getDebugFlag`.
### Details:
@@ -2063,7 +2063,7 @@ These enhancements ensure the refactored file is robust, maintainable, and align
- If you need to validate or transform nested objects (such as task metadata or options), use Zod's object and nested schema capabilities to define these structures precisely, catching errors early and simplifying downstream logic[3][5].
</info added on 2025-04-24T17:48:58.133Z>
## 41. Refactor update-tasks.js for Unified AI Service & Config [pending]
## 41. Refactor update-tasks.js for Unified AI Service & Config [done]
### Dependencies: None
### Description: Replace direct AI calls (old `ai-services.js` helpers) with `generateObjectService` or `generateTextService` from `ai-services-unified.js`. Pass `role` and `session`. Remove direct config getter usage (from `config-manager.js`) for AI parameters and fallback logic; use unified service instead. Keep `getDebugFlag`.
### Details:
@@ -2149,3 +2149,15 @@ These enhancements ensure the refactored file is robust, maintainable, and align
These enhancements ensure robust validation, unified service usage, and maintainable, predictable batch update behavior.
</info added on 2025-04-24T17:49:25.126Z>
## 42. Remove all unused imports [pending]
### Dependencies: None
### Description:
### Details:
## 43. Remove all unnecessary console logs [pending]
### Dependencies: None
### Description:
### Details:

View File

@@ -3,7 +3,7 @@
# Status: pending
# Dependencies: None
# Priority: medium
# Description: Implement full support for pnpm as an alternative package manager in the Taskmaster application, allowing users to install and manage the package using pnpm alongside the existing npm and yarn options.
# Description: Implement full support for pnpm as an alternative package manager in the Taskmaster application, ensuring users have the exact same experience as with npm when installing and managing the package. The installation process, including any CLI prompts or web interfaces, must serve the exact same content and user experience regardless of whether npm or pnpm is used. The project uses 'module' as the package type, defines binaries 'task-master' and 'task-master-mcp', and its core logic resides in 'scripts/modules/'. The 'init' command (via scripts/init.js) creates the directory structure (.cursor/rules, scripts, tasks), copies templates (.env.example, .gitignore, rule files, dev.js), manages package.json merging, and sets up MCP config (.cursor/mcp.json). All dependencies are standard npm dependencies listed in package.json, and manual modifications are being removed.
# Details:
This task involves:
@@ -13,6 +13,7 @@ This task involves:
- Review and modify package.json scripts if necessary
- Test script execution with pnpm syntax (`pnpm run <script>`)
- Address any pnpm-specific path or execution differences
- Confirm that scripts responsible for showing a website or prompt during install behave identically with pnpm and npm
3. Create a pnpm-lock.yaml file by installing dependencies with pnpm.
@@ -20,32 +21,43 @@ This task involves:
- Global installation (`pnpm add -g taskmaster`)
- Local project installation
- Verify CLI commands work correctly when installed with pnpm
- Verify binaries `task-master` and `task-master-mcp` are properly linked
- Ensure the `init` command (scripts/init.js) correctly creates directory structure and copies templates as described
5. Update CI/CD pipelines to include testing with pnpm:
- Add a pnpm test matrix to GitHub Actions workflows
- Ensure tests pass when dependencies are installed with pnpm
6. Handle any pnpm-specific dependency resolution issues:
- Address potential hoisting differences between npm/yarn and pnpm
- Address potential hoisting differences between npm and pnpm
- Test with pnpm's strict mode to ensure compatibility
- Verify proper handling of 'module' package type
7. Document any pnpm-specific considerations or commands in the README and documentation.
8. Consider adding a pnpm-specific installation script or helper if needed.
8. Verify that the `scripts/init.js` file works correctly with pnpm:
- Ensure it properly creates `.cursor/rules`, `scripts`, and `tasks` directories
- Verify template copying (`.env.example`, `.gitignore`, rule files, `dev.js`)
- Confirm `package.json` merging works correctly
- Test MCP config setup (`.cursor/mcp.json`)
This implementation should maintain full feature parity regardless of which package manager is used to install Taskmaster.
9. Ensure core logic in `scripts/modules/` works correctly when installed via pnpm.
This implementation should maintain full feature parity and identical user experience regardless of which package manager is used to install Taskmaster.
# Test Strategy:
1. Manual Testing:
- Install Taskmaster globally using pnpm: `pnpm add -g taskmaster`
- Install Taskmaster locally in a test project: `pnpm add taskmaster`
- Verify all CLI commands function correctly with both installation methods
- Test all major features to ensure they work identically to npm/yarn installations
- Test all major features to ensure they work identically to npm installations
- Verify binaries `task-master` and `task-master-mcp` are properly linked and executable
- Test the `init` command to ensure it correctly sets up the directory structure and files as defined in scripts/init.js
2. Automated Testing:
- Create a dedicated test workflow in GitHub Actions that uses pnpm
- Run the full test suite using pnpm to install dependencies
- Verify all tests pass with the same results as npm/yarn
- Verify all tests pass with the same results as npm
3. Documentation Testing:
- Review all documentation to ensure pnpm commands are correctly documented
@@ -56,46 +68,71 @@ This implementation should maintain full feature parity regardless of which pack
- Test on different operating systems (Windows, macOS, Linux)
- Verify compatibility with different pnpm versions (latest stable and LTS)
- Test in environments with multiple package managers installed
- Verify proper handling of 'module' package type
5. Edge Case Testing:
- Test installation in a project that uses pnpm workspaces
- Verify behavior when upgrading from an npm/yarn installation to pnpm
- Verify behavior when upgrading from an npm installation to pnpm
- Test with pnpm's various flags and modes (--frozen-lockfile, --strict-peer-dependencies)
6. Performance Comparison:
- Measure and document any performance differences between package managers
- Compare installation times and disk space usage
Success criteria: Taskmaster should install and function identically regardless of whether it was installed via npm, yarn, or pnpm, with no degradation in functionality or performance.
7. Structure Testing:
- Verify that the core logic in `scripts/modules/` is accessible and functions correctly
- Confirm that the `init` command properly creates all required directories and files as per scripts/init.js
- Test package.json merging functionality
- Verify MCP config setup
Success criteria: Taskmaster should install and function identically regardless of whether it was installed via npm or pnpm, with no degradation in functionality, performance, or user experience. All binaries should be properly linked, and the directory structure should be correctly created.
# Subtasks:
## 1. Update Documentation for pnpm Support [pending]
### Dependencies: None
### Description: Revise installation and usage documentation to include pnpm commands and instructions for installing and managing Taskmaster with pnpm.
### Description: Revise installation and usage documentation to include pnpm commands and instructions for installing and managing Taskmaster with pnpm. Clearly state that the installation process, including any website or UI shown, is identical to npm. Ensure documentation reflects the use of 'module' package type, binaries, and the init process as defined in scripts/init.js.
### Details:
Add pnpm installation commands (e.g., `pnpm add taskmaster`) and update all relevant sections in the README and official docs to reflect pnpm as a supported package manager.
Add pnpm installation commands (e.g., `pnpm add taskmaster`) and update all relevant sections in the README and official docs to reflect pnpm as a supported package manager. Document that any installation website or prompt is the same as with npm. Include notes on the 'module' package type, binaries, and the directory/template setup performed by scripts/init.js.
## 2. Ensure Package Scripts Compatibility with pnpm [pending]
### Dependencies: 63.1
### Description: Review and update package.json scripts to ensure they work seamlessly with pnpm's execution model.
### Description: Review and update package.json scripts to ensure they work seamlessly with pnpm's execution model. Confirm that any scripts responsible for showing a website or prompt during install behave identically with pnpm and npm. Ensure compatibility with 'module' package type and correct binary definitions.
### Details:
Test all scripts using `pnpm run <script>`, address any pnpm-specific path or execution differences, and modify scripts as needed for compatibility.
Test all scripts using `pnpm run <script>`, address any pnpm-specific path or execution differences, and modify scripts as needed for compatibility. Pay special attention to any scripts that trigger a website or prompt during installation, ensuring they serve the same content as npm. Validate that scripts/init.js and binaries are referenced correctly for ESM ('module') projects.
## 3. Generate and Validate pnpm Lockfile [pending]
### Dependencies: 63.2
### Description: Install dependencies using pnpm to create a pnpm-lock.yaml file and ensure it accurately reflects the project's dependency tree.
### Description: Install dependencies using pnpm to create a pnpm-lock.yaml file and ensure it accurately reflects the project's dependency tree, considering the 'module' package type.
### Details:
Run `pnpm install` to generate the lockfile, check it into version control, and verify that dependency resolution is correct and consistent.
Run `pnpm install` to generate the lockfile, check it into version control, and verify that dependency resolution is correct and consistent. Ensure that all dependencies listed in package.json are resolved as expected for an ESM project.
## 4. Test Taskmaster Installation and Operation with pnpm [pending]
### Dependencies: 63.3
### Description: Thoroughly test Taskmaster's installation and CLI operation when installed via pnpm, both globally and locally.
### Description: Thoroughly test Taskmaster's installation and CLI operation when installed via pnpm, both globally and locally. Confirm that any website or UI shown during installation is identical to npm. Validate that binaries and the init process (scripts/init.js) work as expected.
### Details:
Perform global (`pnpm add -g taskmaster`) and local installations, verify CLI commands, and check for any pnpm-specific issues or incompatibilities.
Perform global (`pnpm add -g taskmaster`) and local installations, verify CLI commands, and check for any pnpm-specific issues or incompatibilities. Ensure any installation UIs or websites appear identical to npm installations, including any website or prompt shown during install. Test that binaries 'task-master' and 'task-master-mcp' are linked and that scripts/init.js creates the correct structure and templates.
## 5. Integrate pnpm into CI/CD Pipeline [pending]
### Dependencies: 63.4
### Description: Update CI/CD workflows to include pnpm in the test matrix, ensuring all tests pass when dependencies are installed with pnpm.
### Description: Update CI/CD workflows to include pnpm in the test matrix, ensuring all tests pass when dependencies are installed with pnpm. Confirm that tests cover the 'module' package type, binaries, and init process.
### Details:
Modify GitHub Actions or other CI configurations to use pnpm/action-setup, run tests with pnpm, and cache pnpm dependencies for efficiency.
Modify GitHub Actions or other CI configurations to use pnpm/action-setup, run tests with pnpm, and cache pnpm dependencies for efficiency. Ensure that CI covers CLI commands, binary linking, and the directory/template setup performed by scripts/init.js.
## 6. Verify Installation UI/Website Consistency [pending]
### Dependencies: 63.4
### Description: Ensure any installation UIs, websites, or interactive prompts—including any website or prompt shown during install—appear and function identically when installing with pnpm compared to npm. Confirm that the experience is consistent for the 'module' package type and the init process.
### Details:
Identify all user-facing elements during the installation process, including any website or prompt shown during install, and verify they are consistent across package managers. If a website is shown during installation, ensure it appears the same regardless of package manager used. Validate that any prompts or UIs triggered by scripts/init.js are identical.
## 7. Test init.js Script with pnpm [pending]
### Dependencies: 63.4
### Description: Verify that the scripts/init.js file works correctly when Taskmaster is installed via pnpm, creating the proper directory structure and copying all required templates as defined in the project structure.
### Details:
Test the init command to ensure it properly creates .cursor/rules, scripts, and tasks directories, copies templates (.env.example, .gitignore, rule files, dev.js), handles package.json merging, and sets up MCP config (.cursor/mcp.json) as per scripts/init.js.
## 8. Verify Binary Links with pnpm [pending]
### Dependencies: 63.4
### Description: Ensure that the task-master and task-master-mcp binaries are properly defined in package.json, linked, and executable when installed via pnpm, in both global and local installations.
### Details:
Check that the binaries defined in package.json are correctly linked in node_modules/.bin when installed with pnpm, and that they can be executed without errors. Validate that binaries work for ESM ('module') projects and are accessible after both global and local installs.
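For the local-install case, this can be scripted by reading the installed package's `bin` map and confirming each entry exists in node_modules/.bin. A sketch, assuming POSIX paths (on Windows, pnpm writes `.cmd`/`.ps1` shims instead of symlinks):

```js
// verify-bin-links.js: cross-check the installed package's "bin" map against node_modules/.bin.
// Sketch for a local install on POSIX; Windows uses .cmd/.ps1 shims instead.
import { existsSync, readFileSync } from 'node:fs';
import { join } from 'node:path';

const pkgPath = join(process.cwd(), 'node_modules', 'taskmaster', 'package.json');
const pkg = JSON.parse(readFileSync(pkgPath, 'utf8'));
// "bin" may be a string (single binary named after the package) or a map.
const bins = typeof pkg.bin === 'string' ? { [pkg.name]: pkg.bin } : (pkg.bin ?? {});

let ok = true;
for (const name of Object.keys(bins)) {
	const link = join(process.cwd(), 'node_modules', '.bin', name);
	const linked = existsSync(link);
	ok = ok && linked;
	console.log(`${name}: ${linked ? 'linked' : 'MISSING'} (${link})`);
}
process.exitCode = ok ? 0 : 1;
```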


@@ -3,22 +3,29 @@
# Status: pending
# Dependencies: None
# Priority: medium
# Description: Implement full support for installing and managing Taskmaster using Yarn package manager, providing users with an alternative to npm and pnpm.
# Description: Implement full support for installing and managing Taskmaster using Yarn package manager, ensuring users have the exact same experience as with npm or pnpm. The installation process, including any CLI prompts or web interfaces, must serve the exact same content and user experience regardless of whether npm, pnpm, or Yarn is used. The project uses 'module' as the package type, defines binaries 'task-master' and 'task-master-mcp', and its core logic resides in 'scripts/modules/'. The 'init' command (via scripts/init.js) creates the directory structure (.cursor/rules, scripts, tasks), copies templates (.env.example, .gitignore, rule files, dev.js), manages package.json merging, and sets up MCP config (.cursor/mcp.json). All dependencies are standard npm dependencies listed in package.json, and manual modifications are being removed.
# Details:
This task involves adding comprehensive Yarn support to the Taskmaster package to ensure it can be properly installed and managed using Yarn. Implementation should include:
1. Update package.json to ensure compatibility with Yarn installation methods
1. Update package.json to ensure compatibility with Yarn installation methods, considering the 'module' package type and binary definitions
2. Verify all scripts and dependencies work correctly with Yarn
3. Add Yarn-specific configuration files (e.g., .yarnrc.yml if needed)
4. Update installation documentation to include Yarn installation instructions
5. Ensure all post-install scripts work correctly with Yarn
6. Verify that all CLI commands function properly when installed via Yarn
7. Handle any Yarn-specific package resolution or hoisting issues
8. Test compatibility with different Yarn versions (classic and berry/v2+)
9. Ensure proper lockfile generation and management
10. Update any package manager detection logic in the codebase to recognize Yarn installations
7. Ensure binaries `task-master` and `task-master-mcp` are properly linked
8. Test the `scripts/init.js` file with Yarn to verify it correctly:
- Creates directory structure (`.cursor/rules`, `scripts`, `tasks`)
- Copies templates (`.env.example`, `.gitignore`, rule files, `dev.js`)
- Manages `package.json` merging
- Sets up MCP config (`.cursor/mcp.json`)
9. Handle any Yarn-specific package resolution or hoisting issues
10. Test compatibility with different Yarn versions (classic and berry/v2+)
11. Ensure proper lockfile generation and management
12. Update any package manager detection logic in the codebase to recognize Yarn installations
13. Verify that core logic in `scripts/modules/` works correctly when installed via Yarn
The implementation should maintain feature parity regardless of which package manager (npm, pnpm, or Yarn) is used to install Taskmaster.
The implementation should maintain feature parity and identical user experience regardless of which package manager (npm, pnpm, or Yarn) is used to install Taskmaster.
# Test Strategy:
Testing should verify complete Yarn support through the following steps:
@@ -26,12 +33,14 @@ Testing should verify complete Yarn support through the following steps:
1. Fresh installation tests:
- Install Taskmaster globally (`yarn global add taskmaster` on Yarn classic) and locally (`yarn add taskmaster`)
- Verify installation completes without errors
- Check that all binaries and executables are properly linked
- Check that binaries `task-master` and `task-master-mcp` are properly linked
- Test the `init` command to ensure it correctly sets up the directory structure and files as defined in scripts/init.js
2. Functionality tests:
- Run all Taskmaster commands on a Yarn-installed version
- Verify all features work identically to npm/pnpm installations
- Verify all features work identically to npm installations
- Test with both Yarn v1 (classic) and Yarn v2+ (berry)
- Verify proper handling of 'module' package type
3. Update/uninstall tests:
- Test updating the package using Yarn commands
@@ -49,36 +58,60 @@ Testing should verify complete Yarn support through the following steps:
- Test installation in monorepo setups using Yarn workspaces
- Verify compatibility with other Yarn-specific features (plug'n'play, zero-installs)
All tests should pass with the same results as when using npm or pnpm.
7. Structure Testing:
- Verify that the core logic in `scripts/modules/` is accessible and functions correctly
- Confirm that the `init` command properly creates all required directories and files as per scripts/init.js
- Test package.json merging functionality
- Verify MCP config setup
All tests should pass with the same results as when using npm, with identical user experience throughout the installation and usage process.
# Subtasks:
## 1. Update package.json for Yarn Compatibility [pending]
### Dependencies: None
### Description: Modify the package.json file to ensure all dependencies, scripts, and configurations are compatible with Yarn's installation and resolution methods.
### Description: Modify the package.json file to ensure all dependencies, scripts, and configurations are compatible with Yarn's installation and resolution methods. Confirm that any scripts responsible for showing a website or prompt during install behave identically with Yarn and npm. Ensure compatibility with 'module' package type and correct binary definitions.
### Details:
Review and update dependency declarations, script syntax, and any package manager-specific fields to avoid conflicts or unsupported features when using Yarn.
Review and update dependency declarations, script syntax, and any package manager-specific fields to avoid conflicts or unsupported features when using Yarn. Pay special attention to any scripts that trigger a website or prompt during installation, ensuring they serve the same content as npm. Validate that scripts/init.js and binaries are referenced correctly for ESM ('module') projects.
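A small manifest check can codify these expectations before any Yarn-specific work begins. The expected field values are taken from this task's description; this is a sanity-check sketch, not a full validator.

```js
// check-manifest.js: sanity-check the package.json fields this task depends on.
// Expected values come from the task description; adjust as the manifest evolves.
import { readFileSync } from 'node:fs';

const pkg = JSON.parse(readFileSync('./package.json', 'utf8'));
const problems = [];

if (pkg.type !== 'module') {
	problems.push(`"type" should be "module", got ${JSON.stringify(pkg.type)}`);
}
for (const bin of ['task-master', 'task-master-mcp']) {
	if (!pkg.bin?.[bin]) {
		problems.push(`missing "bin" entry: ${bin}`);
	}
}

console.log(problems.length ? problems.join('\n') : 'manifest looks Yarn-compatible');
process.exitCode = problems.length ? 1 : 0;
```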
## 2. Add Yarn-Specific Configuration Files [pending]
### Dependencies: 64.1
### Description: Introduce Yarn-specific configuration files such as .yarnrc.yml if needed to optimize Yarn behavior and ensure consistent installs.
### Description: Introduce Yarn-specific configuration files such as .yarnrc.yml if needed to optimize Yarn behavior and ensure consistent installs for 'module' package type and binary definitions.
### Details:
Determine if Yarn v2+ (Berry) or classic requires additional configuration for the project, and add or update .yarnrc.yml or .yarnrc files accordingly.
Determine if Yarn v2+ (Berry) or classic requires additional configuration for the project, and add or update .yarnrc.yml or .yarnrc files accordingly. Ensure configuration supports ESM and binary linking.
## 3. Test and Fix Yarn Compatibility for Scripts and CLI [pending]
### Dependencies: 64.2
### Description: Ensure all scripts, post-install hooks, and CLI commands function correctly when Taskmaster is installed and managed via Yarn.
### Description: Ensure all scripts, post-install hooks, and CLI commands function correctly when Taskmaster is installed and managed via Yarn. Confirm that any website or UI shown during installation is identical to npm. Validate that binaries and the init process (scripts/init.js) work as expected.
### Details:
Test all lifecycle scripts, post-install actions, and CLI commands using Yarn. Address any issues related to environment variables, script execution, or dependency hoisting.
Test all lifecycle scripts, post-install actions, and CLI commands using Yarn. Address any issues related to environment variables, script execution, or dependency hoisting. Ensure any website or prompt shown during install is the same as with npm. Validate that binaries 'task-master' and 'task-master-mcp' are linked and that scripts/init.js creates the correct structure and templates.
## 4. Update Documentation for Yarn Installation and Usage [pending]
### Dependencies: 64.3
### Description: Revise installation and usage documentation to include clear instructions for installing and managing Taskmaster with Yarn.
### Description: Revise installation and usage documentation to include clear instructions for installing and managing Taskmaster with Yarn. Clearly state that the installation process, including any website or UI shown, is identical to npm. Ensure documentation reflects the use of 'module' package type, binaries, and the init process as defined in scripts/init.js.
### Details:
Add Yarn-specific installation commands, troubleshooting tips, and notes on version compatibility to the README and any relevant docs.
Add Yarn-specific installation commands, troubleshooting tips, and notes on version compatibility to the README and any relevant docs. Document that any installation website or prompt is the same as with npm. Include notes on the 'module' package type, binaries, and the directory/template setup performed by scripts/init.js.
## 5. Implement and Test Package Manager Detection Logic [pending]
### Dependencies: 64.4
### Description: Update or add logic in the codebase to detect Yarn installations and handle Yarn-specific behaviors, ensuring feature parity across package managers.
### Description: Update or add logic in the codebase to detect Yarn installations and handle Yarn-specific behaviors, ensuring feature parity across package managers. Ensure detection logic works for 'module' package type and binary definitions.
### Details:
Modify detection logic to recognize Yarn (classic and berry), handle lockfile generation, and resolve any Yarn-specific package resolution or hoisting issues.
Modify detection logic to recognize Yarn (classic and berry), handle lockfile generation, and resolve any Yarn-specific package resolution or hoisting issues. Ensure detection logic supports ESM and binary linking.
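One widely used signal for this is the `npm_config_user_agent` environment variable, which npm, pnpm, and Yarn (classic and berry) all set when running lifecycle scripts; a minimal detection sketch:

```js
// detect-package-manager.js: minimal detection via npm_config_user_agent.
// The variable looks like "yarn/1.22.19 npm/? node/v18.16.0 darwin x64".
export function detectPackageManager(env = process.env) {
	const match = (env.npm_config_user_agent ?? '').match(/^(npm|pnpm|yarn)\/(\S+)/);
	return match ? { name: match[1], version: match[2] } : { name: 'unknown', version: null };
}

// Example: distinguish Yarn classic (v1) from berry (v2+) by major version.
const pm = detectPackageManager();
if (pm.name === 'yarn') {
	console.log(Number(pm.version.split('.')[0]) >= 2 ? 'yarn berry' : 'yarn classic');
} else {
	console.log(pm.name);
}
```

Note that the user agent is only populated inside lifecycle scripts; when the CLI is invoked directly the variable is absent, which is why the sketch falls back to 'unknown' rather than guessing.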
## 6. Verify Installation UI/Website Consistency [pending]
### Dependencies: 64.3
### Description: Ensure any installation UIs, websites, or interactive prompts shown during install appear and function identically when installing with Yarn compared to npm. Confirm that the experience is consistent for the 'module' package type and the init process.
### Details:
Identify all user-facing elements during the installation process and verify they are consistent across package managers. If a website is shown during installation, ensure it appears the same regardless of the package manager used. Validate that any prompts or UIs triggered by scripts/init.js are identical.
## 7. Test init.js Script with Yarn [pending]
### Dependencies: 64.3
### Description: Verify that the scripts/init.js file works correctly when Taskmaster is installed via Yarn, creating the proper directory structure and copying all required templates as defined in the project structure.
### Details:
Test the init command to ensure it properly creates .cursor/rules, scripts, and tasks directories, copies templates (.env.example, .gitignore, rule files, dev.js), handles package.json merging, and sets up MCP config (.cursor/mcp.json) as per scripts/init.js.
## 8. Verify Binary Links with Yarn [pending]
### Dependencies: 64.3
### Description: Ensure that the task-master and task-master-mcp binaries are properly defined in package.json, linked, and executable when installed via Yarn, in both global and local installations.
### Details:
Check that the binaries defined in package.json are correctly linked in node_modules/.bin when installed with Yarn, and that they can be executed without errors. Validate that binaries work for ESM ('module') projects and are accessible after both global and local installs.


@@ -3394,7 +3394,7 @@
"title": "Refactor get-subtasks-from-ai.js for Unified AI Service & Config",
"description": "Replace direct AI calls (old `ai-services.js` helpers) with `generateObjectService` or `generateTextService` from `ai-services-unified.js`. Pass `role` and `session`. Remove direct config getter usage (from `config-manager.js`) for AI parameters; use unified service instead.",
"details": "\n\n<info added on 2025-04-24T17:48:35.005Z>\n**Additional Implementation Notes for Refactoring get-subtasks-from-ai.js**\n\n- **Zod Schema Definition**: \n Define a Zod schema that precisely matches the expected subtask object structure. For example, if a subtask should have an id (string), title (string), and status (string), use:\n ```js\n import { z } from 'zod';\n\n const SubtaskSchema = z.object({\n id: z.string(),\n title: z.string(),\n status: z.string(),\n // Add other fields as needed\n });\n\n const SubtasksArraySchema = z.array(SubtaskSchema);\n ```\n This ensures robust runtime validation and clear error reporting if the AI response does not match expectations[5][1][3].\n\n- **Unified Service Invocation**: \n Replace all direct AI client and config usage with:\n ```js\n import { generateObjectService } from './ai-services-unified';\n\n // Example usage:\n const subtasks = await generateObjectService({\n schema: SubtasksArraySchema,\n prompt,\n role,\n session,\n });\n ```\n This centralizes AI invocation and parameter management, ensuring consistency and easier maintenance.\n\n- **Role Determination**: \n Use the `useResearch` flag to select the AI role:\n ```js\n const role = useResearch ? 'researcher' : 'default';\n ```\n\n- **Error Handling**: \n Implement structured error handling:\n ```js\n try {\n // AI service call\n } catch (err) {\n if (err.name === 'ServiceUnavailableError') {\n // Handle AI service unavailability\n } else if (err.name === 'ZodError') {\n // Handle schema validation errors\n // err.errors contains detailed validation issues\n } else if (err.name === 'PromptConstructionError') {\n // Handle prompt construction issues\n } else {\n // Handle unexpected errors\n }\n throw err; // or wrap and rethrow as needed\n }\n ```\n This pattern ensures that consumers can distinguish between different failure modes and respond appropriately.\n\n- **Consumer Contract**: \n Update the function signature to require both `useResearch` and `session` parameters, and document this in JSDoc/type annotations for clarity.\n\n- **Prompt Construction**: \n Move all prompt construction logic outside the core function if possible, or encapsulate it so that errors can be caught and reported as `PromptConstructionError`.\n\n- **No AI Implementation Details**: \n The refactored function should not expose or depend on any AI implementation specifics—only the unified service interface and schema validation.\n\n- **Testing**: \n Add or update tests to cover:\n - Successful subtask generation\n - Schema validation failures (invalid AI output)\n - Service unavailability scenarios\n - Prompt construction errors\n\nThese enhancements ensure the refactored file is robust, maintainable, and aligned with the unified AI service architecture, leveraging Zod for strict runtime validation and clear error boundaries[5][1][3].\n</info added on 2025-04-24T17:48:35.005Z>",
"status": "pending",
"status": "done",
"dependencies": [],
"parentTaskId": 61
},
@@ -3403,7 +3403,7 @@
"title": "Refactor update-task-by-id.js for Unified AI Service & Config",
"description": "Replace direct AI calls (old `ai-services.js` helpers) with `generateObjectService` or `generateTextService` from `ai-services-unified.js`. Pass `role` and `session`. Remove direct config getter usage (from `config-manager.js`) for AI parameters and fallback logic; use unified service instead. Keep `getDebugFlag`.",
"details": "\n\n<info added on 2025-04-24T17:48:58.133Z>\n- When defining the Zod schema for task update validation, consider using Zod's function schemas to validate both the input parameters and the expected output of the update function. This approach helps separate validation logic from business logic and ensures type safety throughout the update process[1][2].\n\n- For the core logic, use Zod's `.implement()` method to wrap the update function, so that all inputs (such as task ID, prompt, and options) are validated before execution, and outputs are type-checked. This reduces runtime errors and enforces contract compliance between layers[1][2].\n\n- In the MCP tool definition, ensure that the Zod schema explicitly validates all required parameters (e.g., `id` as a string, `prompt` as a string, `research` as a boolean or optional flag). This guarantees that only well-formed requests reach the core logic, improving reliability and error reporting[3][5].\n\n- When preparing the unified AI service call, pass the validated and sanitized data from the Zod schema directly to `generateObjectService`, ensuring that no unvalidated data is sent to the AI layer.\n\n- For output formatting, leverage Zod's ability to define and enforce the shape of the returned object, ensuring that the response structure (including success/failure status and updated task data) is always consistent and predictable[1][2][3].\n\n- If you need to validate or transform nested objects (such as task metadata or options), use Zod's object and nested schema capabilities to define these structures precisely, catching errors early and simplifying downstream logic[3][5].\n</info added on 2025-04-24T17:48:58.133Z>",
"status": "pending",
"status": "done",
"dependencies": [],
"parentTaskId": 61
},
@@ -3412,6 +3412,24 @@
"title": "Refactor update-tasks.js for Unified AI Service & Config",
"description": "Replace direct AI calls (old `ai-services.js` helpers) with `generateObjectService` or `generateTextService` from `ai-services-unified.js`. Pass `role` and `session`. Remove direct config getter usage (from `config-manager.js`) for AI parameters and fallback logic; use unified service instead. Keep `getDebugFlag`.",
"details": "\n\n<info added on 2025-04-24T17:49:25.126Z>\n## Additional Implementation Notes for Refactoring update-tasks.js\n\n- **Zod Schema for Batch Updates**: \n Define a Zod schema to validate the structure of the batch update payload. For example, if updating tasks requires an array of task objects with specific fields, use:\n ```typescript\n import { z } from \"zod\";\n\n const TaskUpdateSchema = z.object({\n id: z.number(),\n status: z.string(),\n // add other fields as needed\n });\n\n const BatchUpdateSchema = z.object({\n tasks: z.array(TaskUpdateSchema),\n from: z.number(),\n prompt: z.string().optional(),\n useResearch: z.boolean().optional(),\n });\n ```\n This ensures all incoming data for batch updates is validated at runtime, catching malformed input early and providing clear error messages[4][5].\n\n- **Function Schema Validation**: \n If exposing the update logic as a callable function (e.g., for CLI or API), consider using Zod's function schema to validate both input and output:\n ```typescript\n const updateTasksFunction = z\n .function()\n .args(BatchUpdateSchema, z.object({ session: z.any() }))\n .returns(z.promise(z.object({ success: z.boolean(), updated: z.number() })))\n .implement(async (input, { session }) => {\n // implementation here\n });\n ```\n This pattern enforces correct usage and output shape, improving reliability[1].\n\n- **Error Handling and Reporting**: \n Use Zod's `.safeParse()` or `.parse()` methods to validate input. On validation failure, return or throw a formatted error to the caller (CLI, API, etc.), ensuring actionable feedback for users[5].\n\n- **Consistent JSON Output**: \n When invoking the core update function from wrappers (CLI, MCP), ensure the output is always serialized as JSON. This is critical for downstream consumers and for automated tooling.\n\n- **Logger Wrapper Example**: \n Implement a logger utility that can be toggled for silent mode:\n ```typescript\n function createLogger(silent: boolean) {\n return {\n log: (...args: any[]) => { if (!silent) console.log(...args); },\n error: (...args: any[]) => { if (!silent) console.error(...args); }\n };\n }\n ```\n Pass this logger to the core logic for consistent, suppressible output.\n\n- **Session Context Usage**: \n Ensure all AI service calls and config access are routed through the provided session context, not global config getters. This supports multi-user and multi-session environments.\n\n- **Task Filtering Logic**: \n Before invoking the AI service, filter the tasks array to only include those with `id >= from` and `status === \"pending\"`. This preserves the intended batch update semantics.\n\n- **Preserve File Regeneration**: \n After updating tasks, ensure any logic that regenerates or writes task files is retained and invoked as before.\n\n- **CLI and API Parameter Validation**: \n Use the same Zod schemas to validate CLI arguments and API payloads, ensuring consistency across all entry points[5].\n\n- **Example: Validating CLI Arguments**\n ```typescript\n const cliArgsSchema = z.object({\n from: z.string().regex(/^\\d+$/).transform(Number),\n research: z.boolean().optional(),\n session: z.any(),\n });\n\n const parsedArgs = cliArgsSchema.parse(cliArgs);\n ```\n\nThese enhancements ensure robust validation, unified service usage, and maintainable, predictable batch update behavior.\n</info added on 2025-04-24T17:49:25.126Z>",
"status": "done",
"dependencies": [],
"parentTaskId": 61
},
{
"id": 42,
"title": "Remove all unused imports",
"description": "",
"details": "",
"status": "pending",
"dependencies": [],
"parentTaskId": 61
},
{
"id": 43,
"title": "Remove all unnecessary console logs",
"description": "",
"details": "",
"status": "pending",
"dependencies": [],
"parentTaskId": 61
@@ -3530,130 +3548,196 @@
{
"id": 63,
"title": "Add pnpm Support for the Taskmaster Package",
"description": "Implement full support for pnpm as an alternative package manager in the Taskmaster application, allowing users to install and manage the package using pnpm alongside the existing npm and yarn options.",
"details": "This task involves:\n\n1. Update the installation documentation to include pnpm installation commands (e.g., `pnpm add taskmaster`).\n\n2. Ensure all package scripts are compatible with pnpm's execution model:\n - Review and modify package.json scripts if necessary\n - Test script execution with pnpm syntax (`pnpm run <script>`)\n - Address any pnpm-specific path or execution differences\n\n3. Create a pnpm-lock.yaml file by installing dependencies with pnpm.\n\n4. Test the application's installation and operation when installed via pnpm:\n - Global installation (`pnpm add -g taskmaster`)\n - Local project installation\n - Verify CLI commands work correctly when installed with pnpm\n\n5. Update CI/CD pipelines to include testing with pnpm:\n - Add a pnpm test matrix to GitHub Actions workflows\n - Ensure tests pass when dependencies are installed with pnpm\n\n6. Handle any pnpm-specific dependency resolution issues:\n - Address potential hoisting differences between npm/yarn and pnpm\n - Test with pnpm's strict mode to ensure compatibility\n\n7. Document any pnpm-specific considerations or commands in the README and documentation.\n\n8. Consider adding a pnpm-specific installation script or helper if needed.\n\nThis implementation should maintain full feature parity regardless of which package manager is used to install Taskmaster.",
"testStrategy": "1. Manual Testing:\n - Install Taskmaster globally using pnpm: `pnpm add -g taskmaster`\n - Install Taskmaster locally in a test project: `pnpm add taskmaster`\n - Verify all CLI commands function correctly with both installation methods\n - Test all major features to ensure they work identically to npm/yarn installations\n\n2. Automated Testing:\n - Create a dedicated test workflow in GitHub Actions that uses pnpm\n - Run the full test suite using pnpm to install dependencies\n - Verify all tests pass with the same results as npm/yarn\n\n3. Documentation Testing:\n - Review all documentation to ensure pnpm commands are correctly documented\n - Verify installation instructions work as written\n - Test any pnpm-specific instructions or notes\n\n4. Compatibility Testing:\n - Test on different operating systems (Windows, macOS, Linux)\n - Verify compatibility with different pnpm versions (latest stable and LTS)\n - Test in environments with multiple package managers installed\n\n5. Edge Case Testing:\n - Test installation in a project that uses pnpm workspaces\n - Verify behavior when upgrading from an npm/yarn installation to pnpm\n - Test with pnpm's various flags and modes (--frozen-lockfile, --strict-peer-dependencies)\n\n6. Performance Comparison:\n - Measure and document any performance differences between package managers\n - Compare installation times and disk space usage\n\nSuccess criteria: Taskmaster should install and function identically regardless of whether it was installed via npm, yarn, or pnpm, with no degradation in functionality or performance.",
"description": "Implement full support for pnpm as an alternative package manager in the Taskmaster application, ensuring users have the exact same experience as with npm when installing and managing the package. The installation process, including any CLI prompts or web interfaces, must serve the exact same content and user experience regardless of whether npm or pnpm is used. The project uses 'module' as the package type, defines binaries 'task-master' and 'task-master-mcp', and its core logic resides in 'scripts/modules/'. The 'init' command (via scripts/init.js) creates the directory structure (.cursor/rules, scripts, tasks), copies templates (.env.example, .gitignore, rule files, dev.js), manages package.json merging, and sets up MCP config (.cursor/mcp.json). All dependencies are standard npm dependencies listed in package.json, and manual modifications are being removed.",
"status": "pending",
"dependencies": [],
"priority": "medium",
"details": "This task involves:\n\n1. Update the installation documentation to include pnpm installation commands (e.g., `pnpm add taskmaster`).\n\n2. Ensure all package scripts are compatible with pnpm's execution model:\n - Review and modify package.json scripts if necessary\n - Test script execution with pnpm syntax (`pnpm run <script>`)\n - Address any pnpm-specific path or execution differences\n - Confirm that scripts responsible for showing a website or prompt during install behave identically with pnpm and npm\n\n3. Create a pnpm-lock.yaml file by installing dependencies with pnpm.\n\n4. Test the application's installation and operation when installed via pnpm:\n - Global installation (`pnpm add -g taskmaster`)\n - Local project installation\n - Verify CLI commands work correctly when installed with pnpm\n - Verify binaries `task-master` and `task-master-mcp` are properly linked\n - Ensure the `init` command (scripts/init.js) correctly creates directory structure and copies templates as described\n\n5. Update CI/CD pipelines to include testing with pnpm:\n - Add a pnpm test matrix to GitHub Actions workflows\n - Ensure tests pass when dependencies are installed with pnpm\n\n6. Handle any pnpm-specific dependency resolution issues:\n - Address potential hoisting differences between npm and pnpm\n - Test with pnpm's strict mode to ensure compatibility\n - Verify proper handling of 'module' package type\n\n7. Document any pnpm-specific considerations or commands in the README and documentation.\n\n8. Verify that the `scripts/init.js` file works correctly with pnpm:\n - Ensure it properly creates `.cursor/rules`, `scripts`, and `tasks` directories\n - Verify template copying (`.env.example`, `.gitignore`, rule files, `dev.js`)\n - Confirm `package.json` merging works correctly\n - Test MCP config setup (`.cursor/mcp.json`)\n\n9. Ensure core logic in `scripts/modules/` works correctly when installed via pnpm.\n\nThis implementation should maintain full feature parity and identical user experience regardless of which package manager is used to install Taskmaster.",
"testStrategy": "1. Manual Testing:\n - Install Taskmaster globally using pnpm: `pnpm add -g taskmaster`\n - Install Taskmaster locally in a test project: `pnpm add taskmaster`\n - Verify all CLI commands function correctly with both installation methods\n - Test all major features to ensure they work identically to npm installations\n - Verify binaries `task-master` and `task-master-mcp` are properly linked and executable\n - Test the `init` command to ensure it correctly sets up the directory structure and files as defined in scripts/init.js\n\n2. Automated Testing:\n - Create a dedicated test workflow in GitHub Actions that uses pnpm\n - Run the full test suite using pnpm to install dependencies\n - Verify all tests pass with the same results as npm\n\n3. Documentation Testing:\n - Review all documentation to ensure pnpm commands are correctly documented\n - Verify installation instructions work as written\n - Test any pnpm-specific instructions or notes\n\n4. Compatibility Testing:\n - Test on different operating systems (Windows, macOS, Linux)\n - Verify compatibility with different pnpm versions (latest stable and LTS)\n - Test in environments with multiple package managers installed\n - Verify proper handling of 'module' package type\n\n5. Edge Case Testing:\n - Test installation in a project that uses pnpm workspaces\n - Verify behavior when upgrading from an npm installation to pnpm\n - Test with pnpm's various flags and modes (--frozen-lockfile, --strict-peer-dependencies)\n\n6. Performance Comparison:\n - Measure and document any performance differences between package managers\n - Compare installation times and disk space usage\n\n7. Structure Testing:\n - Verify that the core logic in `scripts/modules/` is accessible and functions correctly\n - Confirm that the `init` command properly creates all required directories and files as per scripts/init.js\n - Test package.json merging functionality\n - Verify MCP config setup\n\nSuccess criteria: Taskmaster should install and function identically regardless of whether it was installed via npm or pnpm, with no degradation in functionality, performance, or user experience. All binaries should be properly linked, and the directory structure should be correctly created.",
"subtasks": [
{
"id": 1,
"title": "Update Documentation for pnpm Support",
"description": "Revise installation and usage documentation to include pnpm commands and instructions for installing and managing Taskmaster with pnpm.",
"description": "Revise installation and usage documentation to include pnpm commands and instructions for installing and managing Taskmaster with pnpm. Clearly state that the installation process, including any website or UI shown, is identical to npm. Ensure documentation reflects the use of 'module' package type, binaries, and the init process as defined in scripts/init.js.",
"dependencies": [],
"details": "Add pnpm installation commands (e.g., `pnpm add taskmaster`) and update all relevant sections in the README and official docs to reflect pnpm as a supported package manager.",
"details": "Add pnpm installation commands (e.g., `pnpm add taskmaster`) and update all relevant sections in the README and official docs to reflect pnpm as a supported package manager. Document that any installation website or prompt is the same as with npm. Include notes on the 'module' package type, binaries, and the directory/template setup performed by scripts/init.js.",
"status": "pending",
"testStrategy": "Verify that documentation changes are clear, accurate, and render correctly in all documentation formats."
"testStrategy": "Verify that documentation changes are clear, accurate, and render correctly in all documentation formats. Confirm that documentation explicitly states the identical experience for npm and pnpm, including any website or UI shown during install, and describes the init process and binaries."
},
{
"id": 2,
"title": "Ensure Package Scripts Compatibility with pnpm",
"description": "Review and update package.json scripts to ensure they work seamlessly with pnpm's execution model.",
"description": "Review and update package.json scripts to ensure they work seamlessly with pnpm's execution model. Confirm that any scripts responsible for showing a website or prompt during install behave identically with pnpm and npm. Ensure compatibility with 'module' package type and correct binary definitions.",
"dependencies": [
1
],
"details": "Test all scripts using `pnpm run <script>`, address any pnpm-specific path or execution differences, and modify scripts as needed for compatibility.",
"details": "Test all scripts using `pnpm run <script>`, address any pnpm-specific path or execution differences, and modify scripts as needed for compatibility. Pay special attention to any scripts that trigger a website or prompt during installation, ensuring they serve the same content as npm. Validate that scripts/init.js and binaries are referenced correctly for ESM ('module') projects.",
"status": "pending",
"testStrategy": "Run all package scripts using pnpm and confirm expected behavior matches npm/yarn."
"testStrategy": "Run all package scripts using pnpm and confirm expected behavior matches npm, especially for any website or UI shown during install. Validate correct execution of scripts/init.js and binary linking."
},
{
"id": 3,
"title": "Generate and Validate pnpm Lockfile",
"description": "Install dependencies using pnpm to create a pnpm-lock.yaml file and ensure it accurately reflects the project's dependency tree.",
"description": "Install dependencies using pnpm to create a pnpm-lock.yaml file and ensure it accurately reflects the project's dependency tree, considering the 'module' package type.",
"dependencies": [
2
],
"details": "Run `pnpm install` to generate the lockfile, check it into version control, and verify that dependency resolution is correct and consistent.",
"details": "Run `pnpm install` to generate the lockfile, check it into version control, and verify that dependency resolution is correct and consistent. Ensure that all dependencies listed in package.json are resolved as expected for an ESM project.",
"status": "pending",
"testStrategy": "Compare dependency trees between npm/yarn and pnpm; ensure no missing or extraneous dependencies."
"testStrategy": "Compare dependency trees between npm and pnpm; ensure no missing or extraneous dependencies. Validate that the lockfile works for both CLI and init.js flows."
},
{
"id": 4,
"title": "Test Taskmaster Installation and Operation with pnpm",
"description": "Thoroughly test Taskmaster's installation and CLI operation when installed via pnpm, both globally and locally.",
"description": "Thoroughly test Taskmaster's installation and CLI operation when installed via pnpm, both globally and locally. Confirm that any website or UI shown during installation is identical to npm. Validate that binaries and the init process (scripts/init.js) work as expected.",
"dependencies": [
3
],
"details": "Perform global (`pnpm add -g taskmaster`) and local installations, verify CLI commands, and check for any pnpm-specific issues or incompatibilities.",
"details": "Perform global (`pnpm add -g taskmaster`) and local installations, verify CLI commands, and check for any pnpm-specific issues or incompatibilities. Ensure any installation UIs or websites appear identical to npm installations, including any website or prompt shown during install. Test that binaries 'task-master' and 'task-master-mcp' are linked and that scripts/init.js creates the correct structure and templates.",
"status": "pending",
"testStrategy": "Document and resolve any errors encountered during installation or usage with pnpm."
"testStrategy": "Document and resolve any errors encountered during installation or usage with pnpm. Compare the installation experience side-by-side with npm, including any website or UI shown during install. Validate directory and template setup as per scripts/init.js."
},
{
"id": 5,
"title": "Integrate pnpm into CI/CD Pipeline",
"description": "Update CI/CD workflows to include pnpm in the test matrix, ensuring all tests pass when dependencies are installed with pnpm.",
"description": "Update CI/CD workflows to include pnpm in the test matrix, ensuring all tests pass when dependencies are installed with pnpm. Confirm that tests cover the 'module' package type, binaries, and init process.",
"dependencies": [
4
],
"details": "Modify GitHub Actions or other CI configurations to use pnpm/action-setup, run tests with pnpm, and cache pnpm dependencies for efficiency.",
"details": "Modify GitHub Actions or other CI configurations to use pnpm/action-setup, run tests with pnpm, and cache pnpm dependencies for efficiency. Ensure that CI covers CLI commands, binary linking, and the directory/template setup performed by scripts/init.js.",
"status": "pending",
"testStrategy": "Confirm that CI passes for all supported package managers, including pnpm, and that pnpm-specific jobs are green."
"testStrategy": "Confirm that CI passes for all supported package managers, including pnpm, and that pnpm-specific jobs are green. Validate that tests cover ESM usage, binaries, and init.js flows."
},
{
"id": 6,
"title": "Verify Installation UI/Website Consistency",
"description": "Ensure any installation UIs, websites, or interactive prompts—including any website or prompt shown during install—appear and function identically when installing with pnpm compared to npm. Confirm that the experience is consistent for the 'module' package type and the init process.",
"dependencies": [
4
],
"details": "Identify all user-facing elements during the installation process, including any website or prompt shown during install, and verify they are consistent across package managers. If a website is shown during installation, ensure it appears the same regardless of package manager used. Validate that any prompts or UIs triggered by scripts/init.js are identical.",
"status": "pending",
"testStrategy": "Perform side-by-side installations with npm and pnpm, capturing screenshots of any UIs or websites for comparison. Test all interactive elements to ensure identical behavior, including any website or prompt shown during install and those from scripts/init.js."
},
{
"id": 7,
"title": "Test init.js Script with pnpm",
"description": "Verify that the scripts/init.js file works correctly when Taskmaster is installed via pnpm, creating the proper directory structure and copying all required templates as defined in the project structure.",
"dependencies": [
4
],
"details": "Test the init command to ensure it properly creates .cursor/rules, scripts, and tasks directories, copies templates (.env.example, .gitignore, rule files, dev.js), handles package.json merging, and sets up MCP config (.cursor/mcp.json) as per scripts/init.js.",
"status": "pending",
"testStrategy": "Run the init command after installing with pnpm and verify all directories and files are created correctly. Compare the results with an npm installation to ensure identical behavior and structure."
},
{
"id": 8,
"title": "Verify Binary Links with pnpm",
"description": "Ensure that the task-master and task-master-mcp binaries are properly defined in package.json, linked, and executable when installed via pnpm, in both global and local installations.",
"dependencies": [
4
],
"details": "Check that the binaries defined in package.json are correctly linked in node_modules/.bin when installed with pnpm, and that they can be executed without errors. Validate that binaries work for ESM ('module') projects and are accessible after both global and local installs.",
"status": "pending",
"testStrategy": "Install Taskmaster with pnpm and verify that the binaries are accessible and executable. Test both global and local installations, ensuring correct behavior for ESM projects."
}
]
},
{
"id": 64,
"title": "Add Yarn Support for Taskmaster Installation",
"description": "Implement full support for installing and managing Taskmaster using Yarn package manager, providing users with an alternative to npm and pnpm.",
"details": "This task involves adding comprehensive Yarn support to the Taskmaster package to ensure it can be properly installed and managed using Yarn. Implementation should include:\n\n1. Update package.json to ensure compatibility with Yarn installation methods\n2. Verify all scripts and dependencies work correctly with Yarn\n3. Add Yarn-specific configuration files (e.g., .yarnrc.yml if needed)\n4. Update installation documentation to include Yarn installation instructions\n5. Ensure all post-install scripts work correctly with Yarn\n6. Verify that all CLI commands function properly when installed via Yarn\n7. Handle any Yarn-specific package resolution or hoisting issues\n8. Test compatibility with different Yarn versions (classic and berry/v2+)\n9. Ensure proper lockfile generation and management\n10. Update any package manager detection logic in the codebase to recognize Yarn installations\n\nThe implementation should maintain feature parity regardless of which package manager (npm, pnpm, or Yarn) is used to install Taskmaster.",
"testStrategy": "Testing should verify complete Yarn support through the following steps:\n\n1. Fresh installation tests:\n - Install Taskmaster using `yarn add taskmaster` (global and local installations)\n - Verify installation completes without errors\n - Check that all binaries and executables are properly linked\n\n2. Functionality tests:\n - Run all Taskmaster commands on a Yarn-installed version\n - Verify all features work identically to npm/pnpm installations\n - Test with both Yarn v1 (classic) and Yarn v2+ (berry)\n\n3. Update/uninstall tests:\n - Test updating the package using Yarn commands\n - Verify clean uninstallation using Yarn\n\n4. CI integration:\n - Add Yarn installation tests to CI pipeline\n - Test on different operating systems (Windows, macOS, Linux)\n\n5. Documentation verification:\n - Ensure all documentation accurately reflects Yarn installation methods\n - Verify any Yarn-specific commands or configurations are properly documented\n\n6. Edge cases:\n - Test installation in monorepo setups using Yarn workspaces\n - Verify compatibility with other Yarn-specific features (plug'n'play, zero-installs)\n\nAll tests should pass with the same results as when using npm or pnpm.",
"description": "Implement full support for installing and managing Taskmaster using Yarn package manager, ensuring users have the exact same experience as with npm or pnpm. The installation process, including any CLI prompts or web interfaces, must serve the exact same content and user experience regardless of whether npm, pnpm, or Yarn is used. The project uses 'module' as the package type, defines binaries 'task-master' and 'task-master-mcp', and its core logic resides in 'scripts/modules/'. The 'init' command (via scripts/init.js) creates the directory structure (.cursor/rules, scripts, tasks), copies templates (.env.example, .gitignore, rule files, dev.js), manages package.json merging, and sets up MCP config (.cursor/mcp.json). All dependencies are standard npm dependencies listed in package.json, and manual modifications are being removed.",
"status": "pending",
"dependencies": [],
"priority": "medium",
"details": "This task involves adding comprehensive Yarn support to the Taskmaster package to ensure it can be properly installed and managed using Yarn. Implementation should include:\n\n1. Update package.json to ensure compatibility with Yarn installation methods, considering the 'module' package type and binary definitions\n2. Verify all scripts and dependencies work correctly with Yarn\n3. Add Yarn-specific configuration files (e.g., .yarnrc.yml if needed)\n4. Update installation documentation to include Yarn installation instructions\n5. Ensure all post-install scripts work correctly with Yarn\n6. Verify that all CLI commands function properly when installed via Yarn\n7. Ensure binaries `task-master` and `task-master-mcp` are properly linked\n8. Test the `scripts/init.js` file with Yarn to verify it correctly:\n - Creates directory structure (`.cursor/rules`, `scripts`, `tasks`)\n - Copies templates (`.env.example`, `.gitignore`, rule files, `dev.js`)\n - Manages `package.json` merging\n - Sets up MCP config (`.cursor/mcp.json`)\n9. Handle any Yarn-specific package resolution or hoisting issues\n10. Test compatibility with different Yarn versions (classic and berry/v2+)\n11. Ensure proper lockfile generation and management\n12. Update any package manager detection logic in the codebase to recognize Yarn installations\n13. Verify that core logic in `scripts/modules/` works correctly when installed via Yarn\n\nThe implementation should maintain feature parity and identical user experience regardless of which package manager (npm, pnpm, or Yarn) is used to install Taskmaster.",
"testStrategy": "Testing should verify complete Yarn support through the following steps:\n\n1. Fresh installation tests:\n - Install Taskmaster using `yarn add taskmaster` (global and local installations)\n - Verify installation completes without errors\n - Check that binaries `task-master` and `task-master-mcp` are properly linked\n - Test the `init` command to ensure it correctly sets up the directory structure and files as defined in scripts/init.js\n\n2. Functionality tests:\n - Run all Taskmaster commands on a Yarn-installed version\n - Verify all features work identically to npm installations\n - Test with both Yarn v1 (classic) and Yarn v2+ (berry)\n - Verify proper handling of 'module' package type\n\n3. Update/uninstall tests:\n - Test updating the package using Yarn commands\n - Verify clean uninstallation using Yarn\n\n4. CI integration:\n - Add Yarn installation tests to CI pipeline\n - Test on different operating systems (Windows, macOS, Linux)\n\n5. Documentation verification:\n - Ensure all documentation accurately reflects Yarn installation methods\n - Verify any Yarn-specific commands or configurations are properly documented\n\n6. Edge cases:\n - Test installation in monorepo setups using Yarn workspaces\n - Verify compatibility with other Yarn-specific features (plug'n'play, zero-installs)\n\n7. Structure Testing:\n - Verify that the core logic in `scripts/modules/` is accessible and functions correctly\n - Confirm that the `init` command properly creates all required directories and files as per scripts/init.js\n - Test package.json merging functionality\n - Verify MCP config setup\n\nAll tests should pass with the same results as when using npm, with identical user experience throughout the installation and usage process.",
"subtasks": [
{
"id": 1,
"title": "Update package.json for Yarn Compatibility",
"description": "Modify the package.json file to ensure all dependencies, scripts, and configurations are compatible with Yarn's installation and resolution methods.",
"description": "Modify the package.json file to ensure all dependencies, scripts, and configurations are compatible with Yarn's installation and resolution methods. Confirm that any scripts responsible for showing a website or prompt during install behave identically with Yarn and npm. Ensure compatibility with 'module' package type and correct binary definitions.",
"dependencies": [],
"details": "Review and update dependency declarations, script syntax, and any package manager-specific fields to avoid conflicts or unsupported features when using Yarn.",
"details": "Review and update dependency declarations, script syntax, and any package manager-specific fields to avoid conflicts or unsupported features when using Yarn. Pay special attention to any scripts that trigger a website or prompt during installation, ensuring they serve the same content as npm. Validate that scripts/init.js and binaries are referenced correctly for ESM ('module') projects.",
"status": "pending",
"testStrategy": "Run 'yarn install' and 'yarn run <script>' for all scripts to confirm successful execution and dependency resolution."
"testStrategy": "Run 'yarn install' and 'yarn run <script>' for all scripts to confirm successful execution and dependency resolution, especially for any website or UI shown during install. Validate correct execution of scripts/init.js and binary linking."
},
{
"id": 2,
"title": "Add Yarn-Specific Configuration Files",
"description": "Introduce Yarn-specific configuration files such as .yarnrc.yml if needed to optimize Yarn behavior and ensure consistent installs.",
"description": "Introduce Yarn-specific configuration files such as .yarnrc.yml if needed to optimize Yarn behavior and ensure consistent installs for 'module' package type and binary definitions.",
"dependencies": [
1
],
"details": "Determine if Yarn v2+ (Berry) or classic requires additional configuration for the project, and add or update .yarnrc.yml or .yarnrc files accordingly.",
"details": "Determine if Yarn v2+ (Berry) or classic requires additional configuration for the project, and add or update .yarnrc.yml or .yarnrc files accordingly. Ensure configuration supports ESM and binary linking.",
"status": "pending",
"testStrategy": "Verify that Yarn respects the configuration by running installs and checking for expected behaviors (e.g., plug'n'play, nodeLinker settings)."
"testStrategy": "Verify that Yarn respects the configuration by running installs and checking for expected behaviors (e.g., plug'n'play, nodeLinker settings, ESM support, binary linking)."
},
{
"id": 3,
"title": "Test and Fix Yarn Compatibility for Scripts and CLI",
"description": "Ensure all scripts, post-install hooks, and CLI commands function correctly when Taskmaster is installed and managed via Yarn.",
"description": "Ensure all scripts, post-install hooks, and CLI commands function correctly when Taskmaster is installed and managed via Yarn. Confirm that any website or UI shown during installation is identical to npm. Validate that binaries and the init process (scripts/init.js) work as expected.",
"dependencies": [
2
],
"details": "Test all lifecycle scripts, post-install actions, and CLI commands using Yarn. Address any issues related to environment variables, script execution, or dependency hoisting.",
"details": "Test all lifecycle scripts, post-install actions, and CLI commands using Yarn. Address any issues related to environment variables, script execution, or dependency hoisting. Ensure any website or prompt shown during install is the same as with npm. Validate that binaries 'task-master' and 'task-master-mcp' are linked and that scripts/init.js creates the correct structure and templates.",
"status": "pending",
"testStrategy": "Install Taskmaster using Yarn and run all documented scripts and CLI commands, comparing results to npm/pnpm installations."
"testStrategy": "Install Taskmaster using Yarn and run all documented scripts and CLI commands, comparing results to npm installations, especially for any website or UI shown during install. Validate directory and template setup as per scripts/init.js."
},
{
"id": 4,
"title": "Update Documentation for Yarn Installation and Usage",
"description": "Revise installation and usage documentation to include clear instructions for installing and managing Taskmaster with Yarn.",
"description": "Revise installation and usage documentation to include clear instructions for installing and managing Taskmaster with Yarn. Clearly state that the installation process, including any website or UI shown, is identical to npm. Ensure documentation reflects the use of 'module' package type, binaries, and the init process as defined in scripts/init.js.",
"dependencies": [
3
],
"details": "Add Yarn-specific installation commands, troubleshooting tips, and notes on version compatibility to the README and any relevant docs.",
"details": "Add Yarn-specific installation commands, troubleshooting tips, and notes on version compatibility to the README and any relevant docs. Document that any installation website or prompt is the same as with npm. Include notes on the 'module' package type, binaries, and the directory/template setup performed by scripts/init.js.",
"status": "pending",
"testStrategy": "Review documentation for accuracy and clarity; have a user follow the Yarn instructions to verify successful installation and usage."
"testStrategy": "Review documentation for accuracy and clarity; have a user follow the Yarn instructions to verify successful installation and usage. Confirm that documentation explicitly states the identical experience for npm and Yarn, including any website or UI shown during install, and describes the init process and binaries."
},
{
"id": 5,
"title": "Implement and Test Package Manager Detection Logic",
"description": "Update or add logic in the codebase to detect Yarn installations and handle Yarn-specific behaviors, ensuring feature parity across package managers.",
"description": "Update or add logic in the codebase to detect Yarn installations and handle Yarn-specific behaviors, ensuring feature parity across package managers. Ensure detection logic works for 'module' package type and binary definitions.",
"dependencies": [
4
],
"details": "Modify detection logic to recognize Yarn (classic and berry), handle lockfile generation, and resolve any Yarn-specific package resolution or hoisting issues.",
"details": "Modify detection logic to recognize Yarn (classic and berry), handle lockfile generation, and resolve any Yarn-specific package resolution or hoisting issues. Ensure detection logic supports ESM and binary linking.",
"status": "pending",
"testStrategy": "Install Taskmaster using npm, pnpm, and Yarn (classic and berry), verifying that the application detects the package manager correctly and behaves consistently."
"testStrategy": "Install Taskmaster using npm, pnpm, and Yarn (classic and berry), verifying that the application detects the package manager correctly and behaves consistently for ESM projects and binaries."
},
{
"id": 6,
"title": "Verify Installation UI/Website Consistency",
"description": "Ensure any installation UIs, websites, or interactive prompts—including any website or prompt shown during install—appear and function identically when installing with Yarn compared to npm. Confirm that the experience is consistent for the 'module' package type and the init process.",
"dependencies": [
3
],
"details": "Identify all user-facing elements during the installation process, including any website or prompt shown during install, and verify they are consistent across package managers. If a website is shown during installation, ensure it appears the same regardless of package manager used. Validate that any prompts or UIs triggered by scripts/init.js are identical.",
"status": "pending",
"testStrategy": "Perform side-by-side installations with npm and Yarn, capturing screenshots of any UIs or websites for comparison. Test all interactive elements to ensure identical behavior, including any website or prompt shown during install and those from scripts/init.js."
},
{
"id": 7,
"title": "Test init.js Script with Yarn",
"description": "Verify that the scripts/init.js file works correctly when Taskmaster is installed via Yarn, creating the proper directory structure and copying all required templates as defined in the project structure.",
"dependencies": [
3
],
"details": "Test the init command to ensure it properly creates .cursor/rules, scripts, and tasks directories, copies templates (.env.example, .gitignore, rule files, dev.js), handles package.json merging, and sets up MCP config (.cursor/mcp.json) as per scripts/init.js.",
"status": "pending",
"testStrategy": "Run the init command after installing with Yarn and verify all directories and files are created correctly. Compare the results with an npm installation to ensure identical behavior and structure."
},
{
"id": 8,
"title": "Verify Binary Links with Yarn",
"description": "Ensure that the task-master and task-master-mcp binaries are properly defined in package.json, linked, and executable when installed via Yarn, in both global and local installations.",
"dependencies": [
3
],
"details": "Check that the binaries defined in package.json are correctly linked in node_modules/.bin when installed with Yarn, and that they can be executed without errors. Validate that binaries work for ESM ('module') projects and are accessible after both global and local installs.",
"status": "pending",
"testStrategy": "Install Taskmaster with Yarn and verify that the binaries are accessible and executable. Test both global and local installations, ensuring correct behavior for ESM projects."
}
]
}