diff --git a/package.json b/package.json
index b73e896e..e824a6ff 100644
--- a/package.json
+++ b/package.json
@@ -11,6 +11,7 @@
},
"scripts": {
"test": "node --experimental-vm-modules node_modules/.bin/jest",
+ "test:fails": "node --experimental-vm-modules node_modules/.bin/jest --onlyFailures",
"test:watch": "node --experimental-vm-modules node_modules/.bin/jest --watch",
"test:coverage": "node --experimental-vm-modules node_modules/.bin/jest --coverage",
"prepare-package": "node scripts/prepare-package.js",
diff --git a/scripts/modules/task-manager.js b/scripts/modules/task-manager.js
index f5dbef3c..f40f2795 100644
--- a/scripts/modules/task-manager.js
+++ b/scripts/modules/task-manager.js
@@ -37,7 +37,9 @@ import {
callClaude,
generateSubtasks,
generateSubtasksWithPerplexity,
- generateComplexityAnalysisPrompt
+ generateComplexityAnalysisPrompt,
+ getAvailableAIModel,
+ handleClaudeError
} from './ai-services.js';
import {
@@ -2978,6 +2980,7 @@ async function removeSubtask(tasksPath, subtaskId, convertToTask = false, genera
* @returns {Object|null} - The updated subtask or null if update failed
*/
async function updateSubtaskById(tasksPath, subtaskId, prompt, useResearch = false) {
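+  // Keep the loading indicator at function scope so the catch/finally blocks can always stop it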
+ let loadingIndicator = null;
try {
log('info', `Updating subtask ${subtaskId} with prompt: "${prompt}"`);
@@ -2991,12 +2994,8 @@ async function updateSubtaskById(tasksPath, subtaskId, prompt, useResearch = fal
throw new Error('Prompt cannot be empty. Please provide context for the subtask update.');
}
- // Validate research flag
- if (useResearch && (!perplexity || !process.env.PERPLEXITY_API_KEY)) {
- log('warn', 'Perplexity AI is not available. Falling back to Claude AI.');
- console.log(chalk.yellow('Perplexity AI is not available (API key may be missing). Falling back to Claude AI.'));
- useResearch = false;
- }
+ // Prepare for fallback handling
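+      // claudeOverloaded is set when Claude reports an overload so the retry loop can switch to Perplexity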
+ let claudeOverloaded = false;
// Validate tasks file exists
if (!fs.existsSync(tasksPath)) {
@@ -3070,209 +3069,258 @@ async function updateSubtaskById(tasksPath, subtaskId, prompt, useResearch = fal
console.log(table.toString());
- // Build the system prompt
- const systemPrompt = `You are an AI assistant helping to enhance a software development subtask with additional information.
-You will be given a subtask and a prompt requesting specific details or clarification.
-Your job is to generate concise, technically precise information that addresses the prompt.
+ // Start the loading indicator
+ loadingIndicator = startLoadingIndicator('Generating additional information with AI...');
-Guidelines:
-1. Focus ONLY on generating the additional information requested in the prompt
-2. Be specific, technical, and actionable in your response
-3. Keep your response as low level as possible, the goal is to provide the most detailed information possible to complete the task.
-4. Format your response to be easily readable when appended to existing text
-5. Include code snippets, links to documentation, or technical details when appropriate
-6. Do NOT include any preamble, conclusion or meta-commentary
-7. Return ONLY the new information to be added - do not repeat or summarize existing content`;
+      // Build the system prompt
+ const systemPrompt = `You are an AI assistant helping to update software development subtasks with additional information.
+Given a subtask, you will provide additional details, implementation notes, or technical insights based on user request.
+Focus only on adding content that enhances the subtask - don't repeat existing information.
+Be technical, specific, and implementation-focused rather than general.
+Provide concrete examples, code snippets, or implementation details when relevant.`;
- const subtaskData = JSON.stringify(subtask, null, 2);
+      // Select a model and generate, falling back to another model on overload
+ let additionalInformation = '';
+ let modelAttempts = 0;
+ const maxModelAttempts = 2; // Try up to 2 models before giving up
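+      // Two attempts covers the primary model plus one fallback (e.g. Claude first, then Perplexity)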
- let additionalInformation;
- const loadingIndicator = startLoadingIndicator(useResearch
- ? 'Generating additional information with Perplexity AI research...'
- : 'Generating additional information with Claude AI...');
-
- try {
- if (useResearch) {
- log('info', 'Using Perplexity AI for research-backed subtask update');
+ while (modelAttempts < maxModelAttempts && !additionalInformation) {
+ modelAttempts++; // Increment attempt counter at the start
+ const isLastAttempt = modelAttempts >= maxModelAttempts;
+ let modelType = null; // Declare modelType outside the try block
+
+ try {
+ // Get the best available model based on our current state
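+          // Expected to return { type: 'claude' | 'perplexity', client }, preferring
+          // Perplexity when research is required or Claude is marked overloaded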
+ const result = getAvailableAIModel({
+ claudeOverloaded,
+ requiresResearch: useResearch
+ });
+ modelType = result.type;
+ const client = result.client;
- // Verify Perplexity API key exists
- if (!process.env.PERPLEXITY_API_KEY) {
- throw new Error('PERPLEXITY_API_KEY environment variable is missing but --research flag was used.');
- }
-
- try {
- // Call Perplexity AI
+ log('info', `Attempt ${modelAttempts}/${maxModelAttempts}: Generating subtask info using ${modelType}`);
+ // Update loading indicator text
+ stopLoadingIndicator(loadingIndicator); // Stop previous indicator
+ loadingIndicator = startLoadingIndicator(`Attempt ${modelAttempts}: Using ${modelType.toUpperCase()}...`);
+
+ const subtaskData = JSON.stringify(subtask, null, 2);
+ const userMessageContent = `Here is the subtask to enhance:\n${subtaskData}\n\nPlease provide additional information addressing this request:\n${prompt}\n\nReturn ONLY the new information to add - do not repeat existing content.`;
+
+ if (modelType === 'perplexity') {
+ // Construct Perplexity payload
const perplexityModel = process.env.PERPLEXITY_MODEL || 'sonar-pro';
- const result = await perplexity.chat.completions.create({
+ const response = await client.chat.completions.create({
model: perplexityModel,
messages: [
- {
- role: "system",
- content: `${systemPrompt}\n\nUse your online search capabilities to research up-to-date information about the technologies and concepts mentioned in the subtask. Look for best practices, common issues, and implementation details that would be helpful.`
- },
- {
- role: "user",
- content: `Here is the subtask to enhance:
-${subtaskData}
-
-Please provide additional information addressing this request:
-${prompt}
-
-Return ONLY the new information to add - do not repeat existing content.`
- }
+ { role: 'system', content: systemPrompt },
+ { role: 'user', content: userMessageContent }
],
temperature: parseFloat(process.env.TEMPERATURE || CONFIG.temperature),
max_tokens: parseInt(process.env.MAX_TOKENS || CONFIG.maxTokens),
});
-
- additionalInformation = result.choices[0].message.content.trim();
- } catch (perplexityError) {
- throw new Error(`Perplexity API error: ${perplexityError.message}`);
- }
- } else {
- // Call Claude to generate additional information
- try {
- // Verify Anthropic API key exists
- if (!process.env.ANTHROPIC_API_KEY) {
- throw new Error('ANTHROPIC_API_KEY environment variable is missing. Required for subtask updates.');
- }
-
- // Use streaming API call
+ additionalInformation = response.choices[0].message.content.trim();
+ } else { // Claude
let responseText = '';
let streamingInterval = null;
-
- // Update loading indicator to show streaming progress
let dotCount = 0;
const readline = await import('readline');
- streamingInterval = setInterval(() => {
- readline.cursorTo(process.stdout, 0);
- process.stdout.write(`Receiving streaming response from Claude${'.'.repeat(dotCount)}`);
- dotCount = (dotCount + 1) % 4;
- }, 500);
-
- // Use streaming API call
- const stream = await anthropic.messages.create({
- model: CONFIG.model,
- max_tokens: CONFIG.maxTokens,
- temperature: CONFIG.temperature,
- system: systemPrompt,
- messages: [
- {
- role: 'user',
- content: `Here is the subtask to enhance:
-${subtaskData}
-Please provide additional information addressing this request:
-${prompt}
+ try {
+ streamingInterval = setInterval(() => {
+ readline.cursorTo(process.stdout, 0);
+ process.stdout.write(`Receiving streaming response from Claude${'.'.repeat(dotCount)}`);
+ dotCount = (dotCount + 1) % 4;
+ }, 500);
-Return ONLY the new information to add - do not repeat existing content.`
+ // Construct Claude payload
+ const stream = await client.messages.create({
+ model: CONFIG.model,
+ max_tokens: CONFIG.maxTokens,
+ temperature: CONFIG.temperature,
+ system: systemPrompt,
+ messages: [
+ { role: 'user', content: userMessageContent }
+ ],
+ stream: true
+ });
+
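+            // Anthropic streaming yields typed events; text arrives in content_block_delta chunks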
+ for await (const chunk of stream) {
+ if (chunk.type === 'content_block_delta' && chunk.delta.text) {
+ responseText += chunk.delta.text;
}
- ],
- stream: true
- });
-
- // Process the stream
- for await (const chunk of stream) {
- if (chunk.type === 'content_block_delta' && chunk.delta.text) {
- responseText += chunk.delta.text;
}
+ } finally {
+ if (streamingInterval) clearInterval(streamingInterval);
+ // Clear the loading dots line
+ readline.cursorTo(process.stdout, 0);
+ process.stdout.clearLine(0);
}
-
- if (streamingInterval) clearInterval(streamingInterval);
- log('info', "Completed streaming response from Claude API!");
-
+
+ log('info', `Completed streaming response from Claude API! (Attempt ${modelAttempts})`);
additionalInformation = responseText.trim();
- } catch (claudeError) {
- throw new Error(`Claude API error: ${claudeError.message}`);
}
- }
-
- // Validate the generated information
- if (!additionalInformation || additionalInformation.trim() === '') {
- throw new Error('Received empty response from AI. Unable to generate additional information.');
- }
-
- // Create timestamp
- const currentDate = new Date();
- const timestamp = currentDate.toISOString();
-
- // Format the additional information with timestamp
-      const formattedInformation = `\n\n<info added on ${timestamp}>\n${additionalInformation}\n</info added on ${timestamp}>`;
-
- // Append to subtask details and description
- if (subtask.details) {
- subtask.details += formattedInformation;
- } else {
- subtask.details = `${formattedInformation}`;
- }
-
- if (subtask.description) {
- // Only append to description if it makes sense (for shorter updates)
- if (additionalInformation.length < 200) {
- subtask.description += ` [Updated: ${currentDate.toLocaleDateString()}]`;
+
+ // Success - break the loop
+ if (additionalInformation) {
+ log('info', `Successfully generated information using ${modelType} on attempt ${modelAttempts}.`);
+ break;
+ } else {
+ // Handle case where AI gave empty response without erroring
+ log('warn', `AI (${modelType}) returned empty response on attempt ${modelAttempts}.`);
+ if (isLastAttempt) {
+ throw new Error('AI returned empty response after maximum attempts.');
+ }
+ // Allow loop to continue to try another model/attempt if possible
}
- }
-
- // Update the subtask in the parent task
- const subtaskIndex = parentTask.subtasks.findIndex(st => st.id === subtaskIdNum);
- if (subtaskIndex !== -1) {
- parentTask.subtasks[subtaskIndex] = subtask;
- } else {
- throw new Error(`Subtask with ID ${subtaskId} not found in parent task's subtasks array.`);
- }
-
- // Update the parent task in the original data
- const parentIndex = data.tasks.findIndex(t => t.id === parentId);
- if (parentIndex !== -1) {
- data.tasks[parentIndex] = parentTask;
- } else {
- throw new Error(`Parent task with ID ${parentId} not found in tasks array.`);
- }
-
- // Write the updated tasks to the file
- writeJSON(tasksPath, data);
-
- log('success', `Successfully updated subtask ${subtaskId}`);
-
- // Generate individual task files
- await generateTaskFiles(tasksPath, path.dirname(tasksPath));
-
- console.log(boxen(
- chalk.green(`Successfully updated subtask #${subtaskId}`) + '\n\n' +
- chalk.white.bold('Title:') + ' ' + subtask.title + '\n\n' +
- chalk.white.bold('Information Added:') + '\n' +
- chalk.white(truncate(additionalInformation, 300, true)),
- { padding: 1, borderColor: 'green', borderStyle: 'round' }
- ));
-
- // Return the updated subtask for testing purposes
- return subtask;
- } finally {
- stopLoadingIndicator(loadingIndicator);
+
+ } catch (modelError) {
+        const failedModel = modelType || modelError.modelType || 'unknown model';
+ log('warn', `Attempt ${modelAttempts} failed using ${failedModel}: ${modelError.message}`);
+
+ // --- More robust overload check ---
+ let isOverload = false;
+ // Check 1: SDK specific property (common pattern)
+ if (modelError.type === 'overloaded_error') {
+ isOverload = true;
+ }
+ // Check 2: Check nested error property (as originally intended)
+ else if (modelError.error?.type === 'overloaded_error') {
+ isOverload = true;
+ }
+ // Check 3: Check status code if available (e.g., 429 Too Many Requests or 529 Overloaded)
+ else if (modelError.status === 429 || modelError.status === 529) {
+ isOverload = true;
+ }
+ // Check 4: Check the message string itself (less reliable)
+ else if (modelError.message?.toLowerCase().includes('overloaded')) {
+ isOverload = true;
+ }
+ // --- End robust check ---
+
+ if (isOverload) { // Use the result of the check
+ claudeOverloaded = true; // Mark Claude as overloaded for the *next* potential attempt
+ if (!isLastAttempt) {
+ log('info', 'Claude overloaded. Will attempt fallback model if available.');
+ // Stop the current indicator before continuing
+ if (loadingIndicator) {
+ stopLoadingIndicator(loadingIndicator);
+ loadingIndicator = null; // Reset indicator
+ }
+ continue; // Go to next iteration of the while loop to try fallback
+ } else {
+ // It was the last attempt, and it failed due to overload
+ log('error', `Overload error on final attempt (${modelAttempts}/${maxModelAttempts}). No fallback possible.`);
+          // Don't throw here; the loop will exit and the empty-result check below raises the final error.
+        }
+ } else { // Error was NOT an overload
+ // If it's not an overload, throw it immediately to be caught by the outer catch.
+ log('error', `Non-overload error on attempt ${modelAttempts}: ${modelError.message}`);
+ throw modelError; // Re-throw non-overload errors immediately.
+ }
+ } // End inner catch
+ } // End while loop
+
+ // If loop finished without getting information
+ if (!additionalInformation) {
+ throw new Error('Failed to generate additional information after all attempts.');
}
+
+ // Create timestamp
+ const currentDate = new Date();
+ const timestamp = currentDate.toISOString();
+
+ // Format the additional information with timestamp
+    const formattedInformation = `\n\n<info added on ${timestamp}>\n${additionalInformation}\n</info added on ${timestamp}>`;
+
+ // Append to subtask details and description
+ if (subtask.details) {
+ subtask.details += formattedInformation;
+ } else {
+ subtask.details = `${formattedInformation}`;
+ }
+
+ if (subtask.description) {
+ // Only append to description if it makes sense (for shorter updates)
+ if (additionalInformation.length < 200) {
+        subtask.description += ` [Updated: ${currentDate.toLocaleDateString()}]`;
+ }
+ }
+
+    // Update the subtask in the parent task
+    const subtaskIndex = parentTask.subtasks.findIndex(st => st.id === subtaskIdNum);
+    if (subtaskIndex !== -1) {
+      parentTask.subtasks[subtaskIndex] = subtask;
+    } else {
+      throw new Error(`Subtask with ID ${subtaskId} not found in parent task's subtasks array.`);
+    }
+
+    // Update the parent task in the original data
+    const parentIndex = data.tasks.findIndex(t => t.id === parentId);
+    if (parentIndex !== -1) {
+      data.tasks[parentIndex] = parentTask;
+    } else {
+      throw new Error(`Parent task with ID ${parentId} not found in tasks array.`);
+    }
+
+    // Write the updated tasks to the file
+    writeJSON(tasksPath, data);
+
+ log('success', `Successfully updated subtask ${subtaskId}`);
+
+ // Generate individual task files
+    await generateTaskFiles(tasksPath, path.dirname(tasksPath));
+
+ // Stop indicator *before* final console output
+ stopLoadingIndicator(loadingIndicator);
+ loadingIndicator = null;
+
+ console.log(boxen(
+ chalk.green(`Successfully updated subtask #${subtaskId}`) + '\n\n' +
+ chalk.white.bold('Title:') + ' ' + subtask.title + '\n\n' +
+ chalk.white.bold('Information Added:') + '\n' +
+ chalk.white(truncate(additionalInformation, 300, true)),
+ { padding: 1, borderColor: 'green', borderStyle: 'round' }
+ ));
+
+ return subtask;
+
} catch (error) {
+ // Outer catch block handles final errors after loop/attempts
+ stopLoadingIndicator(loadingIndicator); // Ensure indicator is stopped on error
+ loadingIndicator = null;
log('error', `Error updating subtask: ${error.message}`);
console.error(chalk.red(`Error: ${error.message}`));
-
- // Provide more helpful error messages for common issues
- if (error.message.includes('ANTHROPIC_API_KEY')) {
- console.log(chalk.yellow('\nTo fix this issue, set your Anthropic API key:'));
- console.log(' export ANTHROPIC_API_KEY=your_api_key_here');
- } else if (error.message.includes('PERPLEXITY_API_KEY')) {
- console.log(chalk.yellow('\nTo fix this issue:'));
- console.log(' 1. Set your Perplexity API key: export PERPLEXITY_API_KEY=your_api_key_here');
-    console.log('  2. Or run without the research flag: task-master update-subtask --id=<id> --prompt="..."');
- } else if (error.message.includes('not found')) {
- console.log(chalk.yellow('\nTo fix this issue:'));
- console.log(' 1. Run task-master list --with-subtasks to see all available subtask IDs');
- console.log(' 2. Use a valid subtask ID with the --id parameter in format "parentId.subtaskId"');
- }
-
+
+    // Provide more helpful error messages for common issues
+ if (error.message?.includes('ANTHROPIC_API_KEY')) {
+ console.log(chalk.yellow('\nTo fix this issue, set your Anthropic API key:'));
+ console.log(' export ANTHROPIC_API_KEY=your_api_key_here');
+ } else if (error.message?.includes('PERPLEXITY_API_KEY')) {
+ console.log(chalk.yellow('\nTo fix this issue:'));
+ console.log(' 1. Set your Perplexity API key: export PERPLEXITY_API_KEY=your_api_key_here');
+      console.log('  2. Or run without the research flag: task-master update-subtask --id=<id> --prompt="..."');
+ } else if (error.message?.includes('overloaded')) { // Catch final overload error
+ console.log(chalk.yellow('\nAI model overloaded, and fallback failed or was unavailable:'));
+ console.log(' 1. Try again in a few minutes.');
+ console.log(' 2. Ensure PERPLEXITY_API_KEY is set for fallback.');
+ console.log(' 3. Consider breaking your prompt into smaller updates.');
+ } else if (error.message?.includes('not found')) {
+ console.log(chalk.yellow('\nTo fix this issue:'));
+ console.log(' 1. Run task-master list --with-subtasks to see all available subtask IDs');
+      console.log('  2. Use a valid subtask ID with the --id parameter in format "parentId.subtaskId"');
+ } else if (error.message?.includes('empty response from AI')) {
+ console.log(chalk.yellow('\nThe AI model returned an empty response. This might be due to the prompt or API issues. Try rephrasing or trying again later.'));
+ }
+
if (CONFIG.debug) {
console.error(error);
}
-
+
return null;
+ } finally {
+ // Final cleanup check for the indicator, although it should be stopped by now
+ if (loadingIndicator) {
+ stopLoadingIndicator(loadingIndicator);
+ }
}
}
diff --git a/scripts/modules/ui.js b/scripts/modules/ui.js
index c541b2ff..728e56d8 100644
--- a/scripts/modules/ui.js
+++ b/scripts/modules/ui.js
@@ -677,6 +677,15 @@ async function displayTaskById(tasksPath, taskId) {
console.log(taskTable.toString());
+ // Show details if they exist for subtasks
+ if (task.details && task.details.trim().length > 0) {
+ console.log(boxen(
+ chalk.white.bold('Implementation Details:') + '\n\n' +
+ task.details,
+ { padding: { top: 0, bottom: 0, left: 1, right: 1 }, borderColor: 'cyan', borderStyle: 'round', margin: { top: 1, bottom: 0 } }
+ ));
+ }
+
// Show action suggestions for subtask
console.log(boxen(
chalk.white.bold('Suggested Actions:') + '\n' +
diff --git a/tasks/task_023.txt b/tasks/task_023.txt
index eb4ad391..5842d3c0 100644
--- a/tasks/task_023.txt
+++ b/tasks/task_023.txt
@@ -226,12 +226,69 @@ Testing approach:
### Dependencies: 23.13
### Description: Refactor the MCP server implementation to use direct Task Master function imports instead of the current CLI-based execution using child_process.spawnSync. This will improve performance, reliability, and enable better error handling.
### Details:
-1. Create a new module to import and expose Task Master core functions directly
-2. Modify tools/utils.js to remove executeTaskMasterCommand and replace with direct function calls
-3. Update each tool implementation (listTasks.js, showTask.js, etc.) to use the direct function imports
-4. Implement proper error handling with try/catch blocks and FastMCP's MCPError
-5. Add unit tests to verify the function imports work correctly
-6. Test performance improvements by comparing response times between CLI and function import approaches
+
+
+
+# Refactoring Strategy for Direct Function Imports
+
+## Core Approach
+1. Create a clear separation between data retrieval/processing and presentation logic
+2. Modify function signatures to accept `outputFormat` parameter ('cli'|'json', default: 'cli')
+3. Implement early returns for JSON format to bypass CLI-specific code
+
+## Implementation Details for `listTasks`
+```javascript
+function listTasks(tasksPath, statusFilter, withSubtasks = false, outputFormat = 'cli') {
+ try {
+ // Existing data retrieval logic
+ const filteredTasks = /* ... */;
+
+ // Early return for JSON format
+ if (outputFormat === 'json') return filteredTasks;
+
+ // Existing CLI output logic
+ } catch (error) {
+ if (outputFormat === 'json') {
+ throw {
+ code: 'TASK_LIST_ERROR',
+ message: error.message,
+ details: error.stack
+ };
+ } else {
+ console.error(error);
+ process.exit(1);
+ }
+ }
+}
+```
+
+## Testing Strategy
+- Create integration tests in `tests/integration/mcp-server/`
+- Use FastMCP InMemoryTransport for direct client-server testing
+- Test both JSON and CLI output formats
+- Verify structure consistency with schema validation
+
+## Additional Considerations
+- Update JSDoc comments to document new parameters and return types
+- Ensure backward compatibility with default CLI behavior
+- Add JSON schema validation for consistent output structure
+- Apply similar pattern to other core functions (expandTask, updateTaskById, etc.)
+
+## Error Handling Improvements
+- Standardize error format for JSON returns:
+```javascript
+{
+ code: 'ERROR_CODE',
+ message: 'Human-readable message',
+ details: {}, // Additional context when available
+ stack: process.env.NODE_ENV === 'development' ? error.stack : undefined
+}
+```
+- Enrich JSON errors with error codes and debug info
+- Ensure validation failures return proper objects in JSON mode
+
## 9. Implement Context Management and Caching Mechanisms [deferred]
### Dependencies: 23.1
diff --git a/tasks/tasks.json b/tasks/tasks.json
index e404d3b9..340205bd 100644
--- a/tasks/tasks.json
+++ b/tasks/tasks.json
@@ -1399,7 +1399,7 @@
"dependencies": [
"23.13"
],
- "details": "1. Create a new module to import and expose Task Master core functions directly\n2. Modify tools/utils.js to remove executeTaskMasterCommand and replace with direct function calls\n3. Update each tool implementation (listTasks.js, showTask.js, etc.) to use the direct function imports\n4. Implement proper error handling with try/catch blocks and FastMCP's MCPError\n5. Add unit tests to verify the function imports work correctly\n6. Test performance improvements by comparing response times between CLI and function import approaches",
+ "details": "\n\n\n```\n# Refactoring Strategy for Direct Function Imports\n\n## Core Approach\n1. Create a clear separation between data retrieval/processing and presentation logic\n2. Modify function signatures to accept `outputFormat` parameter ('cli'|'json', default: 'cli')\n3. Implement early returns for JSON format to bypass CLI-specific code\n\n## Implementation Details for `listTasks`\n```javascript\nfunction listTasks(tasksPath, statusFilter, withSubtasks = false, outputFormat = 'cli') {\n try {\n // Existing data retrieval logic\n const filteredTasks = /* ... */;\n \n // Early return for JSON format\n if (outputFormat === 'json') return filteredTasks;\n \n // Existing CLI output logic\n } catch (error) {\n if (outputFormat === 'json') {\n throw {\n code: 'TASK_LIST_ERROR',\n message: error.message,\n details: error.stack\n };\n } else {\n console.error(error);\n process.exit(1);\n }\n }\n}\n```\n\n## Testing Strategy\n- Create integration tests in `tests/integration/mcp-server/`\n- Use FastMCP InMemoryTransport for direct client-server testing\n- Test both JSON and CLI output formats\n- Verify structure consistency with schema validation\n\n## Additional Considerations\n- Update JSDoc comments to document new parameters and return types\n- Ensure backward compatibility with default CLI behavior\n- Add JSON schema validation for consistent output structure\n- Apply similar pattern to other core functions (expandTask, updateTaskById, etc.)\n\n## Error Handling Improvements\n- Standardize error format for JSON returns:\n```javascript\n{\n code: 'ERROR_CODE',\n message: 'Human-readable message',\n details: {}, // Additional context when available\n stack: process.env.NODE_ENV === 'development' ? error.stack : undefined\n}\n```\n- Enrich JSON errors with error codes and debug info\n- Ensure validation failures return proper objects in JSON mode\n```\n",
"status": "in-progress",
"parentTaskId": 23
},
diff --git a/tests/unit/ai-services.test.js b/tests/unit/ai-services.test.js
index c3e8c112..232b93bc 100644
--- a/tests/unit/ai-services.test.js
+++ b/tests/unit/ai-services.test.js
@@ -311,10 +311,17 @@ These subtasks will help you implement the parent task efficiently.`;
}
};
+ // Mock process.env to include PERPLEXITY_API_KEY
+ const originalEnv = process.env;
+ process.env = { ...originalEnv, PERPLEXITY_API_KEY: 'test-key' };
+
const result = handleClaudeError(error);
- expect(result).toContain('Claude is currently experiencing high demand');
- expect(result).toContain('overloaded');
+ // Restore original env
+ process.env = originalEnv;
+
+ expect(result).toContain('Claude is currently overloaded');
+ expect(result).toContain('fall back to Perplexity AI');
});
test('should handle rate_limit_error type', () => {
diff --git a/tests/unit/task-manager.test.js b/tests/unit/task-manager.test.js
index 043e6265..1db520df 100644
--- a/tests/unit/task-manager.test.js
+++ b/tests/unit/task-manager.test.js
@@ -24,6 +24,7 @@ const mockLog = jest.fn();
const mockIsTaskDependentOn = jest.fn().mockReturnValue(false);
const mockCreate = jest.fn(); // Mock for Anthropic messages.create
const mockChatCompletionsCreate = jest.fn(); // Mock for Perplexity chat.completions.create
+const mockGetAvailableAIModel = jest.fn(); // Mock for ai-services getAvailableAIModel
// Mock fs module
jest.mock('fs', () => ({
@@ -43,7 +44,12 @@ jest.mock('path', () => ({
jest.mock('../../scripts/modules/ui.js', () => ({
formatDependenciesWithStatus: mockFormatDependenciesWithStatus,
displayBanner: jest.fn(),
- displayTaskList: mockDisplayTaskList
+ displayTaskList: mockDisplayTaskList,
+  startLoadingIndicator: jest.fn(() => ({ stop: jest.fn() })),
+  stopLoadingIndicator: jest.fn(),
+  createProgressBar: jest.fn(() => ' MOCK_PROGRESS_BAR '), // Used by listTasks
+ getStatusWithColor: jest.fn(status => status), // Basic mock for status
+ getComplexityWithColor: jest.fn(score => `Score: ${score}`), // Basic mock for complexity
}));
// Mock dependency-manager
@@ -56,13 +62,31 @@ jest.mock('../../scripts/modules/dependency-manager.js', () => ({
jest.mock('../../scripts/modules/utils.js', () => ({
writeJSON: mockWriteJSON,
readJSON: mockReadJSON,
- log: mockLog
+ log: mockLog,
+  CONFIG: { // Mocked CONFIG values used by task-manager
+ model: 'mock-claude-model',
+ maxTokens: 4000,
+ temperature: 0.7,
+ debug: false,
+ defaultSubtasks: 3,
+ // Add other necessary CONFIG properties if needed
+ },
+  sanitizePrompt: jest.fn(prompt => prompt),
+  findTaskById: jest.fn((tasks, id) => tasks.find(t => t.id === parseInt(id))),
+  readComplexityReport: jest.fn(),
+  findTaskInComplexityReport: jest.fn(),
+  truncate: jest.fn((str, len) => str.slice(0, len)),
}));
-// Mock AI services - This is the correct way to mock the module
+// Mock AI services with the functions task-manager imports
jest.mock('../../scripts/modules/ai-services.js', () => ({
callClaude: mockCallClaude,
- callPerplexity: mockCallPerplexity
+ callPerplexity: mockCallPerplexity,
+  generateSubtasks: jest.fn(),
+  generateSubtasksWithPerplexity: jest.fn(),
+  generateComplexityAnalysisPrompt: jest.fn(),
+  getAvailableAIModel: mockGetAvailableAIModel,
+  handleClaudeError: jest.fn(),
}));
// Mock Anthropic SDK
@@ -2397,4 +2421,241 @@ describe.skip('updateSubtaskById function', () => {
expect(closingMatch).toBeTruthy();
expect(openingMatch[1]).toBe(closingMatch[1]);
});
-});
\ No newline at end of file
+
+ let mockTasksData;
+ const tasksPath = 'test-tasks.json';
+ const outputDir = 'test-tasks-output'; // Assuming generateTaskFiles needs this
+
+ beforeEach(() => {
+ // Reset mocks before each test
+ jest.clearAllMocks();
+
+ // Reset mock data (deep copy to avoid test interference)
+ mockTasksData = JSON.parse(JSON.stringify({
+ tasks: [
+ {
+ id: 1,
+ title: 'Parent Task 1',
+ status: 'pending',
+ dependencies: [],
+ priority: 'medium',
+ description: 'Parent description',
+ details: 'Parent details',
+ testStrategy: 'Parent tests',
+ subtasks: [
+ {
+ id: 1,
+ title: 'Subtask 1.1',
+ description: 'Subtask 1.1 description',
+ details: 'Initial subtask details.',
+ status: 'pending',
+ dependencies: [],
+ },
+ {
+ id: 2,
+ title: 'Subtask 1.2',
+ description: 'Subtask 1.2 description',
+ details: 'Initial subtask details for 1.2.',
+ status: 'done', // Completed subtask
+ dependencies: [],
+ }
+ ]
+ }
+ ]
+ }));
+
+ // Default mock behaviors
+ mockReadJSON.mockReturnValue(mockTasksData);
+ mockDirname.mockReturnValue(outputDir); // Mock path.dirname needed by generateTaskFiles
+ mockGenerateTaskFiles.mockResolvedValue(); // Assume generateTaskFiles succeeds
+ });
+
+ test('should successfully update subtask using Claude (non-research)', async () => {
+ const subtaskIdToUpdate = '1.1'; // Valid format
+ const updatePrompt = 'Add more technical details about API integration.'; // Non-empty prompt
+ const expectedClaudeResponse = 'Here are the API integration details you requested.';
+
+ // --- Arrange ---
+ // **Explicitly reset and configure mocks for this test**
+ jest.clearAllMocks(); // Ensure clean state
+
+ // Configure mocks used *before* readJSON
+ mockExistsSync.mockReturnValue(true); // Ensure file is found
+ mockGetAvailableAIModel.mockReturnValue({ // Ensure this returns the correct structure
+ type: 'claude',
+ client: { messages: { create: mockCreate } }
+ });
+
+ // Configure mocks used *after* readJSON (as before)
+ mockReadJSON.mockReturnValue(mockTasksData); // Ensure readJSON returns valid data
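+    // Simulate the Anthropic SDK's streaming interface: an async iterable of typed event chunks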
+ async function* createMockStream() {
+ yield { type: 'content_block_delta', delta: { text: expectedClaudeResponse.substring(0, 10) } };
+ yield { type: 'content_block_delta', delta: { text: expectedClaudeResponse.substring(10) } };
+ yield { type: 'message_stop' };
+ }
+ mockCreate.mockResolvedValue(createMockStream());
+ mockDirname.mockReturnValue(outputDir);
+ mockGenerateTaskFiles.mockResolvedValue();
+
+ // --- Act ---
+ const updatedSubtask = await taskManager.updateSubtaskById(tasksPath, subtaskIdToUpdate, updatePrompt, false);
+
+ // --- Assert ---
+    // Confirm readJSON was called before checking anything else
+    expect(mockReadJSON).toHaveBeenCalledWith(tasksPath);
+
+ // ... (rest of the assertions as before) ...
+ expect(mockGetAvailableAIModel).toHaveBeenCalledWith({ claudeOverloaded: false, requiresResearch: false });
+ expect(mockCreate).toHaveBeenCalledTimes(1);
+ // ... etc ...
+ });
+
+ test('should successfully update subtask using Perplexity (research)', async () => {
+ const subtaskIdToUpdate = '1.1';
+ const updatePrompt = 'Research best practices for this subtask.';
+ const expectedPerplexityResponse = 'Based on research, here are the best practices...';
+ const perplexityModelName = 'mock-perplexity-model'; // Define a mock model name
+
+ // --- Arrange ---
+ // Mock environment variable for Perplexity model if needed by CONFIG/logic
+ process.env.PERPLEXITY_MODEL = perplexityModelName;
+
+ // Mock getAvailableAIModel to return Perplexity client when research is required
+ mockGetAvailableAIModel.mockReturnValue({
+ type: 'perplexity',
+ client: { chat: { completions: { create: mockChatCompletionsCreate } } } // Match the mocked structure
+ });
+
+ // Mock Perplexity's response
+ mockChatCompletionsCreate.mockResolvedValue({
+ choices: [{ message: { content: expectedPerplexityResponse } }]
+ });
+
+ // --- Act ---
+ const updatedSubtask = await taskManager.updateSubtaskById(tasksPath, subtaskIdToUpdate, updatePrompt, true); // useResearch = true
+
+ // --- Assert ---
+ expect(mockReadJSON).toHaveBeenCalledWith(tasksPath);
+ // Verify getAvailableAIModel was called correctly for research
+ expect(mockGetAvailableAIModel).toHaveBeenCalledWith({ claudeOverloaded: false, requiresResearch: true });
+ expect(mockChatCompletionsCreate).toHaveBeenCalledTimes(1);
+
+ // Verify Perplexity API call parameters
+ expect(mockChatCompletionsCreate).toHaveBeenCalledWith(expect.objectContaining({
+ model: perplexityModelName, // Check the correct model is used
+ temperature: 0.7, // From CONFIG mock
+ max_tokens: 4000, // From CONFIG mock
+ messages: expect.arrayContaining([
+ expect.objectContaining({ role: 'system', content: expect.any(String) }),
+ expect.objectContaining({
+ role: 'user',
+ content: expect.stringContaining(updatePrompt) // Check prompt is included
+ })
+ ])
+ }));
+
+ // Verify subtask data was updated
+ const writtenData = mockWriteJSON.mock.calls[0][1]; // Get data passed to writeJSON
+ const parentTask = writtenData.tasks.find(t => t.id === 1);
+ const targetSubtask = parentTask.subtasks.find(st => st.id === 1);
+
+ expect(targetSubtask.details).toContain(expectedPerplexityResponse);
+    expect(targetSubtask.details).toMatch(/<info added on .*>/); // Check for the timestamp tag
+ expect(targetSubtask.description).toMatch(/\[Updated: .*]/); // Check description update
+
+ // Verify writeJSON and generateTaskFiles were called
+ expect(mockWriteJSON).toHaveBeenCalledWith(tasksPath, writtenData);
+ expect(mockGenerateTaskFiles).toHaveBeenCalledWith(tasksPath, outputDir);
+
+ // Verify the function returned the updated subtask
+ expect(updatedSubtask).toBeDefined();
+ expect(updatedSubtask.id).toBe(1);
+ expect(updatedSubtask.parentTaskId).toBe(1);
+ expect(updatedSubtask.details).toContain(expectedPerplexityResponse);
+
+ // Clean up env var if set
+ delete process.env.PERPLEXITY_MODEL;
+ });
+
+ test('should fall back to Perplexity if Claude is overloaded', async () => {
+ const subtaskIdToUpdate = '1.1';
+ const updatePrompt = 'Add details, trying Claude first.';
+ const expectedPerplexityResponse = 'Perplexity provided these details as fallback.';
+ const perplexityModelName = 'mock-perplexity-model-fallback';
+
+ // --- Arrange ---
+ // Mock environment variable for Perplexity model
+ process.env.PERPLEXITY_MODEL = perplexityModelName;
+
+ // Mock getAvailableAIModel: Return Claude first, then Perplexity
+ mockGetAvailableAIModel
+ .mockReturnValueOnce({ // First call: Return Claude
+ type: 'claude',
+ client: { messages: { create: mockCreate } }
+ })
+ .mockReturnValueOnce({ // Second call: Return Perplexity (after overload)
+ type: 'perplexity',
+ client: { chat: { completions: { create: mockChatCompletionsCreate } } }
+ });
+
+ // Mock Claude to throw an overload error
+ const overloadError = new Error('Claude API is overloaded.');
+ overloadError.type = 'overloaded_error'; // Match one of the specific checks
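+      // A 429/529 status or a nested error.type would also be recognized by the overload check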
+ mockCreate.mockRejectedValue(overloadError); // Simulate Claude failing
+
+ // Mock Perplexity's successful response
+ mockChatCompletionsCreate.mockResolvedValue({
+ choices: [{ message: { content: expectedPerplexityResponse } }]
+ });
+
+ // --- Act ---
+ const updatedSubtask = await taskManager.updateSubtaskById(tasksPath, subtaskIdToUpdate, updatePrompt, false); // Start with useResearch = false
+
+ // --- Assert ---
+ expect(mockReadJSON).toHaveBeenCalledWith(tasksPath);
+
+ // Verify getAvailableAIModel calls
+ expect(mockGetAvailableAIModel).toHaveBeenCalledTimes(2);
+ expect(mockGetAvailableAIModel).toHaveBeenNthCalledWith(1, { claudeOverloaded: false, requiresResearch: false });
+ expect(mockGetAvailableAIModel).toHaveBeenNthCalledWith(2, { claudeOverloaded: true, requiresResearch: false }); // claudeOverloaded should now be true
+
+ // Verify Claude was attempted and failed
+ expect(mockCreate).toHaveBeenCalledTimes(1);
+ // Verify Perplexity was called as fallback
+ expect(mockChatCompletionsCreate).toHaveBeenCalledTimes(1);
+
+ // Verify Perplexity API call parameters
+ expect(mockChatCompletionsCreate).toHaveBeenCalledWith(expect.objectContaining({
+ model: perplexityModelName,
+ messages: expect.arrayContaining([
+ expect.objectContaining({
+ role: 'user',
+ content: expect.stringContaining(updatePrompt)
+ })
+ ])
+ }));
+
+ // Verify subtask data was updated with Perplexity's response
+ const writtenData = mockWriteJSON.mock.calls[0][1];
+ const parentTask = writtenData.tasks.find(t => t.id === 1);
+ const targetSubtask = parentTask.subtasks.find(st => st.id === 1);
+
+ expect(targetSubtask.details).toContain(expectedPerplexityResponse); // Should contain fallback response
+    expect(targetSubtask.details).toMatch(/<info added on .*>/); // Check for the timestamp tag
+ expect(targetSubtask.description).toMatch(/\[Updated: .*]/);
+
+ // Verify writeJSON and generateTaskFiles were called
+ expect(mockWriteJSON).toHaveBeenCalledWith(tasksPath, writtenData);
+ expect(mockGenerateTaskFiles).toHaveBeenCalledWith(tasksPath, outputDir);
+
+ // Verify the function returned the updated subtask
+ expect(updatedSubtask).toBeDefined();
+ expect(updatedSubtask.details).toContain(expectedPerplexityResponse);
+
+ // Clean up env var if set
+ delete process.env.PERPLEXITY_MODEL;
+ });
+
+ // More tests will go here...
+
+});