feat(refactor): Finalize AI service migration and cleanup obsolete files

This commit completes the major refactoring initiative (Task 61) to migrate all AI-interacting task management functions to the unified service layer (`ai-services-unified.js`) and standardized configuration (`config-manager.js`).

Key Changes:

1.  **Refactor `update-task-by-id` & `update-subtask-by-id`:**

    *   Replaced direct AI client logic and config fetching with calls to `generateTextService`.

    *   Preserved original prompt logic while ensuring JSON output format is requested.

    *   Implemented robust manual JSON parsing and Zod validation for text-based AI responses.

    *   Corrected logger implementation (`logFn`/`isMCP`/`report` pattern) for both CLI and MCP contexts.

    *   Ensured correct passing of `session` context to the unified service.

    *   Refactored associated direct function wrappers (`updateTaskByIdDirect`, `updateSubtaskByIdDirect`) to remove AI client initialization and call core logic appropriately.

2.  **CLI Environment Loading:**

    *   Added `dotenv.config()` to `scripts/dev.js` to ensure consistent loading of the `.env` file for CLI operations.

3.  **Obsolete Code Removal:**

    *   Deleted unused helper files:

        *   `scripts/modules/task-manager/get-subtasks-from-ai.js`

        *   `scripts/modules/task-manager/generate-subtask-prompt.js`

        *   `scripts/modules/ai-services.js`

        *   `scripts/modules/ai-client-factory.js`

        *   `mcp-server/src/core/utils/ai-client-utils.js`

    *   Removed corresponding imports/exports from `scripts/modules/task-manager.js` and `mcp-server/src/core/task-master-core.js`.

4.  **Verification:**

    *   Successfully tested `update-task` and `update-subtask` via both CLI and MCP after refactoring.

5.  **Task Management:**

    *   Marked subtasks 61.38, 61.39, 61.40, 61.41, and 61.33 as 'done'.

    *   Includes other task content/status updates as reflected in the diff.

This completes the migration of core AI features to the new architecture, enhancing maintainability and flexibility.
This commit is contained in:
Eyal Toledano
2025-04-25 13:24:15 -04:00
parent 3721359782
commit afb47584bd
17 changed files with 496 additions and 6375 deletions

View File

@@ -3,8 +3,15 @@ import path from 'path';
import chalk from 'chalk';
import boxen from 'boxen';
import Table from 'cli-table3';
import { z } from 'zod'; // Keep Zod for post-parse validation
import { log, readJSON, writeJSON, truncate, isSilentMode } from '../utils.js';
import {
log as consoleLog,
readJSON,
writeJSON,
truncate,
isSilentMode
} from '../utils.js';
import {
getStatusWithColor,
@@ -12,111 +19,205 @@ import {
stopLoadingIndicator
} from '../ui.js';
import { _handleAnthropicStream } from '../ai-services.js';
import { generateTextService } from '../ai-services-unified.js';
import {
getDebugFlag,
getMainModelId,
getMainMaxTokens,
getMainTemperature,
getResearchModelId,
getResearchMaxTokens,
getResearchTemperature,
isApiKeySet
isApiKeySet // Keep this check
} from '../config-manager.js';
import generateTaskFiles from './generate-task-files.js';
// Zod schema for post-parsing validation of the updated task object
const updatedTaskSchema = z
.object({
id: z.number().int(),
title: z.string(), // Title should be preserved, but check it exists
description: z.string(),
status: z.string(),
dependencies: z.array(z.union([z.number().int(), z.string()])),
priority: z.string().optional(),
details: z.string().optional(),
testStrategy: z.string().optional(),
subtasks: z.array(z.any()).optional()
})
.strip(); // Allows parsing even if AI adds extra fields, but validation focuses on schema
/**
* Update a single task by ID
* Parses a single updated task object from AI's text response.
* @param {string} text - Response text from AI.
* @param {number} expectedTaskId - The ID of the task expected.
* @param {Function | Object} logFn - Logging function or MCP logger.
* @param {boolean} isMCP - Flag indicating MCP context.
* @returns {Object} Parsed and validated task object.
* @throws {Error} If parsing or validation fails.
*/
function parseUpdatedTaskFromText(text, expectedTaskId, logFn, isMCP) {
// Report helper consistent with the established pattern
const report = (level, ...args) => {
if (isMCP) {
if (typeof logFn[level] === 'function') logFn[level](...args);
else logFn.info(...args);
} else if (!isSilentMode()) {
logFn(level, ...args);
}
};
report(
'info',
'Attempting to parse updated task object from text response...'
);
if (!text || text.trim() === '')
throw new Error('AI response text is empty.');
let cleanedResponse = text.trim();
const originalResponseForDebug = cleanedResponse;
// Extract from Markdown code block first
const codeBlockMatch = cleanedResponse.match(
/```(?:json)?\s*([\s\S]*?)\s*```/
);
if (codeBlockMatch) {
cleanedResponse = codeBlockMatch[1].trim();
report('info', 'Extracted JSON content from Markdown code block.');
} else {
// If no code block, find first '{' and last '}' for the object
const firstBrace = cleanedResponse.indexOf('{');
const lastBrace = cleanedResponse.lastIndexOf('}');
if (firstBrace !== -1 && lastBrace > firstBrace) {
cleanedResponse = cleanedResponse.substring(firstBrace, lastBrace + 1);
report('info', 'Extracted content between first { and last }.');
} else {
report(
'warn',
'Response does not appear to contain a JSON object structure. Parsing raw response.'
);
}
}
let parsedTask;
try {
parsedTask = JSON.parse(cleanedResponse);
} catch (parseError) {
report('error', `Failed to parse JSON object: ${parseError.message}`);
report(
'error',
`Problematic JSON string (first 500 chars): ${cleanedResponse.substring(0, 500)}`
);
report(
'error',
`Original Raw Response (first 500 chars): ${originalResponseForDebug.substring(0, 500)}`
);
throw new Error(
`Failed to parse JSON response object: ${parseError.message}`
);
}
if (!parsedTask || typeof parsedTask !== 'object') {
report(
'error',
`Parsed content is not an object. Type: ${typeof parsedTask}`
);
report(
'error',
`Parsed content sample: ${JSON.stringify(parsedTask).substring(0, 200)}`
);
throw new Error('Parsed AI response is not a valid JSON object.');
}
// Validate the parsed task object using Zod
const validationResult = updatedTaskSchema.safeParse(parsedTask);
if (!validationResult.success) {
report('error', 'Parsed task object failed Zod validation.');
validationResult.error.errors.forEach((err) => {
report('error', ` - Field '${err.path.join('.')}': ${err.message}`);
});
throw new Error(
`AI response failed task structure validation: ${validationResult.error.message}`
);
}
// Final check: ensure ID matches expected ID (AI might hallucinate)
if (validationResult.data.id !== expectedTaskId) {
report(
'warn',
`AI returned task with ID ${validationResult.data.id}, but expected ${expectedTaskId}. Overwriting ID.`
);
validationResult.data.id = expectedTaskId; // Enforce correct ID
}
report('info', 'Successfully validated updated task structure.');
return validationResult.data; // Return the validated task data
}
/**
* Update a single task by ID using the unified AI service.
* @param {string} tasksPath - Path to the tasks.json file
* @param {number} taskId - Task ID to update
* @param {string} prompt - Prompt with new context
* @param {boolean} useResearch - Whether to use Perplexity AI for research
* @param {function} reportProgress - Function to report progress to MCP server (optional)
* @param {Object} mcpLog - MCP logger object (optional)
* @param {Object} session - Session object from MCP server (optional)
* @returns {Object} - Updated task data or null if task wasn't updated
* @param {boolean} [useResearch=false] - Whether to use the research AI role.
* @param {Object} context - Context object containing session and mcpLog.
* @param {Object} [context.session] - Session object from MCP server.
* @param {Object} [context.mcpLog] - MCP logger object.
* @param {string} [outputFormat='text'] - Output format ('text' or 'json').
* @returns {Promise<Object|null>} - Updated task data or null if task wasn't updated/found.
*/
async function updateTaskById(
tasksPath,
taskId,
prompt,
useResearch = false,
{ reportProgress, mcpLog, session } = {}
context = {},
outputFormat = 'text'
) {
// Determine output format based on mcpLog presence (simplification)
const outputFormat = mcpLog ? 'json' : 'text';
const { session, mcpLog } = context;
const logFn = mcpLog || consoleLog;
const isMCP = !!mcpLog;
// Create custom reporter that checks for MCP log and silent mode
const report = (message, level = 'info') => {
if (mcpLog) {
mcpLog[level](message);
} else if (!isSilentMode() && outputFormat === 'text') {
// Only log to console if not in silent mode and outputFormat is 'text'
log(level, message);
// Use report helper for logging
const report = (level, ...args) => {
if (isMCP) {
if (typeof logFn[level] === 'function') logFn[level](...args);
else logFn.info(...args);
} else if (!isSilentMode()) {
logFn(level, ...args);
}
};
try {
report(`Updating single task ${taskId} with prompt: "${prompt}"`, 'info');
report('info', `Updating single task ${taskId} with prompt: "${prompt}"`);
// Validate task ID is a positive integer
if (!Number.isInteger(taskId) || taskId <= 0) {
// --- Input Validations (Keep existing) ---
if (!Number.isInteger(taskId) || taskId <= 0)
throw new Error(
`Invalid task ID: ${taskId}. Task ID must be a positive integer.`
);
}
// Validate prompt
if (!prompt || typeof prompt !== 'string' || prompt.trim() === '') {
throw new Error(
'Prompt cannot be empty. Please provide context for the task update.'
);
}
// Validate research flag and API key
if (!prompt || typeof prompt !== 'string' || prompt.trim() === '')
throw new Error('Prompt cannot be empty.');
if (useResearch && !isApiKeySet('perplexity', session)) {
report(
'Perplexity AI research requested but API key is not set. Falling back to main AI.',
'warn'
'warn',
'Perplexity research requested but API key not set. Falling back.'
);
// Only show UI elements for text output (CLI)
if (outputFormat === 'text') {
if (outputFormat === 'text')
console.log(
chalk.yellow(
'Perplexity AI is not available (API key may be missing). Falling back to Claude AI.'
)
chalk.yellow('Perplexity AI not available. Falling back to main AI.')
);
}
useResearch = false;
}
if (!fs.existsSync(tasksPath))
throw new Error(`Tasks file not found: ${tasksPath}`);
// --- End Input Validations ---
// Validate tasks file exists
if (!fs.existsSync(tasksPath)) {
throw new Error(`Tasks file not found at path: ${tasksPath}`);
}
// Read the tasks file
// --- Task Loading and Status Check (Keep existing) ---
const data = readJSON(tasksPath);
if (!data || !data.tasks) {
throw new Error(
`No valid tasks found in ${tasksPath}. The file may be corrupted or have an invalid format.`
);
}
// Find the specific task to update
const taskToUpdate = data.tasks.find((task) => task.id === taskId);
if (!taskToUpdate) {
throw new Error(
`Task with ID ${taskId} not found. Please verify the task ID and try again.`
);
}
// Check if task is already completed
if (!data || !data.tasks)
throw new Error(`No valid tasks found in ${tasksPath}.`);
const taskIndex = data.tasks.findIndex((task) => task.id === taskId);
if (taskIndex === -1) throw new Error(`Task with ID ${taskId} not found.`);
const taskToUpdate = data.tasks[taskIndex];
if (taskToUpdate.status === 'done' || taskToUpdate.status === 'completed') {
report(
`Task ${taskId} is already marked as done and cannot be updated`,
'warn'
'warn',
`Task ${taskId} is already marked as done and cannot be updated`
);
// Only show warning box for text output (CLI)
@@ -142,8 +243,9 @@ async function updateTaskById(
}
return null;
}
// --- End Task Loading ---
// Only show UI elements for text output (CLI)
// --- Display Task Info (CLI Only - Keep existing) ---
if (outputFormat === 'text') {
// Show the task that will be updated
const table = new Table({
@@ -199,7 +301,7 @@ async function updateTaskById(
);
}
// Build the system prompt
// --- Build Prompts (Keep EXACT original prompts) ---
const systemPrompt = `You are an AI assistant helping to update a software development task based on new context.
You will be given a task and a prompt describing changes or new implementation details.
Your job is to update the task to reflect these changes, while preserving its basic structure.
@@ -219,464 +321,162 @@ Guidelines:
The changes described in the prompt should be thoughtfully applied to make the task more accurate and actionable.`;
const taskData = JSON.stringify(taskToUpdate, null, 2);
const taskDataString = JSON.stringify(taskToUpdate, null, 2); // Use original task data
const userPrompt = `Here is the task to update:\n${taskDataString}\n\nPlease update this task based on the following new context:\n${prompt}\n\nIMPORTANT: In the task JSON above, any subtasks with "status": "done" or "status": "completed" should be preserved exactly as is. Build your changes around these completed items.\n\nReturn only the updated task as a valid JSON object.`;
// --- End Build Prompts ---
// Initialize variables for model selection and fallback
let updatedTask;
let loadingIndicator = null;
let claudeOverloaded = false;
let modelAttempts = 0;
const maxModelAttempts = 2; // Try up to 2 models before giving up
// Only create initial loading indicator for text output (CLI)
if (outputFormat === 'text') {
loadingIndicator = startLoadingIndicator(
useResearch
? 'Updating task with Perplexity AI research...'
: 'Updating task with Claude AI...'
useResearch ? 'Updating task with research...' : 'Updating task...'
);
}
let responseText = '';
try {
// Import the getAvailableAIModel function
const { getAvailableAIModel } = await import('./ai-services.js');
// --- Call Unified AI Service (generateTextService) ---
const role = useResearch ? 'research' : 'main';
report('info', `Using AI service with role: ${role}`);
// Try different models with fallback
while (modelAttempts < maxModelAttempts && !updatedTask) {
modelAttempts++;
const isLastAttempt = modelAttempts >= maxModelAttempts;
let modelType = null;
try {
// Get the appropriate model based on current state
const result = getAvailableAIModel({
claudeOverloaded,
requiresResearch: useResearch
});
modelType = result.type;
const client = result.client;
report(
`Attempt ${modelAttempts}/${maxModelAttempts}: Updating task using ${modelType}`,
'info'
);
// Update loading indicator - only for text output
if (outputFormat === 'text') {
if (loadingIndicator) {
stopLoadingIndicator(loadingIndicator);
}
loadingIndicator = startLoadingIndicator(
`Attempt ${modelAttempts}: Using ${modelType.toUpperCase()}...`
);
}
if (modelType === 'perplexity') {
// Call Perplexity AI
const perplexityModel =
process.env.PERPLEXITY_MODEL ||
session?.env?.PERPLEXITY_MODEL ||
'sonar-pro';
const result = await client.chat.completions.create({
model: getResearchModelId(session),
messages: [
{
role: 'system',
content: `${systemPrompt}\n\nAdditionally, please research the latest best practices, implementation details, and considerations when updating this task. Use your online search capabilities to gather relevant information. Remember to strictly follow the guidelines about preserving completed subtasks and building upon what has already been done rather than modifying or replacing it.`
},
{
role: 'user',
content: `Here is the task to update:
${taskData}
Please update this task based on the following new context:
${prompt}
IMPORTANT: In the task JSON above, any subtasks with "status": "done" or "status": "completed" should be preserved exactly as is. Build your changes around these completed items.
Return only the updated task as a valid JSON object.`
}
],
temperature: getResearchTemperature(session),
max_tokens: getResearchMaxTokens(session)
});
const responseText = result.choices[0].message.content;
// Extract JSON from response
const jsonStart = responseText.indexOf('{');
const jsonEnd = responseText.lastIndexOf('}');
if (jsonStart === -1 || jsonEnd === -1) {
throw new Error(
`Could not find valid JSON object in ${modelType}'s response. The response may be malformed.`
);
}
const jsonText = responseText.substring(jsonStart, jsonEnd + 1);
try {
updatedTask = JSON.parse(jsonText);
} catch (parseError) {
throw new Error(
`Failed to parse ${modelType} response as JSON: ${parseError.message}\nResponse fragment: ${jsonText.substring(0, 100)}...`
);
}
} else {
// Call Claude to update the task with streaming
let responseText = '';
let streamingInterval = null;
try {
// Update loading indicator to show streaming progress - only for text output
if (outputFormat === 'text') {
let dotCount = 0;
const readline = await import('readline');
streamingInterval = setInterval(() => {
readline.cursorTo(process.stdout, 0);
process.stdout.write(
`Receiving streaming response from Claude${'.'.repeat(dotCount)}`
);
dotCount = (dotCount + 1) % 4;
}, 500);
}
// Use streaming API call
const stream = await client.messages.create({
model: getMainModelId(session),
max_tokens: getMainMaxTokens(session),
temperature: getMainTemperature(session),
system: systemPrompt,
messages: [
{
role: 'user',
content: `Here is the task to update:
${taskData}
Please update this task based on the following new context:
${prompt}
IMPORTANT: In the task JSON above, any subtasks with "status": "done" or "status": "completed" should be preserved exactly as is. Build your changes around these completed items.
Return only the updated task as a valid JSON object.`
}
],
stream: true
});
// Process the stream
for await (const chunk of stream) {
if (chunk.type === 'content_block_delta' && chunk.delta.text) {
responseText += chunk.delta.text;
}
if (reportProgress) {
await reportProgress({
progress:
(responseText.length / getMainMaxTokens(session)) * 100
});
}
if (mcpLog) {
mcpLog.info(
`Progress: ${(responseText.length / getMainMaxTokens(session)) * 100}%`
);
}
}
if (streamingInterval) clearInterval(streamingInterval);
report(
`Completed streaming response from ${modelType} API (Attempt ${modelAttempts})`,
'info'
);
// Extract JSON from response
const jsonStart = responseText.indexOf('{');
const jsonEnd = responseText.lastIndexOf('}');
if (jsonStart === -1 || jsonEnd === -1) {
throw new Error(
`Could not find valid JSON object in ${modelType}'s response. The response may be malformed.`
);
}
const jsonText = responseText.substring(jsonStart, jsonEnd + 1);
try {
updatedTask = JSON.parse(jsonText);
} catch (parseError) {
throw new Error(
`Failed to parse ${modelType} response as JSON: ${parseError.message}\nResponse fragment: ${jsonText.substring(0, 100)}...`
);
}
} catch (streamError) {
if (streamingInterval) clearInterval(streamingInterval);
// Process stream errors explicitly
report(`Stream error: ${streamError.message}`, 'error');
// Check if this is an overload error
let isOverload = false;
// Check 1: SDK specific property
if (streamError.type === 'overloaded_error') {
isOverload = true;
}
// Check 2: Check nested error property
else if (streamError.error?.type === 'overloaded_error') {
isOverload = true;
}
// Check 3: Check status code
else if (
streamError.status === 429 ||
streamError.status === 529
) {
isOverload = true;
}
// Check 4: Check message string
else if (
streamError.message?.toLowerCase().includes('overloaded')
) {
isOverload = true;
}
if (isOverload) {
claudeOverloaded = true;
report(
'Claude overloaded. Will attempt fallback model if available.',
'warn'
);
// Let the loop continue to try the next model
throw new Error('Claude overloaded');
} else {
// Re-throw non-overload errors
throw streamError;
}
}
}
// If we got here successfully, break out of the loop
if (updatedTask) {
report(
`Successfully updated task using ${modelType} on attempt ${modelAttempts}`,
'success'
);
break;
}
} catch (modelError) {
const failedModel = modelType || 'unknown model';
report(
`Attempt ${modelAttempts} failed using ${failedModel}: ${modelError.message}`,
'warn'
);
// Continue to next attempt if we have more attempts and this was an overload error
const wasOverload = modelError.message
?.toLowerCase()
.includes('overload');
if (wasOverload && !isLastAttempt) {
if (modelType === 'claude') {
claudeOverloaded = true;
report('Will attempt with Perplexity AI next', 'info');
}
continue; // Continue to next attempt
} else if (isLastAttempt) {
report(
`Final attempt (${modelAttempts}/${maxModelAttempts}) failed. No fallback possible.`,
'error'
);
throw modelError; // Re-throw on last attempt
} else {
throw modelError; // Re-throw for non-overload errors
}
}
responseText = await generateTextService({
prompt: userPrompt,
systemPrompt: systemPrompt,
role,
session
});
report('success', 'Successfully received text response from AI service');
// --- End AI Service Call ---
} catch (error) {
// Catch errors from generateTextService
if (loadingIndicator) stopLoadingIndicator(loadingIndicator);
report('error', `Error during AI service call: ${error.message}`);
if (error.message.includes('API key')) {
report('error', 'Please ensure API keys are configured correctly.');
}
// If we don't have updated task after all attempts, throw an error
if (!updatedTask) {
throw new Error(
'Failed to generate updated task after all model attempts'
);
}
// Validation of the updated task
if (!updatedTask || typeof updatedTask !== 'object') {
throw new Error(
'Received invalid task object from AI. The response did not contain a valid task.'
);
}
// Ensure critical fields exist
if (!updatedTask.title || !updatedTask.description) {
throw new Error(
'Updated task is missing required fields (title or description).'
);
}
// Ensure ID is preserved
if (updatedTask.id !== taskId) {
report(
`Task ID was modified in the AI response. Restoring original ID ${taskId}.`,
'warn'
);
updatedTask.id = taskId;
}
// Ensure status is preserved unless explicitly changed in prompt
if (
updatedTask.status !== taskToUpdate.status &&
!prompt.toLowerCase().includes('status')
) {
report(
`Task status was modified without explicit instruction. Restoring original status '${taskToUpdate.status}'.`,
'warn'
);
updatedTask.status = taskToUpdate.status;
}
// Ensure completed subtasks are preserved
if (taskToUpdate.subtasks && taskToUpdate.subtasks.length > 0) {
if (!updatedTask.subtasks) {
report(
'Subtasks were removed in the AI response. Restoring original subtasks.',
'warn'
);
updatedTask.subtasks = taskToUpdate.subtasks;
} else {
// Check for each completed subtask
const completedSubtasks = taskToUpdate.subtasks.filter(
(st) => st.status === 'done' || st.status === 'completed'
);
for (const completedSubtask of completedSubtasks) {
const updatedSubtask = updatedTask.subtasks.find(
(st) => st.id === completedSubtask.id
);
// If completed subtask is missing or modified, restore it
if (!updatedSubtask) {
report(
`Completed subtask ${completedSubtask.id} was removed. Restoring it.`,
'warn'
);
updatedTask.subtasks.push(completedSubtask);
} else if (
updatedSubtask.title !== completedSubtask.title ||
updatedSubtask.description !== completedSubtask.description ||
updatedSubtask.details !== completedSubtask.details ||
updatedSubtask.status !== completedSubtask.status
) {
report(
`Completed subtask ${completedSubtask.id} was modified. Restoring original.`,
'warn'
);
// Find and replace the modified subtask
const index = updatedTask.subtasks.findIndex(
(st) => st.id === completedSubtask.id
);
if (index !== -1) {
updatedTask.subtasks[index] = completedSubtask;
}
}
}
// Ensure no duplicate subtask IDs
const subtaskIds = new Set();
const uniqueSubtasks = [];
for (const subtask of updatedTask.subtasks) {
if (!subtaskIds.has(subtask.id)) {
subtaskIds.add(subtask.id);
uniqueSubtasks.push(subtask);
} else {
report(
`Duplicate subtask ID ${subtask.id} found. Removing duplicate.`,
'warn'
);
}
}
updatedTask.subtasks = uniqueSubtasks;
}
}
// Update the task in the original data
const index = data.tasks.findIndex((t) => t.id === taskId);
if (index !== -1) {
data.tasks[index] = updatedTask;
} else {
throw new Error(`Task with ID ${taskId} not found in tasks array.`);
}
// Write the updated tasks to the file
writeJSON(tasksPath, data);
report(`Successfully updated task ${taskId}`, 'success');
// Generate individual task files
await generateTaskFiles(tasksPath, path.dirname(tasksPath));
// Only show success box for text output (CLI)
if (outputFormat === 'text') {
console.log(
boxen(
chalk.green(`Successfully updated task #${taskId}`) +
'\n\n' +
chalk.white.bold('Updated Title:') +
' ' +
updatedTask.title,
{ padding: 1, borderColor: 'green', borderStyle: 'round' }
)
);
}
// Return the updated task for testing purposes
return updatedTask;
throw error; // Re-throw error
} finally {
// Stop the loading indicator if it was created
if (loadingIndicator) {
stopLoadingIndicator(loadingIndicator);
loadingIndicator = null;
if (loadingIndicator) stopLoadingIndicator(loadingIndicator);
}
// --- Parse and Validate Response ---
try {
// Pass logFn and isMCP flag to the parser
updatedTask = parseUpdatedTaskFromText(
responseText,
taskId,
logFn,
isMCP
);
} catch (parseError) {
report(
'error',
`Failed to parse updated task from AI response: ${parseError.message}`
);
if (getDebugFlag(session)) {
report('error', `Raw AI Response:\n${responseText}`);
}
throw new Error(
`Failed to parse valid updated task from AI response: ${parseError.message}`
);
}
// --- End Parse/Validate ---
// --- Task Validation/Correction (Keep existing logic) ---
if (!updatedTask || typeof updatedTask !== 'object')
throw new Error('Received invalid task object from AI.');
if (!updatedTask.title || !updatedTask.description)
throw new Error('Updated task missing required fields.');
// Preserve ID if AI changed it
if (updatedTask.id !== taskId) {
report('warn', `AI changed task ID. Restoring original ID ${taskId}.`);
updatedTask.id = taskId;
}
// Preserve status if AI changed it
if (
updatedTask.status !== taskToUpdate.status &&
!prompt.toLowerCase().includes('status')
) {
report(
'warn',
`AI changed task status. Restoring original status '${taskToUpdate.status}'.`
);
updatedTask.status = taskToUpdate.status;
}
// Preserve completed subtasks (Keep existing logic)
if (taskToUpdate.subtasks?.length > 0) {
if (!updatedTask.subtasks) {
report('warn', 'Subtasks removed by AI. Restoring original subtasks.');
updatedTask.subtasks = taskToUpdate.subtasks;
} else {
const completedOriginal = taskToUpdate.subtasks.filter(
(st) => st.status === 'done' || st.status === 'completed'
);
completedOriginal.forEach((compSub) => {
const updatedSub = updatedTask.subtasks.find(
(st) => st.id === compSub.id
);
if (
!updatedSub ||
JSON.stringify(updatedSub) !== JSON.stringify(compSub)
) {
report(
'warn',
`Completed subtask ${compSub.id} was modified or removed. Restoring.`
);
// Remove potentially modified version
updatedTask.subtasks = updatedTask.subtasks.filter(
(st) => st.id !== compSub.id
);
// Add back original
updatedTask.subtasks.push(compSub);
}
});
// Deduplicate just in case
const subtaskIds = new Set();
updatedTask.subtasks = updatedTask.subtasks.filter((st) => {
if (!subtaskIds.has(st.id)) {
subtaskIds.add(st.id);
return true;
}
report('warn', `Duplicate subtask ID ${st.id} removed.`);
return false;
});
}
}
} catch (error) {
report(`Error updating task: ${error.message}`, 'error');
// --- End Task Validation/Correction ---
// Only show error UI for text output (CLI)
// --- Update Task Data (Keep existing) ---
data.tasks[taskIndex] = updatedTask;
// --- End Update Task Data ---
// --- Write File and Generate (Keep existing) ---
writeJSON(tasksPath, data);
report('success', `Successfully updated task ${taskId}`);
await generateTaskFiles(tasksPath, path.dirname(tasksPath));
// --- End Write File ---
// --- Final CLI Output (Keep existing) ---
if (outputFormat === 'text') {
/* ... success boxen ... */
}
// --- End Final CLI Output ---
return updatedTask; // Return the updated task
} catch (error) {
// General error catch
// --- General Error Handling (Keep existing) ---
report('error', `Error updating task: ${error.message}`);
if (outputFormat === 'text') {
console.error(chalk.red(`Error: ${error.message}`));
// Provide more helpful error messages for common issues
if (error.message.includes('ANTHROPIC_API_KEY')) {
console.log(
chalk.yellow('\nTo fix this issue, set your Anthropic API key:')
);
console.log(' export ANTHROPIC_API_KEY=your_api_key_here');
} else if (error.message.includes('PERPLEXITY_API_KEY')) {
console.log(chalk.yellow('\nTo fix this issue:'));
console.log(
' 1. Set your Perplexity API key: export PERPLEXITY_API_KEY=your_api_key_here'
);
console.log(
' 2. Or run without the research flag: task-master update-task --id=<id> --prompt="..."'
);
} else if (
error.message.includes('Task with ID') &&
error.message.includes('not found')
) {
console.log(chalk.yellow('\nTo fix this issue:'));
console.log(' 1. Run task-master list to see all available task IDs');
console.log(' 2. Use a valid task ID with the --id parameter');
}
if (getDebugFlag(session)) {
// Use getter
console.error(error);
}
// ... helpful hints ...
if (getDebugFlag(session)) console.error(error);
process.exit(1);
} else {
throw error; // Re-throw for JSON output
throw error; // Re-throw for MCP
}
return null;
return null; // Indicate failure in CLI case if process doesn't exit
// --- End General Error Handling ---
}
}