feat(telemetry): Implement AI usage telemetry pattern and apply to add-task
This commit introduces a standardized pattern for capturing and propagating AI usage telemetry (cost, tokens, model used) across the Task Master stack and applies it to the `add-task` functionality.

Key changes include:

- **Telemetry Pattern Definition:**
  - Added a new rule defining the telemetry integration pattern for core logic, direct functions, MCP tools, and CLI commands.
  - Updated the related rules to reference the new telemetry rule.
- **Core Telemetry Implementation:**
  - Refactored the unified AI service to generate and return a `telemetryData` object alongside the main AI result.
  - Fixed an MCP server startup crash by removing a redundant local data load and relying on the existing import for cost calculations.
  - Extended the telemetry object with the additional fields needed for reporting.
- **`add-task` Integration:**
  - Modified the core `addTask` function to receive `telemetryData` from the AI service, return it, and call the new UI display function for CLI output.
  - Updated the direct function to receive `telemetryData` from the core function and include it in the payload of its response.
  - Ensured the MCP tool correctly passes the telemetry through in its response.
  - Updated the CLI command to pass context (`commandName`, `outputType`) to the core function and rely on it for CLI telemetry display.
- **UI Enhancement:**
  - Added a `displayAiUsageSummary` function to `ui.js` to show telemetry details in the CLI.
- **Project Management:**
  - Added subtasks 77.6 through 77.12 to track the rollout of this telemetry pattern to the other AI-powered commands.

This establishes the foundation for tracking AI usage across the application.
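For orientation, the shape of the pattern as applied to `add-task` can be sketched as follows. This is a minimal, illustrative reduction, not the shipped code: `createTaskSketch` and `TaskSketchSchema` are hypothetical names, while `generateObjectService`, `displayAiUsageSummary`, and the `{ mainResult, telemetryData }` response shape come from the changes in this commit.

```js
import { z } from 'zod';
import { generateObjectService } from '../ai-services-unified.js';
import { displayAiUsageSummary } from '../ui.js';

// Hypothetical schema for the object the AI is asked to produce.
const TaskSketchSchema = z.object({
	title: z.string(),
	description: z.string()
});

async function createTaskSketch(userPrompt, context = {}) {
	const { session, projectRoot, mcpLog, commandName, outputType } = context;

	// The unified AI service now returns the main result AND usage telemetry.
	const aiServiceResponse = await generateObjectService({
		role: 'main',
		session,
		projectRoot,
		schema: TaskSketchSchema,
		objectName: 'newTaskData',
		systemPrompt: 'You generate task objects.',
		prompt: userPrompt,
		commandName: commandName || 'add-task', // identifies the command for telemetry
		outputType: outputType || (mcpLog ? 'mcp' : 'cli') // identifies the caller type
	});

	const taskData = aiServiceResponse.mainResult.object;

	// CLI callers render a usage summary; MCP callers just receive the raw data.
	if ((outputType || 'cli') === 'cli' && aiServiceResponse.telemetryData) {
		displayAiUsageSummary(aiServiceResponse.telemetryData, 'cli');
	}

	// Core functions return telemetry alongside their primary result so that
	// direct functions and MCP tools can forward it in their response payloads.
	return {
		newTask: taskData,
		telemetryData: aiServiceResponse.telemetryData ?? null
	};
}
```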
@@ -8,7 +8,8 @@ import {
	displayBanner,
	getStatusWithColor,
	startLoadingIndicator,
	stopLoadingIndicator
	stopLoadingIndicator,
	displayAiUsageSummary
} from '../ui.js';
import { readJSON, writeJSON, log as consoleLog, truncate } from '../utils.js';
import { generateObjectService } from '../ai-services-unified.js';
@@ -44,7 +45,9 @@ const AiTaskDataSchema = z.object({
 * @param {boolean} useResearch - Whether to use the research model (passed to unified service)
 * @param {Object} context - Context object containing session and potentially projectRoot
 * @param {string} [context.projectRoot] - Project root path (for MCP/env fallback)
 * @returns {number} The new task ID
 * @param {string} [context.commandName] - The name of the command being executed (for telemetry)
 * @param {string} [context.outputType] - The output type ('cli' or 'mcp', for telemetry)
 * @returns {Promise<object>} An object containing newTaskId and telemetryData
 */
async function addTask(
	tasksPath,
@@ -56,7 +59,7 @@ async function addTask(
	manualTaskData = null,
	useResearch = false
) {
	const { session, mcpLog, projectRoot } = context;
	const { session, mcpLog, projectRoot, commandName, outputType } = context;
	const isMCP = !!mcpLog;

	// Create a consistent logFn object regardless of context
@@ -78,6 +81,7 @@ async function addTask(
	);

	let loadingIndicator = null;
	let aiServiceResponse = null; // To store the full response from AI service

	// Create custom reporter that checks for MCP log
	const report = (message, level = 'info') => {
@@ -229,29 +233,40 @@ async function addTask(
		// Start the loading indicator - only for text mode
		if (outputFormat === 'text') {
			loadingIndicator = startLoadingIndicator(
				`Generating new task with ${useResearch ? 'Research' : 'Main'} AI...`
				`Generating new task with ${useResearch ? 'Research' : 'Main'} AI..\n`
			);
		}

		try {
			// Determine the service role based on the useResearch flag
			const serviceRole = useResearch ? 'research' : 'main';

			report('DEBUG: Calling generateObjectService...', 'debug');
			// Call the unified AI service
			const aiGeneratedTaskData = await generateObjectService({
				role: serviceRole, // <-- Use the determined role
				session: session, // Pass session for API key resolution
				projectRoot: projectRoot, // <<< Pass projectRoot here
				schema: AiTaskDataSchema, // Pass the Zod schema
				objectName: 'newTaskData', // Name for the object

			aiServiceResponse = await generateObjectService({
				// Capture the full response
				role: serviceRole,
				session: session,
				projectRoot: projectRoot,
				schema: AiTaskDataSchema,
				objectName: 'newTaskData',
				systemPrompt: systemPrompt,
				prompt: userPrompt
				prompt: userPrompt,
				commandName: commandName || 'add-task', // Use passed commandName or default
				outputType: outputType || (isMCP ? 'mcp' : 'cli') // Use passed outputType or derive
			});
			report('DEBUG: generateObjectService returned successfully.', 'debug');

			if (
				!aiServiceResponse ||
				!aiServiceResponse.mainResult ||
				!aiServiceResponse.mainResult.object
			) {
				throw new Error(
					'AI service did not return the expected object structure.'
				);
			}
			taskData = aiServiceResponse.mainResult.object; // Extract the AI-generated task data

			report('Successfully generated task data from AI.', 'success');
			taskData = aiGeneratedTaskData; // Assign the validated object
		} catch (error) {
			report(
				`DEBUG: generateObjectService caught error: ${error.message}`,
@@ -362,11 +377,25 @@ async function addTask(
				{ padding: 1, borderColor: 'green', borderStyle: 'round' }
			)
		);

		// Display AI Usage Summary if telemetryData is available
		if (
			aiServiceResponse &&
			aiServiceResponse.telemetryData &&
			(outputType === 'cli' || outputType === 'text')
		) {
			displayAiUsageSummary(aiServiceResponse.telemetryData, 'cli');
		}
	}

	// Return the new task ID
	report(`DEBUG: Returning new task ID: ${newTaskId}`, 'debug');
	return newTaskId;
	report(
		`DEBUG: Returning new task ID: ${newTaskId} and telemetry.`,
		'debug'
	);
	return {
		newTaskId: newTaskId,
		telemetryData: aiServiceResponse ? aiServiceResponse.telemetryData : null
	};
} catch (error) {
	// Stop any loading indicator on error
	if (loadingIndicator) {
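The direct-function and MCP-tool layers referenced in the commit message are not part of the diff shown here. As a rough illustration of the forwarding contract only, the following sketch assumes a core function that resolves to `{ newTaskId, telemetryData }` and a wrapper that returns a `{ success, data }` envelope; both the helper name and the envelope shape are assumptions, not the shipped API.

```js
// Hypothetical sketch: forward telemetry from a core function to an MCP-style payload.
export async function forwardTelemetrySketch(coreFn, coreArgs, context = {}) {
	// The core function is assumed to resolve to { newTaskId, telemetryData },
	// matching the return shape introduced for addTask in this commit.
	const { newTaskId, telemetryData } = await coreFn(...coreArgs, {
		...context,
		commandName: 'add-task',
		outputType: 'mcp'
	});

	// The wrapper includes telemetryData verbatim in its response payload so the
	// MCP tool (and ultimately the client) can report AI usage.
	return {
		success: true,
		data: { taskId: newTaskId, telemetryData }
	};
}
```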
@@ -3,7 +3,6 @@ import path from 'path';
import chalk from 'chalk';
import boxen from 'boxen';
import Table from 'cli-table3';
import { z } from 'zod';

import {
	getStatusWithColor,
@@ -17,10 +16,7 @@ import {
	truncate,
	isSilentMode
} from '../utils.js';
import {
	generateObjectService,
	generateTextService
} from '../ai-services-unified.js';
import { generateTextService } from '../ai-services-unified.js';
import { getDebugFlag } from '../config-manager.js';
import generateTaskFiles from './generate-task-files.js';

@@ -64,7 +60,6 @@ async function updateSubtaskById(
	try {
		report('info', `Updating subtask ${subtaskId} with prompt: "${prompt}"`);

		// Validate subtask ID format
		if (
			!subtaskId ||
			typeof subtaskId !== 'string' ||
@@ -75,19 +70,16 @@ async function updateSubtaskById(
			);
		}

		// Validate prompt
		if (!prompt || typeof prompt !== 'string' || prompt.trim() === '') {
			throw new Error(
				'Prompt cannot be empty. Please provide context for the subtask update.'
			);
		}

		// Validate tasks file exists
		if (!fs.existsSync(tasksPath)) {
			throw new Error(`Tasks file not found at path: ${tasksPath}`);
		}

		// Read the tasks file
		const data = readJSON(tasksPath);
		if (!data || !data.tasks) {
			throw new Error(
@@ -95,7 +87,6 @@ async function updateSubtaskById(
			);
		}

		// Parse parent and subtask IDs
		const [parentIdStr, subtaskIdStr] = subtaskId.split('.');
		const parentId = parseInt(parentIdStr, 10);
		const subtaskIdNum = parseInt(subtaskIdStr, 10);
@@ -111,7 +102,6 @@ async function updateSubtaskById(
			);
		}

		// Find the parent task
		const parentTask = data.tasks.find((task) => task.id === parentId);
		if (!parentTask) {
			throw new Error(
@@ -119,7 +109,6 @@ async function updateSubtaskById(
			);
		}

		// Find the subtask
		if (!parentTask.subtasks || !Array.isArray(parentTask.subtasks)) {
			throw new Error(`Parent task ${parentId} has no subtasks.`);
		}
@@ -135,20 +124,7 @@ async function updateSubtaskById(

		const subtask = parentTask.subtasks[subtaskIndex];

		const subtaskSchema = z.object({
			id: z.number().int().positive(),
			title: z.string(),
			description: z.string().optional(),
			status: z.string(),
			dependencies: z.array(z.union([z.string(), z.number()])).optional(),
			priority: z.string().optional(),
			details: z.string().optional(),
			testStrategy: z.string().optional()
		});

		// Only show UI elements for text output (CLI)
		if (outputFormat === 'text') {
			// Show the subtask that will be updated
			const table = new Table({
				head: [
					chalk.cyan.bold('ID'),
@@ -157,13 +133,11 @@ async function updateSubtaskById(
				],
				colWidths: [10, 55, 10]
			});

			table.push([
				subtaskId,
				truncate(subtask.title, 52),
				getStatusWithColor(subtask.status)
			]);

			console.log(
				boxen(chalk.white.bold(`Updating Subtask #${subtaskId}`), {
					padding: 1,
@@ -172,10 +146,7 @@ async function updateSubtaskById(
					margin: { top: 1, bottom: 0 }
				})
			);

			console.log(table.toString());

			// Start the loading indicator - only for text output
			loadingIndicator = startLoadingIndicator(
				useResearch
					? 'Updating subtask with research...'
@@ -183,15 +154,13 @@ async function updateSubtaskById(
			);
		}

		let parsedAIResponse;
		let generatedContentString = ''; // Initialize to empty string
		let newlyAddedSnippet = ''; // <--- ADD THIS LINE: Variable to store the snippet for CLI display
		try {
			// --- GET PARENT & SIBLING CONTEXT ---
			const parentContext = {
				id: parentTask.id,
				title: parentTask.title
				// Avoid sending full parent description/details unless necessary
			};

			const prevSubtask =
				subtaskIndex > 0
					? {
@@ -200,7 +169,6 @@ async function updateSubtaskById(
							status: parentTask.subtasks[subtaskIndex - 1].status
						}
					: null;

			const nextSubtask =
				subtaskIndex < parentTask.subtasks.length - 1
					? {
@@ -214,45 +182,61 @@ async function updateSubtaskById(
Parent Task: ${JSON.stringify(parentContext)}
${prevSubtask ? `Previous Subtask: ${JSON.stringify(prevSubtask)}` : ''}
${nextSubtask ? `Next Subtask: ${JSON.stringify(nextSubtask)}` : ''}
Current Subtask Details (for context only):\n${subtask.details || '(No existing details)'}
`;

			const systemPrompt = `You are an AI assistant updating a parent task's subtask. This subtask will be part of a larger parent task and will be used to direct AI agents to complete the subtask. Your goal is to GENERATE new, relevant information based on the user's request (which may be high-level, mid-level or low-level) and APPEND it to the existing subtask 'details' field, wrapped in specific XML-like tags with an ISO 8601 timestamp. Intelligently determine the level of detail to include based on the user's request. Some requests are meant simply to update the subtask with some mid-implementation details, while others are meant to update the subtask with a detailed plan or strategy.
			const systemPrompt = `You are an AI assistant helping to update a subtask. You will be provided with the subtask's existing details, context about its parent and sibling tasks, and a user request string.

Context Provided:
- The current subtask object.
- Basic info about the parent task (ID, title).
- Basic info about the immediately preceding subtask (ID, title, status), if it exists.
- Basic info about the immediately succeeding subtask (ID, title, status), if it exists.
- A user request string.
Your Goal: Based *only* on the user's request and all the provided context (including existing details if relevant to the request), GENERATE the new text content that should be added to the subtask's details.
Focus *only* on generating the substance of the update.

Guidelines:
1. Analyze the user request considering the provided subtask details AND the context of the parent and sibling tasks.
2. GENERATE new, relevant text content that should be added to the 'details' field. Focus *only* on the substance of the update based on the user request and context. Do NOT add timestamps or any special formatting yourself. Avoid over-engineering the details, provide .
3. Update the 'details' field in the subtask object with the GENERATED text content. It's okay if this overwrites previous details in the object you return, as the calling code will handle the final appending.
4. Return the *entire* updated subtask object (with your generated content in the 'details' field) as a valid JSON object conforming to the provided schema. Do NOT return explanations or markdown formatting.`;
Output Requirements:
1. Return *only* the newly generated text content as a plain string. Do NOT return a JSON object or any other structured data.
2. Your string response should NOT include any of the subtask's original details, unless the user's request explicitly asks to rephrase, summarize, or directly modify existing text.
3. Do NOT include any timestamps, XML-like tags, markdown, or any other special formatting in your string response.
4. Ensure the generated text is concise yet complete for the update based on the user request. Avoid conversational fillers or explanations about what you are doing (e.g., do not start with "Okay, here's the update...").`;

			const subtaskDataString = JSON.stringify(subtask, null, 2);
			// Updated user prompt including context
			const userPrompt = `Task Context:\n${contextString}\nCurrent Subtask:\n${subtaskDataString}\n\nUser Request: "${prompt}"\n\nPlease GENERATE new, relevant text content for the 'details' field based on the user request and the provided context. Return the entire updated subtask object as a valid JSON object matching the schema, with the newly generated text placed in the 'details' field.`;
			// --- END UPDATED PROMPTS ---
			// Pass the existing subtask.details in the user prompt for the AI's context.
			const userPrompt = `Task Context:\n${contextString}\n\nUser Request: "${prompt}"\n\nBased on the User Request and all the Task Context (including current subtask details provided above), what is the new information or text that should be appended to this subtask's details? Return ONLY this new text as a plain string.`;

			// Call Unified AI Service using generateObjectService
			const role = useResearch ? 'research' : 'main';
			report('info', `Using AI object service with role: ${role}`);
			report('info', `Using AI text service with role: ${role}`);

			parsedAIResponse = await generateObjectService({
			// Store the entire response object from the AI service
			const aiServiceResponse = await generateTextService({
				prompt: userPrompt,
				systemPrompt: systemPrompt,
				schema: subtaskSchema,
				objectName: 'updatedSubtask',
				role,
				session,
				projectRoot,
				maxRetries: 2
			});

			report(
				'info',
				`>>> DEBUG: AI Service Response Object: ${JSON.stringify(aiServiceResponse, null, 2)}`
			);
			report(
				'info',
				`>>> DEBUG: Extracted generatedContentString: "${generatedContentString}"`
			);

			// Extract the actual text content from the mainResult property
			// and ensure it's a string, defaulting to empty if not.
			if (
				aiServiceResponse &&
				aiServiceResponse.mainResult &&
				typeof aiServiceResponse.mainResult.text === 'string'
			) {
				generatedContentString = aiServiceResponse.mainResult.text;
			} else {
				generatedContentString = ''; // Default to empty if mainResult.text is not a string or the path is invalid
			}
			// The telemetryData would be in aiServiceResponse.telemetryData if needed elsewhere

			report(
				'success',
				'Successfully received object response from AI service'
				'Successfully received response object from AI service' // Log message updated for clarity
			);

			if (outputFormat === 'text' && loadingIndicator) {
@@ -260,14 +244,21 @@ Guidelines:
				loadingIndicator = null;
			}

			if (!parsedAIResponse || typeof parsedAIResponse !== 'object') {
				throw new Error('AI did not return a valid object.');
			// This check now correctly validates the extracted string
			if (typeof generatedContentString !== 'string') {
				report(
					'warn',
					'AI mainResult was not a valid text string. Treating as empty.'
				);
				generatedContentString = ''; // Ensure it's a string for trim() later
			} else if (generatedContentString.trim() !== '') {
				report(
					'success',
					`Successfully extracted text from AI response using role: ${role}.`
				);
			}

			report(
				'success',
				`Successfully generated object using AI role: ${role}.`
			);
			// No need for an else here, as an empty string from mainResult is a valid scenario
			// that will be handled by the `if (generatedContentString && generatedContentString.trim())` later.
		} catch (aiError) {
			report('error', `AI service call failed: ${aiError.message}`);
			if (outputFormat === 'text' && loadingIndicator) {
@@ -278,19 +269,14 @@ Guidelines:
		}

		// --- TIMESTAMP & FORMATTING LOGIC (Handled Locally) ---
		// Extract only the generated content from the AI's response details field.
		const generatedContent = parsedAIResponse.details || ''; // Default to empty string
		if (generatedContentString && generatedContentString.trim()) {
			// Check if the string is not empty
			const timestamp = new Date().toISOString();
			const formattedBlock = `<info added on ${timestamp}>\n${generatedContentString.trim()}\n</info added on ${timestamp}>`;
			newlyAddedSnippet = formattedBlock; // <--- ADD THIS LINE: Store for display

		if (generatedContent.trim()) {
			// Generate timestamp locally
			const timestamp = new Date().toISOString(); // <<< Local Timestamp

			// Format the content with XML-like tags and timestamp LOCALLY
			const formattedBlock = `<info added on ${timestamp}>\n${generatedContent.trim()}\n</info added on ${timestamp}>`; // <<< Local Formatting

			// Append the formatted block to the *original* subtask details
			subtask.details =
				(subtask.details ? subtask.details + '\n' : '') + formattedBlock; // <<< Local Appending
				(subtask.details ? subtask.details + '\n' : '') + formattedBlock;
			report(
				'info',
				'Appended timestamped, formatted block with AI-generated content to subtask.details.'
@@ -298,70 +284,56 @@ Guidelines:
		} else {
			report(
				'warn',
				'AI response object did not contain generated content in the "details" field. Original details remain unchanged.'
				'AI response was empty or whitespace after trimming. Original details remain unchanged.'
			);
			newlyAddedSnippet = 'No new details were added by the AI.'; // <--- ADD THIS LINE: Set message for CLI
		}
		// --- END TIMESTAMP & FORMATTING LOGIC ---

		// Get a reference to the subtask *after* its details have been updated
		const updatedSubtask = parentTask.subtasks[subtaskIndex]; // subtask === updatedSubtask now

		const updatedSubtask = parentTask.subtasks[subtaskIndex];
		report('info', 'Updated subtask details locally after AI generation.');
		// --- END UPDATE SUBTASK ---

		// Only show debug info for text output (CLI)
		if (outputFormat === 'text' && getDebugFlag(session)) {
			console.log(
				'>>> DEBUG: Subtask details AFTER AI update:',
				updatedSubtask.details // Use updatedSubtask
				updatedSubtask.details
			);
		}

		// Description update logic (keeping as is for now)
		if (updatedSubtask.description) {
			// Use updatedSubtask
			if (prompt.length < 100) {
				if (outputFormat === 'text' && getDebugFlag(session)) {
					console.log(
						'>>> DEBUG: Subtask description BEFORE append:',
						updatedSubtask.description // Use updatedSubtask
						updatedSubtask.description
					);
				}
				updatedSubtask.description += ` [Updated: ${new Date().toLocaleDateString()}]`; // Use updatedSubtask
				updatedSubtask.description += ` [Updated: ${new Date().toLocaleDateString()}]`;
				if (outputFormat === 'text' && getDebugFlag(session)) {
					console.log(
						'>>> DEBUG: Subtask description AFTER append:',
						updatedSubtask.description // Use updatedSubtask
						updatedSubtask.description
					);
				}
			}
		}

		// Only show debug info for text output (CLI)
		if (outputFormat === 'text' && getDebugFlag(session)) {
			console.log('>>> DEBUG: About to call writeJSON with updated data...');
		}

		// Write the updated tasks to the file (parentTask already contains the updated subtask)
		writeJSON(tasksPath, data);

		// Only show debug info for text output (CLI)
		if (outputFormat === 'text' && getDebugFlag(session)) {
			console.log('>>> DEBUG: writeJSON call completed.');
		}

		report('success', `Successfully updated subtask ${subtaskId}`);

		// Generate individual task files
		await generateTaskFiles(tasksPath, path.dirname(tasksPath));

		// Stop indicator before final console output - only for text output (CLI)
		if (outputFormat === 'text') {
			if (loadingIndicator) {
				stopLoadingIndicator(loadingIndicator);
				loadingIndicator = null;
			}

			console.log(
				boxen(
					chalk.green(`Successfully updated subtask #${subtaskId}`) +
@@ -370,31 +342,22 @@ Guidelines:
						' ' +
						updatedSubtask.title +
						'\n\n' +
						// Update the display to show the new details field
						chalk.white.bold('Updated Details:') +
						chalk.white.bold('Newly Added Snippet:') +
						'\n' +
						chalk.white(truncate(updatedSubtask.details || '', 500, true)), // Use updatedSubtask
						chalk.white(newlyAddedSnippet),
					{ padding: 1, borderColor: 'green', borderStyle: 'round' }
				)
			);
		}

		return updatedSubtask; // Return the modified subtask object
		return updatedSubtask;
	} catch (error) {
		// Outer catch block handles final errors after loop/attempts
		// Stop indicator on error - only for text output (CLI)
		if (outputFormat === 'text' && loadingIndicator) {
			stopLoadingIndicator(loadingIndicator);
			loadingIndicator = null;
		}

		report('error', `Error updating subtask: ${error.message}`);

		// Only show error UI for text output (CLI)
		if (outputFormat === 'text') {
			console.error(chalk.red(`Error: ${error.message}`));

			// Provide helpful error messages based on error type
			if (error.message?.includes('ANTHROPIC_API_KEY')) {
				console.log(
					chalk.yellow('\nTo fix this issue, set your Anthropic API key:')
@@ -409,7 +372,6 @@ Guidelines:
					' 2. Or run without the research flag: task-master update-subtask --id=<id> --prompt="..."'
				);
			} else if (error.message?.includes('overloaded')) {
				// Catch final overload error
				console.log(
					chalk.yellow(
						'\nAI model overloaded, and fallback failed or was unavailable:'
@@ -417,7 +379,6 @@ Guidelines:
				);
				console.log(' 1. Try again in a few minutes.');
				console.log(' 2. Ensure PERPLEXITY_API_KEY is set for fallback.');
				console.log(' 3. Consider breaking your prompt into smaller updates.');
			} else if (error.message?.includes('not found')) {
				console.log(chalk.yellow('\nTo fix this issue:'));
				console.log(
@@ -426,22 +387,22 @@ Guidelines:
				console.log(
					' 2. Use a valid subtask ID with the --id parameter in format "parentId.subtaskId"'
				);
			} else if (error.message?.includes('empty stream response')) {
			} else if (
				error.message?.includes('empty stream response') ||
				error.message?.includes('AI did not return a valid text string')
			) {
				console.log(
					chalk.yellow(
						'\nThe AI model returned an empty response. This might be due to the prompt or API issues. Try rephrasing or trying again later.'
						'\nThe AI model returned an empty or invalid response. This might be due to the prompt or API issues. Try rephrasing or trying again later.'
					)
				);
			}

			if (getDebugFlag(session)) {
				// Use getter
				console.error(error);
			}
		} else {
			throw error; // Re-throw for JSON output
			throw error;
		}

		return null;
	}
}
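For reference, the local timestamp-and-append behaviour that replaces the old object-based update in the second file reduces to roughly the following helper. The function name is hypothetical; the `<info added on ...>` tag format and the append logic mirror the diff above.

```js
// Hypothetical helper distilled from the update-subtask-by-id changes:
// wrap AI-generated plain text in a timestamped block and append it to
// the subtask's existing details.
function appendTimestampedDetails(subtask, generatedContentString) {
	if (!generatedContentString || !generatedContentString.trim()) {
		return subtask; // Nothing to append; original details remain unchanged.
	}
	const timestamp = new Date().toISOString();
	const formattedBlock = `<info added on ${timestamp}>\n${generatedContentString.trim()}\n</info added on ${timestamp}>`;
	subtask.details = (subtask.details ? subtask.details + '\n' : '') + formattedBlock;
	return subtask;
}
```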