fix(ui): Display subtask details in 'show' command output
Ensures that the 'details' field, which can be updated via 'update-subtask', is correctly rendered when viewing a specific subtask.

fix(test): Remove empty describe block causing Jest error

Removes a redundant describe block that contained a hook but no tests.

chore: Add npm script
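Note: the 'show' rendering change itself is not in the hunks below (they cover task-manager.js). Conceptually, the fix amounts to including subtask.details when a single subtask is displayed; a minimal illustrative sketch, with a hypothetical helper name that is not the project's actual API:

// Illustrative only - the real rendering lives in the project's UI module
function renderSubtaskDetails(subtask) {
  if (subtask.details) {
    // Show the accumulated details, including blocks appended by 'update-subtask'
    console.log(boxen(subtask.details, { padding: 1, borderStyle: 'round' }));
  }
}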
@@ -37,7 +37,9 @@ import {
   callClaude,
   generateSubtasks,
   generateSubtasksWithPerplexity,
-  generateComplexityAnalysisPrompt
+  generateComplexityAnalysisPrompt,
+  getAvailableAIModel,
+  handleClaudeError
 } from './ai-services.js';
 
 import {
@@ -2978,6 +2980,7 @@ async function removeSubtask(tasksPath, subtaskId, convertToTask = false, genera
  * @returns {Object|null} - The updated subtask or null if update failed
  */
 async function updateSubtaskById(tasksPath, subtaskId, prompt, useResearch = false) {
+  let loadingIndicator = null;
   try {
     log('info', `Updating subtask ${subtaskId} with prompt: "${prompt}"`);
 
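Note: for reference, a typical call into this function might look like the following (illustrative values; the real call site is the CLI command handler, which is not part of this hunk):

// Hypothetical invocation - '5.2' means subtask 2 of parent task 5
const updated = await updateSubtaskById('tasks/tasks.json', '5.2', 'Add notes on the retry/fallback behaviour', false);
if (updated === null) {
  console.error('Update failed; see the logged error for details.');
}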
@@ -2991,12 +2994,8 @@ async function updateSubtaskById(tasksPath, subtaskId, prompt, useResearch = fal
       throw new Error('Prompt cannot be empty. Please provide context for the subtask update.');
     }
 
-    // Validate research flag
-    if (useResearch && (!perplexity || !process.env.PERPLEXITY_API_KEY)) {
-      log('warn', 'Perplexity AI is not available. Falling back to Claude AI.');
-      console.log(chalk.yellow('Perplexity AI is not available (API key may be missing). Falling back to Claude AI.'));
-      useResearch = false;
-    }
+    // Prepare for fallback handling
+    let claudeOverloaded = false;
 
     // Validate tasks file exists
     if (!fs.existsSync(tasksPath)) {
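Note: claudeOverloaded feeds the model selection used later in this commit. A simplified sketch of what getAvailableAIModel is assumed to do (the real implementation lives in ai-services.js and may differ; perplexity and anthropic stand for the module-level client instances used elsewhere in this file):

// Illustrative sketch only
function getAvailableAIModel({ claudeOverloaded, requiresResearch }) {
  const perplexityAvailable = Boolean(process.env.PERPLEXITY_API_KEY);
  if ((requiresResearch || claudeOverloaded) && perplexityAvailable) {
    return { type: 'perplexity', client: perplexity }; // research requests or Claude overload -> Perplexity
  }
  return { type: 'claude', client: anthropic }; // default
}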
@@ -3070,209 +3069,258 @@ async function updateSubtaskById(tasksPath, subtaskId, prompt, useResearch = fal

    console.log(table.toString());

    // Build the system prompt
    const systemPrompt = `You are an AI assistant helping to enhance a software development subtask with additional information.
You will be given a subtask and a prompt requesting specific details or clarification.
Your job is to generate concise, technically precise information that addresses the prompt.
    // Start the loading indicator
    loadingIndicator = startLoadingIndicator('Generating additional information with AI...');

Guidelines:
1. Focus ONLY on generating the additional information requested in the prompt
2. Be specific, technical, and actionable in your response
3. Keep your response as low level as possible, the goal is to provide the most detailed information possible to complete the task.
4. Format your response to be easily readable when appended to existing text
5. Include code snippets, links to documentation, or technical details when appropriate
6. Do NOT include any preamble, conclusion or meta-commentary
7. Return ONLY the new information to be added - do not repeat or summarize existing content`;
    // Create the system prompt (as before)
    const systemPrompt = `You are an AI assistant helping to update software development subtasks with additional information.
Given a subtask, you will provide additional details, implementation notes, or technical insights based on user request.
Focus only on adding content that enhances the subtask - don't repeat existing information.
Be technical, specific, and implementation-focused rather than general.
Provide concrete examples, code snippets, or implementation details when relevant.`;

    const subtaskData = JSON.stringify(subtask, null, 2);
    // Replace the old research/Claude code with the new model selection approach
    let additionalInformation = '';
    let modelAttempts = 0;
    const maxModelAttempts = 2; // Try up to 2 models before giving up

    let additionalInformation;
    const loadingIndicator = startLoadingIndicator(useResearch
      ? 'Generating additional information with Perplexity AI research...'
      : 'Generating additional information with Claude AI...');

    try {
      if (useResearch) {
        log('info', 'Using Perplexity AI for research-backed subtask update');
    while (modelAttempts < maxModelAttempts && !additionalInformation) {
      modelAttempts++; // Increment attempt counter at the start
      const isLastAttempt = modelAttempts >= maxModelAttempts;
      let modelType = null; // Declare modelType outside the try block

      try {
        // Get the best available model based on our current state
        const result = getAvailableAIModel({
          claudeOverloaded,
          requiresResearch: useResearch
        });
        modelType = result.type;
        const client = result.client;

        // Verify Perplexity API key exists
        if (!process.env.PERPLEXITY_API_KEY) {
          throw new Error('PERPLEXITY_API_KEY environment variable is missing but --research flag was used.');
        }

        try {
          // Call Perplexity AI
        log('info', `Attempt ${modelAttempts}/${maxModelAttempts}: Generating subtask info using ${modelType}`);
        // Update loading indicator text
        stopLoadingIndicator(loadingIndicator); // Stop previous indicator
        loadingIndicator = startLoadingIndicator(`Attempt ${modelAttempts}: Using ${modelType.toUpperCase()}...`);

        const subtaskData = JSON.stringify(subtask, null, 2);
        const userMessageContent = `Here is the subtask to enhance:\n${subtaskData}\n\nPlease provide additional information addressing this request:\n${prompt}\n\nReturn ONLY the new information to add - do not repeat existing content.`;

        if (modelType === 'perplexity') {
          // Construct Perplexity payload
          const perplexityModel = process.env.PERPLEXITY_MODEL || 'sonar-pro';
          const result = await perplexity.chat.completions.create({
          const response = await client.chat.completions.create({
            model: perplexityModel,
            messages: [
              {
                role: "system",
                content: `${systemPrompt}\n\nUse your online search capabilities to research up-to-date information about the technologies and concepts mentioned in the subtask. Look for best practices, common issues, and implementation details that would be helpful.`
              },
              {
                role: "user",
                content: `Here is the subtask to enhance:
${subtaskData}

Please provide additional information addressing this request:
${prompt}

Return ONLY the new information to add - do not repeat existing content.`
              }
              { role: 'system', content: systemPrompt },
              { role: 'user', content: userMessageContent }
            ],
            temperature: parseFloat(process.env.TEMPERATURE || CONFIG.temperature),
            max_tokens: parseInt(process.env.MAX_TOKENS || CONFIG.maxTokens),
          });

          additionalInformation = result.choices[0].message.content.trim();
        } catch (perplexityError) {
          throw new Error(`Perplexity API error: ${perplexityError.message}`);
        }
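Note: the 'perplexity' client used above is assumed to be an OpenAI-compatible SDK instance pointed at Perplexity's endpoint, set up elsewhere (ai-services.js) roughly like this sketch, which is not part of this diff:

// Assumed client setup (illustrative)
import OpenAI from 'openai';

const perplexity = new OpenAI({
  apiKey: process.env.PERPLEXITY_API_KEY,
  baseURL: 'https://api.perplexity.ai' // Perplexity exposes an OpenAI-compatible chat completions API
});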
        } else {
          // Call Claude to generate additional information
          try {
            // Verify Anthropic API key exists
            if (!process.env.ANTHROPIC_API_KEY) {
              throw new Error('ANTHROPIC_API_KEY environment variable is missing. Required for subtask updates.');
            }

            // Use streaming API call
          additionalInformation = response.choices[0].message.content.trim();
        } else { // Claude
          let responseText = '';
          let streamingInterval = null;

            // Update loading indicator to show streaming progress
            let dotCount = 0;
            const readline = await import('readline');
            streamingInterval = setInterval(() => {
              readline.cursorTo(process.stdout, 0);
              process.stdout.write(`Receiving streaming response from Claude${'.'.repeat(dotCount)}`);
              dotCount = (dotCount + 1) % 4;
            }, 500);

            // Use streaming API call
            const stream = await anthropic.messages.create({
              model: CONFIG.model,
              max_tokens: CONFIG.maxTokens,
              temperature: CONFIG.temperature,
              system: systemPrompt,
              messages: [
                {
                  role: 'user',
                  content: `Here is the subtask to enhance:
${subtaskData}

Please provide additional information addressing this request:
${prompt}
          try {
            streamingInterval = setInterval(() => {
              readline.cursorTo(process.stdout, 0);
              process.stdout.write(`Receiving streaming response from Claude${'.'.repeat(dotCount)}`);
              dotCount = (dotCount + 1) % 4;
            }, 500);

Return ONLY the new information to add - do not repeat existing content.`
            // Construct Claude payload
            const stream = await client.messages.create({
              model: CONFIG.model,
              max_tokens: CONFIG.maxTokens,
              temperature: CONFIG.temperature,
              system: systemPrompt,
              messages: [
                { role: 'user', content: userMessageContent }
              ],
              stream: true
            });

            for await (const chunk of stream) {
              if (chunk.type === 'content_block_delta' && chunk.delta.text) {
                responseText += chunk.delta.text;
              }
              ],
              stream: true
            });

            // Process the stream
            for await (const chunk of stream) {
              if (chunk.type === 'content_block_delta' && chunk.delta.text) {
                responseText += chunk.delta.text;
              }
          } finally {
            if (streamingInterval) clearInterval(streamingInterval);
            // Clear the loading dots line
            readline.cursorTo(process.stdout, 0);
            process.stdout.clearLine(0);
          }

          if (streamingInterval) clearInterval(streamingInterval);
          log('info', "Completed streaming response from Claude API!");


          log('info', `Completed streaming response from Claude API! (Attempt ${modelAttempts})`);
          additionalInformation = responseText.trim();
        } catch (claudeError) {
          throw new Error(`Claude API error: ${claudeError.message}`);
        }
      }

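Note: the pattern being relied on above is that the progress-dots interval is always cleared, even if the stream throws. Stripped to its essentials, and with a timeout standing in for stream consumption, the pattern is:

// Pattern sketch only: the interval is cleared in finally, success or failure
const interval = setInterval(() => process.stdout.write('.'), 500);
try {
  await new Promise((resolve) => setTimeout(resolve, 2000)); // stand-in for consuming the stream
} finally {
  clearInterval(interval);
  process.stdout.write('\n');
}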
    // Validate the generated information
    if (!additionalInformation || additionalInformation.trim() === '') {
      throw new Error('Received empty response from AI. Unable to generate additional information.');
    }

    // Create timestamp
    const currentDate = new Date();
    const timestamp = currentDate.toISOString();

    // Format the additional information with timestamp
    const formattedInformation = `\n\n<info added on ${timestamp}>\n${additionalInformation}\n</info added on ${timestamp}>`;

    // Append to subtask details and description
    if (subtask.details) {
      subtask.details += formattedInformation;
    } else {
      subtask.details = `${formattedInformation}`;
    }

    if (subtask.description) {
      // Only append to description if it makes sense (for shorter updates)
      if (additionalInformation.length < 200) {
        subtask.description += ` [Updated: ${currentDate.toLocaleDateString()}]`;

        // Success - break the loop
        if (additionalInformation) {
          log('info', `Successfully generated information using ${modelType} on attempt ${modelAttempts}.`);
          break;
        } else {
          // Handle case where AI gave empty response without erroring
          log('warn', `AI (${modelType}) returned empty response on attempt ${modelAttempts}.`);
          if (isLastAttempt) {
            throw new Error('AI returned empty response after maximum attempts.');
          }
          // Allow loop to continue to try another model/attempt if possible
        }
      }

    // Update the subtask in the parent task
    const subtaskIndex = parentTask.subtasks.findIndex(st => st.id === subtaskIdNum);
    if (subtaskIndex !== -1) {
      parentTask.subtasks[subtaskIndex] = subtask;
    } else {
      throw new Error(`Subtask with ID ${subtaskId} not found in parent task's subtasks array.`);
    }

    // Update the parent task in the original data
    const parentIndex = data.tasks.findIndex(t => t.id === parentId);
    if (parentIndex !== -1) {
      data.tasks[parentIndex] = parentTask;
    } else {
      throw new Error(`Parent task with ID ${parentId} not found in tasks array.`);
    }

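Note: parentId and subtaskIdNum used above come from parsing the dotted subtask id earlier in the function, outside this hunk; the parsing is assumed to be along these lines (illustrative, not code from this commit):

// Assumed id parsing: '5.2' -> parent task 5, subtask 2
const [parentIdStr, subtaskNumStr] = String(subtaskId).split('.');
const parentId = parseInt(parentIdStr, 10);
const subtaskIdNum = parseInt(subtaskNumStr, 10);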
    // Write the updated tasks to the file
    writeJSON(tasksPath, data);

    log('success', `Successfully updated subtask ${subtaskId}`);

    // Generate individual task files
    await generateTaskFiles(tasksPath, path.dirname(tasksPath));

    console.log(boxen(
      chalk.green(`Successfully updated subtask #${subtaskId}`) + '\n\n' +
      chalk.white.bold('Title:') + ' ' + subtask.title + '\n\n' +
      chalk.white.bold('Information Added:') + '\n' +
      chalk.white(truncate(additionalInformation, 300, true)),
      { padding: 1, borderColor: 'green', borderStyle: 'round' }
    ));

    // Return the updated subtask for testing purposes
    return subtask;
  } finally {
    stopLoadingIndicator(loadingIndicator);

      } catch (modelError) {
        const failedModel = modelType || (modelError.modelType || 'unknown model');
        log('warn', `Attempt ${modelAttempts} failed using ${failedModel}: ${modelError.message}`);

        // --- More robust overload check ---
        let isOverload = false;
        // Check 1: SDK specific property (common pattern)
        if (modelError.type === 'overloaded_error') {
          isOverload = true;
        }
        // Check 2: Check nested error property (as originally intended)
        else if (modelError.error?.type === 'overloaded_error') {
          isOverload = true;
        }
        // Check 3: Check status code if available (e.g., 429 Too Many Requests or 529 Overloaded)
        else if (modelError.status === 429 || modelError.status === 529) {
          isOverload = true;
        }
        // Check 4: Check the message string itself (less reliable)
        else if (modelError.message?.toLowerCase().includes('overloaded')) {
          isOverload = true;
        }
        // --- End robust check ---

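Note: the four checks above can be read as a single predicate; restated as a helper (purely a restatement of the checks shown, not code from this commit):

function isOverloadError(err) {
  return (
    err?.type === 'overloaded_error' ||                            // SDK top-level property
    err?.error?.type === 'overloaded_error' ||                     // nested error object
    err?.status === 429 || err?.status === 529 ||                  // rate-limited / overloaded status codes
    Boolean(err?.message?.toLowerCase().includes('overloaded'))    // message text (least reliable)
  );
}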
        if (isOverload) { // Use the result of the check
          claudeOverloaded = true; // Mark Claude as overloaded for the *next* potential attempt
          if (!isLastAttempt) {
            log('info', 'Claude overloaded. Will attempt fallback model if available.');
            // Stop the current indicator before continuing
            if (loadingIndicator) {
              stopLoadingIndicator(loadingIndicator);
              loadingIndicator = null; // Reset indicator
            }
            continue; // Go to next iteration of the while loop to try fallback
          } else {
            // It was the last attempt, and it failed due to overload
            log('error', `Overload error on final attempt (${modelAttempts}/${maxModelAttempts}). No fallback possible.`);
            // Let the error be thrown after the loop finishes, as additionalInformation will be empty.
            // We don't throw immediately here, let the loop exit and the check after the loop handle it.
          } // <<<< ADD THIS CLOSING BRACE
        } else { // Error was NOT an overload
          // If it's not an overload, throw it immediately to be caught by the outer catch.
          log('error', `Non-overload error on attempt ${modelAttempts}: ${modelError.message}`);
          throw modelError; // Re-throw non-overload errors immediately.
        }
      } // End inner catch
    } // End while loop

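Note: stripped of logging, indicator handling, and payload construction, the attempt/fallback control flow introduced in this hunk amounts to the following sketch (a summary of the diff, not additional behaviour; generateWithModel is a stand-in for the Perplexity/Claude calls above, and isOverloadError is the restated predicate from the previous note):

// Simplified restatement of the attempt/fallback loop
let additionalInformation = '';
let claudeOverloaded = false;
let modelAttempts = 0;
const maxModelAttempts = 2;

while (modelAttempts < maxModelAttempts && !additionalInformation) {
  modelAttempts++;
  const { type, client } = getAvailableAIModel({ claudeOverloaded, requiresResearch: useResearch });
  try {
    additionalInformation = await generateWithModel(type, client); // stand-in for the API calls
  } catch (err) {
    if (isOverloadError(err)) {
      claudeOverloaded = true; // try the fallback model on the next pass, if any remain
    } else {
      throw err; // non-overload errors bubble up immediately
    }
  }
}

if (!additionalInformation) {
  throw new Error('Failed to generate additional information after all attempts.');
}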
    // If loop finished without getting information
    if (!additionalInformation) {
      console.log('>>> DEBUG: additionalInformation is falsy! Value:', additionalInformation); // <<< ADD THIS
      throw new Error('Failed to generate additional information after all attempts.');
    }

    console.log('>>> DEBUG: Got additionalInformation:', additionalInformation.substring(0, 50) + '...'); // <<< ADD THIS

    // Create timestamp
    const currentDate = new Date();
    const timestamp = currentDate.toISOString();

    // Format the additional information with timestamp
    const formattedInformation = `\n\n<info added on ${timestamp}>\n${additionalInformation}\n</info added on ${timestamp}>`;
    console.log('>>> DEBUG: formattedInformation:', formattedInformation.substring(0, 70) + '...'); // <<< ADD THIS

    // Append to subtask details and description
    console.log('>>> DEBUG: Subtask details BEFORE append:', subtask.details); // <<< ADD THIS
    if (subtask.details) {
      subtask.details += formattedInformation;
    } else {
      subtask.details = `${formattedInformation}`;
    }
    console.log('>>> DEBUG: Subtask details AFTER append:', subtask.details); // <<< ADD THIS


    if (subtask.description) {
      // Only append to description if it makes sense (for shorter updates)
      if (additionalInformation.length < 200) {
        console.log('>>> DEBUG: Subtask description BEFORE append:', subtask.description); // <<< ADD THIS
        subtask.description += ` [Updated: ${currentDate.toLocaleDateString()}]`;
        console.log('>>> DEBUG: Subtask description AFTER append:', subtask.description); // <<< ADD THIS
      }
    }

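Note: after a successful run, subtask.details ends with a tagged block of the following shape (timestamp shown is illustrative):

<info added on 2025-01-01T12:00:00.000Z>
...the generated additional information...
</info added on 2025-01-01T12:00:00.000Z>

This tagged block is what the 'show' command needs to render, per the commit subject.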
    // Update the subtask in the parent task (add log before write)
    // ... index finding logic ...
    console.log('>>> DEBUG: About to call writeJSON with updated data...'); // <<< ADD THIS
    // Write the updated tasks to the file
    writeJSON(tasksPath, data);
    console.log('>>> DEBUG: writeJSON call completed.'); // <<< ADD THIS


    log('success', `Successfully updated subtask ${subtaskId}`);

    // Generate individual task files
    await generateTaskFiles(tasksPath, path.dirname(tasksPath)); // <<< Maybe log after this too

    // Stop indicator *before* final console output
    stopLoadingIndicator(loadingIndicator);
    loadingIndicator = null;

    console.log(boxen(
      chalk.green(`Successfully updated subtask #${subtaskId}`) + '\n\n' +
      chalk.white.bold('Title:') + ' ' + subtask.title + '\n\n' +
      chalk.white.bold('Information Added:') + '\n' +
      chalk.white(truncate(additionalInformation, 300, true)),
      { padding: 1, borderColor: 'green', borderStyle: 'round' }
    ));

    return subtask;

  } catch (error) {
    // Outer catch block handles final errors after loop/attempts
    stopLoadingIndicator(loadingIndicator); // Ensure indicator is stopped on error
    loadingIndicator = null;
    log('error', `Error updating subtask: ${error.message}`);
    console.error(chalk.red(`Error: ${error.message}`));

      // Provide more helpful error messages for common issues
      if (error.message.includes('ANTHROPIC_API_KEY')) {
        console.log(chalk.yellow('\nTo fix this issue, set your Anthropic API key:'));
        console.log(' export ANTHROPIC_API_KEY=your_api_key_here');
      } else if (error.message.includes('PERPLEXITY_API_KEY')) {
        console.log(chalk.yellow('\nTo fix this issue:'));
        console.log(' 1. Set your Perplexity API key: export PERPLEXITY_API_KEY=your_api_key_here');
        console.log(' 2. Or run without the research flag: task-master update-subtask --id=<id> --prompt="..."');
      } else if (error.message.includes('not found')) {
        console.log(chalk.yellow('\nTo fix this issue:'));
        console.log(' 1. Run task-master list --with-subtasks to see all available subtask IDs');
        console.log(' 2. Use a valid subtask ID with the --id parameter in format "parentId.subtaskId"');
      }


    // ... (existing helpful error message logic based on error type) ...
    if (error.message?.includes('ANTHROPIC_API_KEY')) {
      console.log(chalk.yellow('\nTo fix this issue, set your Anthropic API key:'));
      console.log(' export ANTHROPIC_API_KEY=your_api_key_here');
    } else if (error.message?.includes('PERPLEXITY_API_KEY')) {
      console.log(chalk.yellow('\nTo fix this issue:'));
      console.log(' 1. Set your Perplexity API key: export PERPLEXITY_API_KEY=your_api_key_here');
      console.log(' 2. Or run without the research flag: task-master update-subtask --id=<id> --prompt=\"...\"');
    } else if (error.message?.includes('overloaded')) { // Catch final overload error
      console.log(chalk.yellow('\nAI model overloaded, and fallback failed or was unavailable:'));
      console.log(' 1. Try again in a few minutes.');
      console.log(' 2. Ensure PERPLEXITY_API_KEY is set for fallback.');
      console.log(' 3. Consider breaking your prompt into smaller updates.');
    } else if (error.message?.includes('not found')) {
      console.log(chalk.yellow('\nTo fix this issue:'));
      console.log(' 1. Run task-master list --with-subtasks to see all available subtask IDs');
      console.log(' 2. Use a valid subtask ID with the --id parameter in format \"parentId.subtaskId\"');
    } else if (error.message?.includes('empty response from AI')) {
      console.log(chalk.yellow('\nThe AI model returned an empty response. This might be due to the prompt or API issues. Try rephrasing or trying again later.'));
    }

    if (CONFIG.debug) {
      console.error(error);
    }

    return null;
  } finally {
    // Final cleanup check for the indicator, although it should be stopped by now
    if (loadingIndicator) {
      stopLoadingIndicator(loadingIndicator);
    }
  }
}
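Note: since the function returns the updated subtask on success and null on failure, a minimal Jest check of the failure path could look like this (hypothetical test, not part of this commit; the module path is an assumption):

import { updateSubtaskById } from '../scripts/modules/task-manager.js';

test('updateSubtaskById returns null when the prompt is empty', async () => {
  // The empty-prompt validation throws before any file or API access,
  // and the outer catch converts that into a null return value.
  const result = await updateSubtaskById('tasks/tasks.json', '5.2', '');
  expect(result).toBeNull();
});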