refactor(tasks): Align add-task with unified AI service and add research flag
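The refactor routes task generation through the unified generateObjectService and exposes a useResearch flag on addTask. A minimal sketch of the new call path, assuming the post-refactor signature shown in the diff below; the file path, prompt text, dependency IDs, and the mcpLog/session objects are placeholders, not values from this commit:

// useResearch = true resolves to the 'research' role inside generateObjectService;
// false (the default) resolves to the 'main' role.
const newTaskId = await addTask(
  'tasks/tasks.json',        // tasksPath
  'Add OAuth login support', // prompt describing the task to create
  [3, 5],                    // dependencies (validated and coerced to numbers)
  'high',                    // priority (defaults to getDefaultPriority())
  { mcpLog, session },       // context object; session is used for API key resolution
  'text',                    // outputFormat ('text' for CLI, 'json' for MCP)
  null,                      // manualTaskData (skips the AI call when provided)
  true                       // useResearch flag added by this commit
);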
@@ -1,6 +1,8 @@
import path from 'path';

import { log, readJSON, writeJSON } from '../utils.js';
import { isTaskDependentOn } from '../task-manager.js';
import generateTaskFiles from './generate-task-files.js';

/**
 * Add a subtask to a parent task
@@ -2,6 +2,7 @@ import path from 'path';
import chalk from 'chalk';
import boxen from 'boxen';
import Table from 'cli-table3';
import { z } from 'zod';

import {
  displayBanner,
@@ -10,16 +11,23 @@ import {
  stopLoadingIndicator
} from '../ui.js';
import { log, readJSON, writeJSON, truncate } from '../utils.js';
import { _handleAnthropicStream } from '../ai-services.js';
import {
  getDefaultPriority,
  getResearchModelId,
  getResearchTemperature,
  getResearchMaxTokens,
  getMainModelId,
  getMainTemperature,
  getMainMaxTokens
} from '../config-manager.js';
import { generateObjectService } from '../ai-services-unified.js';
import { getDefaultPriority } from '../config-manager.js';
import generateTaskFiles from './generate-task-files.js';

// Define Zod schema for the expected AI output object
const AiTaskDataSchema = z.object({
  title: z.string().describe('Clear, concise title for the task'),
  description: z
    .string()
    .describe('A one or two sentence description of the task'),
  details: z
    .string()
    .describe('In-depth implementation details, considerations, and guidance'),
  testStrategy: z
    .string()
    .describe('Detailed approach for verifying task completion')
});

/**
 * Add a new task using AI
@@ -31,21 +39,32 @@ import {
 * @param {Object} mcpLog - MCP logger object (optional)
 * @param {Object} session - Session object from MCP server (optional)
 * @param {string} outputFormat - Output format (text or json)
 * @param {Object} customEnv - Custom environment variables (optional)
 * @param {Object} customEnv - Custom environment variables (optional) - Note: AI params override deprecated
 * @param {Object} manualTaskData - Manual task data (optional, for direct task creation without AI)
 * @param {boolean} useResearch - Whether to use the research model (passed to unified service)
 * @returns {number} The new task ID
 */
async function addTask(
  tasksPath,
  prompt,
  dependencies = [],
  priority = getDefaultPriority(), // Use getter
  priority = getDefaultPriority(), // Keep getter for default priority
  { reportProgress, mcpLog, session } = {},
  outputFormat = 'text',
  customEnv = null,
  manualTaskData = null
  // customEnv = null, // Removed as AI param overrides are deprecated
  manualTaskData = null,
  useResearch = false // <-- Add useResearch parameter
) {
  let loadingIndicator = null; // Keep indicator variable accessible
  let loadingIndicator = null;

  // Create custom reporter that checks for MCP log
  const report = (message, level = 'info') => {
    if (mcpLog) {
      mcpLog[level](message);
    } else if (outputFormat === 'text') {
      log(level, message);
    }
  };

  try {
    // Only display banner and UI elements for text output (CLI)
@@ -65,12 +84,13 @@ async function addTask(
    // Read the existing tasks
    const data = readJSON(tasksPath);
    if (!data || !data.tasks) {
      log('error', 'Invalid or missing tasks.json.');
      report('Invalid or missing tasks.json.', 'error');
      throw new Error('Invalid or missing tasks.json.');
    }

    // Find the highest task ID to determine the next ID
    const highestId = Math.max(...data.tasks.map((t) => t.id));
    const highestId =
      data.tasks.length > 0 ? Math.max(...data.tasks.map((t) => t.id)) : 0;
    const newTaskId = highestId + 1;

    // Only show UI box for CLI mode
@@ -87,251 +107,119 @@ async function addTask(

    // Validate dependencies before proceeding
    const invalidDeps = dependencies.filter((depId) => {
      return !data.tasks.some((t) => t.id === depId);
      // Ensure depId is parsed as a number for comparison
      const numDepId = parseInt(depId, 10);
      return isNaN(numDepId) || !data.tasks.some((t) => t.id === numDepId);
    });

    if (invalidDeps.length > 0) {
      log(
        'warn',
        `The following dependencies do not exist: ${invalidDeps.join(', ')}`
      report(
        `The following dependencies do not exist or are invalid: ${invalidDeps.join(', ')}`,
        'warn'
      );
      log('info', 'Removing invalid dependencies...');
      report('Removing invalid dependencies...', 'info');
      dependencies = dependencies.filter(
        (depId) => !invalidDeps.includes(depId)
      );
    }
    // Ensure dependencies are numbers
    const numericDependencies = dependencies.map((dep) => parseInt(dep, 10));

    let taskData;

    // Check if manual task data is provided
    if (manualTaskData) {
      // Use manual task data directly
      log('info', 'Using manually provided task data');
      report('Using manually provided task data', 'info');
      taskData = manualTaskData;

      // Basic validation for manual data
      if (
        !taskData.title ||
        typeof taskData.title !== 'string' ||
        !taskData.description ||
        typeof taskData.description !== 'string'
      ) {
        throw new Error(
          'Manual task data must include at least a title and description.'
        );
      }
    } else {
      // Use AI to generate task data
      // --- Refactored AI Interaction ---
      report('Generating task data with AI...', 'info');

      // Create context string for task creation prompt
      let contextTasks = '';
      if (dependencies.length > 0) {
      // Provide context for the dependent tasks
      if (numericDependencies.length > 0) {
        const dependentTasks = data.tasks.filter((t) =>
          dependencies.includes(t.id)
          numericDependencies.includes(t.id)
        );
        contextTasks = `\nThis task depends on the following tasks:\n${dependentTasks
          .map((t) => `- Task ${t.id}: ${t.title} - ${t.description}`)
          .join('\n')}`;
      } else {
        // Provide a few recent tasks as context
        const recentTasks = [...data.tasks]
          .sort((a, b) => b.id - a.id)
          .slice(0, 3);
        contextTasks = `\nRecent tasks in the project:\n${recentTasks
          .map((t) => `- Task ${t.id}: ${t.title} - ${t.description}`)
          .join('\n')}`;
        if (recentTasks.length > 0) {
          contextTasks = `\nRecent tasks in the project:\n${recentTasks
            .map((t) => `- Task ${t.id}: ${t.title} - ${t.description}`)
            .join('\n')}`;
        }
      }

      // System Prompt
      const systemPrompt =
        "You are a helpful assistant that creates well-structured tasks for a software development project. Generate a single new task based on the user's description, adhering strictly to the provided JSON schema.";

      // Task Structure Description (for user prompt)
      const taskStructureDesc = `
{
  "title": "Task title goes here",
  "description": "A concise one or two sentence description of what the task involves",
  "details": "In-depth implementation details, considerations, and guidance.",
  "testStrategy": "Detailed approach for verifying task completion."
}`;

      // User Prompt
      const userPrompt = `Create a comprehensive new task (Task #${newTaskId}) for a software development project based on this description: "${prompt}"

${contextTasks}

Return your answer as a single JSON object matching the schema precisely.
Make sure the details and test strategy are thorough and specific.`;

      // Start the loading indicator - only for text mode
      if (outputFormat === 'text') {
        loadingIndicator = startLoadingIndicator(
          'Generating new task with Claude AI...'
          `Generating new task with ${useResearch ? 'Research' : 'Main'} AI...`
        );
      }

      try {
        // Import the AI services - explicitly importing here to avoid circular dependencies
        const {
          _handleAnthropicStream,
          _buildAddTaskPrompt,
          parseTaskJsonResponse,
          getAvailableAIModel
        } = await import('./ai-services.js');
        // Determine the service role based on the useResearch flag
        const serviceRole = useResearch ? 'research' : 'main';

        // Initialize model state variables
        let claudeOverloaded = false;
        let modelAttempts = 0;
        const maxModelAttempts = 2; // Try up to 2 models before giving up
        let aiGeneratedTaskData = null;
        // Call the unified AI service
        const aiGeneratedTaskData = await generateObjectService({
          role: serviceRole, // <-- Use the determined role
          session: session, // Pass session for API key resolution
          schema: AiTaskDataSchema, // Pass the Zod schema
          objectName: 'newTaskData', // Name for the object
          systemPrompt: systemPrompt,
          prompt: userPrompt,
          reportProgress // Pass progress reporter if available
        });

        // Loop through model attempts
        while (modelAttempts < maxModelAttempts && !aiGeneratedTaskData) {
          modelAttempts++; // Increment attempt counter
          const isLastAttempt = modelAttempts >= maxModelAttempts;
          let modelType = null; // Track which model we're using

          try {
            // Get the best available model based on our current state
            const result = getAvailableAIModel({
              claudeOverloaded,
              requiresResearch: false // We're not using the research flag here
            });
            modelType = result.type;
            const client = result.client;

            log(
              'info',
              `Attempt ${modelAttempts}/${maxModelAttempts}: Generating task using ${modelType}`
            );

            // Update loading indicator text - only for text output
            if (outputFormat === 'text') {
              if (loadingIndicator) {
                stopLoadingIndicator(loadingIndicator); // Stop previous indicator
              }
              loadingIndicator = startLoadingIndicator(
                `Attempt ${modelAttempts}: Using ${modelType.toUpperCase()}...`
              );
            }

            // Build the prompts using the helper
            const { systemPrompt, userPrompt } = _buildAddTaskPrompt(
              prompt,
              contextTasks,
              { newTaskId }
            );

            if (modelType === 'perplexity') {
              // Use Perplexity AI
              const response = await client.chat.completions.create({
                model: getResearchModelId(session),
                messages: [
                  { role: 'system', content: systemPrompt },
                  { role: 'user', content: userPrompt }
                ],
                temperature: getResearchTemperature(session),
                max_tokens: getResearchMaxTokens(session)
              });

              const responseText = response.choices[0].message.content;
              aiGeneratedTaskData = parseTaskJsonResponse(responseText);
            } else {
              // Use Claude (default)
              // Prepare API parameters using getters, preserving customEnv override
              const apiParams = {
                model: customEnv?.ANTHROPIC_MODEL || getMainModelId(session),
                max_tokens: customEnv?.MAX_TOKENS || getMainMaxTokens(session),
                temperature:
                  customEnv?.TEMPERATURE || getMainTemperature(session),
                system: systemPrompt,
                messages: [{ role: 'user', content: userPrompt }]
              };

              // Call the streaming API using our helper
              try {
                const fullResponse = await _handleAnthropicStream(
                  client,
                  apiParams,
                  { reportProgress, mcpLog },
                  outputFormat === 'text' // CLI mode flag
                );

                log(
                  'debug',
                  `Streaming response length: ${fullResponse.length} characters`
                );

                // Parse the response using our helper
                aiGeneratedTaskData = parseTaskJsonResponse(fullResponse);
              } catch (streamError) {
                // Process stream errors explicitly
                log('error', `Stream error: ${streamError.message}`);

                // Check if this is an overload error
                let isOverload = false;
                // Check 1: SDK specific property
                if (streamError.type === 'overloaded_error') {
                  isOverload = true;
                }
                // Check 2: Check nested error property
                else if (streamError.error?.type === 'overloaded_error') {
                  isOverload = true;
                }
                // Check 3: Check status code
                else if (
                  streamError.status === 429 ||
                  streamError.status === 529
                ) {
                  isOverload = true;
                }
                // Check 4: Check message string
                else if (
                  streamError.message?.toLowerCase().includes('overloaded')
                ) {
                  isOverload = true;
                }

                if (isOverload) {
                  claudeOverloaded = true;
                  log(
                    'warn',
                    'Claude overloaded. Will attempt fallback model if available.'
                  );
                  // Throw to continue to next model attempt
                  throw new Error('Claude overloaded');
                } else {
                  // Re-throw non-overload errors
                  throw streamError;
                }
              }
            }

            // If we got here without errors and have task data, we're done
            if (aiGeneratedTaskData) {
              log(
                'info',
                `Successfully generated task data using ${modelType} on attempt ${modelAttempts}`
              );
              break;
            }
          } catch (modelError) {
            const failedModel = modelType || 'unknown model';
            log(
              'warn',
              `Attempt ${modelAttempts} failed using ${failedModel}: ${modelError.message}`
            );

            // Continue to next attempt if we have more attempts and this was specifically an overload error
            const wasOverload = modelError.message
              ?.toLowerCase()
              .includes('overload');

            if (wasOverload && !isLastAttempt) {
              if (modelType === 'claude') {
                claudeOverloaded = true;
                log('info', 'Will attempt with Perplexity AI next');
              }
              continue; // Continue to next attempt
            } else if (isLastAttempt) {
              log(
                'error',
                `Final attempt (${modelAttempts}/${maxModelAttempts}) failed. No fallback possible.`
              );
              throw modelError; // Re-throw on last attempt
            } else {
              throw modelError; // Re-throw for non-overload errors
            }
          }
        }

        // If we don't have task data after all attempts, throw an error
        if (!aiGeneratedTaskData) {
          throw new Error(
            'Failed to generate task data after all model attempts'
          );
        }

        // Set the AI-generated task data
        taskData = aiGeneratedTaskData;
        report('Successfully generated task data from AI.', 'success');
        taskData = aiGeneratedTaskData; // Assign the validated object
      } catch (error) {
        // Handle AI errors
        log('error', `Error generating task with AI: ${error.message}`);

        // Stop any loading indicator
        if (outputFormat === 'text' && loadingIndicator) {
          stopLoadingIndicator(loadingIndicator);
        }

        throw error;
        report(`Error generating task with AI: ${error.message}`, 'error');
        if (loadingIndicator) stopLoadingIndicator(loadingIndicator);
        throw error; // Re-throw error after logging
      } finally {
        if (loadingIndicator) stopLoadingIndicator(loadingIndicator); // Ensure indicator stops
      }
      // --- End Refactored AI Interaction ---
    }

    // Create the new task object
@@ -342,8 +230,9 @@ async function addTask(
      details: taskData.details || '',
      testStrategy: taskData.testStrategy || '',
      status: 'pending',
      dependencies: dependencies,
      priority: priority
      dependencies: numericDependencies, // Use validated numeric dependencies
      priority: priority,
      subtasks: [] // Initialize with empty subtasks array
    };

    // Add the task to the tasks array
@@ -353,13 +242,9 @@ async function addTask(
    writeJSON(tasksPath, data);

    // Generate markdown task files
    log('info', 'Generating task files...');
    await generateTaskFiles(tasksPath, path.dirname(tasksPath));

    // Stop the loading indicator if it's still running
    if (outputFormat === 'text' && loadingIndicator) {
      stopLoadingIndicator(loadingIndicator);
    }
    report('Generating task files...', 'info');
    // Pass mcpLog if available to generateTaskFiles
    await generateTaskFiles(tasksPath, path.dirname(tasksPath), { mcpLog });

    // Show success message - only for text output (CLI)
    if (outputFormat === 'text') {
@@ -369,7 +254,7 @@ async function addTask(
          chalk.cyan.bold('Title'),
          chalk.cyan.bold('Description')
        ],
        colWidths: [5, 30, 50]
        colWidths: [5, 30, 50] // Adjust widths as needed
      });

      table.push([
@@ -381,7 +266,20 @@ async function addTask(
      console.log(chalk.green('✅ New task created successfully:'));
      console.log(table.toString());

      // Show success message
      // Helper to get priority color
      const getPriorityColor = (p) => {
        switch (p?.toLowerCase()) {
          case 'high':
            return 'red';
          case 'low':
            return 'gray';
          case 'medium':
          default:
            return 'yellow';
        }
      };

      // Show success message box
      console.log(
        boxen(
          chalk.white.bold(`Task ${newTaskId} Created Successfully`) +
@@ -394,8 +292,9 @@ async function addTask(
            `Priority: ${chalk.keyword(getPriorityColor(newTask.priority))(newTask.priority)}`
          ) +
          '\n' +
          (dependencies.length > 0
            ? chalk.white(`Dependencies: ${dependencies.join(', ')}`) + '\n'
          (numericDependencies.length > 0
            ? chalk.white(`Dependencies: ${numericDependencies.join(', ')}`) +
              '\n'
            : '') +
          '\n' +
          chalk.white.bold('Next Steps:') +
@@ -419,15 +318,16 @@ async function addTask(
    // Return the new task ID
    return newTaskId;
  } catch (error) {
    // Stop any loading indicator
    if (outputFormat === 'text' && loadingIndicator) {
    // Stop any loading indicator on error
    if (loadingIndicator) {
      stopLoadingIndicator(loadingIndicator);
    }

    log('error', `Error adding task: ${error.message}`);
    report(`Error adding task: ${error.message}`, 'error');
    if (outputFormat === 'text') {
      console.error(chalk.red(`Error: ${error.message}`));
    }
    // In MCP mode, we let the direct function handler catch and format
    throw error;
  }
}
42 scripts/modules/task-manager/is-task-dependent.js Normal file
@@ -0,0 +1,42 @@
/**
 * Check if a task is dependent on another task (directly or indirectly)
 * Used to prevent circular dependencies
 * @param {Array} allTasks - Array of all tasks
 * @param {Object} task - The task to check
 * @param {number} targetTaskId - The task ID to check dependency against
 * @returns {boolean} Whether the task depends on the target task
 */
function isTaskDependentOn(allTasks, task, targetTaskId) {
  // If the task is a subtask, check if its parent is the target
  if (task.parentTaskId === targetTaskId) {
    return true;
  }

  // Check direct dependencies
  if (task.dependencies && task.dependencies.includes(targetTaskId)) {
    return true;
  }

  // Check dependencies of dependencies (recursive)
  if (task.dependencies) {
    for (const depId of task.dependencies) {
      const depTask = allTasks.find((t) => t.id === depId);
      if (depTask && isTaskDependentOn(allTasks, depTask, targetTaskId)) {
        return true;
      }
    }
  }

  // Check subtasks for dependencies
  if (task.subtasks) {
    for (const subtask of task.subtasks) {
      if (isTaskDependentOn(allTasks, subtask, targetTaskId)) {
        return true;
      }
    }
  }

  return false;
}

export default isTaskDependentOn;
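
A small usage sketch of the helper above; the task objects are hypothetical and only illustrate the shape the function walks (id, dependencies, optional subtasks/parentTaskId):

const tasks = [
  { id: 1, dependencies: [] },
  { id: 2, dependencies: [1] },
  { id: 3, dependencies: [2] }
];

// Task 3 reaches task 1 through task 2 (3 -> 2 -> 1), so this returns true.
isTaskDependentOn(tasks, tasks.find((t) => t.id === 3), 1); // true

// Task 1 has no path back to task 3, so adding 3 as a dependency of 1 would be safe.
isTaskDependentOn(tasks, tasks.find((t) => t.id === 1), 3); // false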