refactor(tasks): Align add-task with unified AI service and add research flag

Eyal Toledano
2025-04-24 01:59:41 -04:00
parent 90c6c1e587
commit be3f68e777
10 changed files with 351 additions and 404 deletions

View File

@@ -5,4 +5,5 @@
- Adds a 'models' CLI and MCP command to get the current model configuration, available models, and the ability to set main/research/fallback models.
- In the CLI, `task-master models` shows the current models config. Using the `--setup` flag launches an interactive setup that lets you select the model you want to use for each of the three roles. Use `q` during the interactive setup to cancel it.
- In the MCP, responses are simplified into a RESTful format (instead of the full CLI output). The agent can use the `models` tool with different arguments, including `listAvailableModels` to get available models. Run without arguments, it returns the current configuration. Arguments are available to set the model for each of the three roles. This allows you to manage Taskmaster AI providers and models directly from the CLI, the MCP, or both.
- Updated the CLI help menu shown when you run `task-master` to include missing commands and .taskmasterconfig information.
- Adds a `--research` flag to `add-task` so you can use Perplexity research directly in the add-task flow, rather than having to add a task and then update it.

View File

@@ -5,7 +5,7 @@
"args": ["./mcp-server/server.js"],
"env": {
"ANTHROPIC_API_KEY": "sk-ant-apikeyhere",
"PERPLEXITY_API_KEY": "pplx-1234567890",
"PERPLEXITY_API_KEY": "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",
"OPENAI_API_KEY": "sk-proj-1234567890",
"GOOGLE_API_KEY": "AIzaSyB1234567890",
"GROK_API_KEY": "gsk_1234567890",

View File

@@ -8,15 +8,6 @@ import {
enableSilentMode,
disableSilentMode
} from '../../../../scripts/modules/utils.js';
import {
getAnthropicClientForMCP,
getModelConfig
} from '../utils/ai-client-utils.js';
import {
_buildAddTaskPrompt,
parseTaskJsonResponse,
_handleAnthropicStream
} from '../../../../scripts/modules/ai-services.js';
/**
* Direct function wrapper for adding a new task with error handling.
@@ -29,16 +20,26 @@ import {
* @param {string} [args.testStrategy] - Test strategy (for manual task creation)
* @param {string} [args.dependencies] - Comma-separated list of task IDs this task depends on
* @param {string} [args.priority='medium'] - Task priority (high, medium, low)
* @param {string} [args.file='tasks/tasks.json'] - Path to the tasks file
* @param {string} [args.projectRoot] - Project root directory
* @param {string} [args.tasksJsonPath] - Path to the tasks.json file (resolved by tool)
* @param {boolean} [args.research=false] - Whether to use research capabilities for task creation
* @param {Object} log - Logger object
* @param {Object} context - Additional context (reportProgress, session)
* @param {Object} context - Additional context (session)
* @returns {Promise<Object>} - Result object { success: boolean, data?: any, error?: { code: string, message: string } }
*/
export async function addTaskDirect(args, log, context = {}) {
// Destructure expected args
// Destructure expected args (including research)
const { tasksJsonPath, prompt, dependencies, priority, research } = args;
const { session } = context; // Destructure session from context
// Define the logger wrapper to ensure compatibility with core report function
const logWrapper = {
info: (message, ...args) => log.info(message, ...args),
warn: (message, ...args) => log.warn(message, ...args),
error: (message, ...args) => log.error(message, ...args),
debug: (message, ...args) => log.debug && log.debug(message, ...args), // Handle optional debug
success: (message, ...args) => log.info(message, ...args) // Map success to info if needed
};
try {
// Enable silent mode to prevent console logs from interfering with JSON response
enableSilentMode();
@@ -79,20 +80,17 @@ export async function addTaskDirect(args, log, context = {}) {
}
// Extract and prepare parameters
const taskPrompt = prompt;
const taskDependencies = Array.isArray(dependencies)
? dependencies
: dependencies
? dependencies // Already an array if passed directly
: dependencies // Check if dependencies exist and are a string
? String(dependencies)
.split(',')
.map((id) => parseInt(id.trim(), 10))
: [];
const taskPriority = priority || 'medium';
// Extract context parameters for advanced functionality
const { session } = context;
.map((id) => parseInt(id.trim(), 10)) // Split, trim, and parse
: []; // Default to empty array if null/undefined
const taskPriority = priority || 'medium'; // Default priority
let manualTaskData = null;
let newTaskId;
if (isManualCreation) {
// Create manual task data object
@@ -108,150 +106,61 @@ export async function addTaskDirect(args, log, context = {}) {
);
// Call the addTask function with manual task data
const newTaskId = await addTask(
newTaskId = await addTask(
tasksPath,
null, // No prompt needed for manual creation
null, // prompt is null for manual creation
taskDependencies,
priority,
taskPriority,
{
mcpLog: log,
mcpLog: logWrapper,
session
},
'json', // Use JSON output format to prevent console output
null, // No custom environment
manualTaskData // Pass the manual task data
'json', // outputFormat
manualTaskData, // Pass the manual task data
false // research flag is false for manual creation
);
// Restore normal logging
disableSilentMode();
return {
success: true,
data: {
taskId: newTaskId,
message: `Successfully added new task #${newTaskId}`
}
};
} else {
// AI-driven task creation
log.info(
`Adding new task with prompt: "${prompt}", dependencies: [${taskDependencies.join(', ')}], priority: ${priority}`
`Adding new task with prompt: "${prompt}", dependencies: [${taskDependencies.join(', ')}], priority: ${taskPriority}, research: ${research}`
);
// Initialize AI client with session environment
let localAnthropic;
try {
localAnthropic = getAnthropicClientForMCP(session, log);
} catch (error) {
log.error(`Failed to initialize Anthropic client: ${error.message}`);
disableSilentMode();
return {
success: false,
error: {
code: 'AI_CLIENT_ERROR',
message: `Cannot initialize AI client: ${error.message}`
}
};
}
// Get model configuration from session
const modelConfig = getModelConfig(session);
// Read existing tasks to provide context
let tasksData;
try {
const fs = await import('fs');
tasksData = JSON.parse(fs.readFileSync(tasksPath, 'utf8'));
} catch (error) {
log.warn(`Could not read existing tasks for context: ${error.message}`);
tasksData = { tasks: [] };
}
// Build prompts for AI
const { systemPrompt, userPrompt } = _buildAddTaskPrompt(
prompt,
tasksData.tasks
);
// Make the AI call using the streaming helper
let responseText;
try {
responseText = await _handleAnthropicStream(
localAnthropic,
{
model: modelConfig.model,
max_tokens: modelConfig.maxTokens,
temperature: modelConfig.temperature,
messages: [{ role: 'user', content: userPrompt }],
system: systemPrompt
},
{
mcpLog: log
}
);
} catch (error) {
log.error(`AI processing failed: ${error.message}`);
disableSilentMode();
return {
success: false,
error: {
code: 'AI_PROCESSING_ERROR',
message: `Failed to generate task with AI: ${error.message}`
}
};
}
// Parse the AI response
let taskDataFromAI;
try {
taskDataFromAI = parseTaskJsonResponse(responseText);
} catch (error) {
log.error(`Failed to parse AI response: ${error.message}`);
disableSilentMode();
return {
success: false,
error: {
code: 'RESPONSE_PARSING_ERROR',
message: `Failed to parse AI response: ${error.message}`
}
};
}
// Call the addTask function with 'json' outputFormat to prevent console output when called via MCP
const newTaskId = await addTask(
// Call the addTask function, passing the research flag
newTaskId = await addTask(
tasksPath,
prompt,
prompt, // Use the prompt for AI creation
taskDependencies,
priority,
taskPriority,
{
mcpLog: log,
mcpLog: logWrapper,
session
},
'json',
null,
taskDataFromAI // Pass the parsed AI result as the manual task data
'json', // outputFormat
null, // manualTaskData is null for AI creation
research // Pass the research flag
);
// Restore normal logging
disableSilentMode();
return {
success: true,
data: {
taskId: newTaskId,
message: `Successfully added new task #${newTaskId}`
}
};
}
// Restore normal logging
disableSilentMode();
return {
success: true,
data: {
taskId: newTaskId,
message: `Successfully added new task #${newTaskId}`
}
};
} catch (error) {
// Make sure to restore normal logging even if there's an error
disableSilentMode();
log.error(`Error in addTaskDirect: ${error.message}`);
// Add specific error code checks if needed
return {
success: false,
error: {
code: 'ADD_TASK_ERROR',
code: error.code || 'ADD_TASK_ERROR', // Use error code if available
message: error.message
}
};
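
For reference, a minimal sketch (not part of this diff) of how an MCP tool handler might invoke the refactored direct function with the new flag. The argument values, the `log` and `session` objects, and the import path below are hypothetical; the args shape follows the JSDoc above.

```js
import { addTaskDirect } from './mcp-server/src/core/direct-functions/add-task.js';

// Hypothetical logger and session, normally provided by the MCP server.
const log = { info: console.log, warn: console.warn, error: console.error, debug: console.debug };
const session = { env: process.env };

const result = await addTaskDirect(
	{
		tasksJsonPath: '/abs/path/to/tasks/tasks.json', // resolved by the tool layer
		prompt: 'Add rate limiting to the public API',
		dependencies: '12,15', // comma-separated string is split and parsed to numbers
		priority: 'high',
		research: true // route AI generation through the research role
	},
	log,
	{ session }
);

if (!result.success) {
	log.error(`${result.error.code}: ${result.error.message}`);
}
```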

View File

@@ -913,17 +913,18 @@ function registerCommands(programInstance) {
}
}
// Pass mcpLog and session for MCP mode
const newTaskId = await addTask(
options.file,
options.prompt,
dependencies,
options.priority,
{
session: process.env
session: process.env // Pass environment as session for CLI
},
options.research || false,
null,
manualTaskData
'text', // outputFormat
null, // manualTaskData
options.research || false // Pass the research flag value
);
console.log(chalk.green(`✓ Added new task #${newTaskId}`));

View File

@@ -24,6 +24,7 @@ import removeTask from './task-manager/remove-task.js';
import taskExists from './task-manager/task-exists.js';
import generateSubtaskPrompt from './task-manager/generate-subtask-prompt.js';
import getSubtasksFromAI from './task-manager/get-subtasks-from-ai.js';
import isTaskDependentOn from './task-manager/is-task-dependent.js';
// Export task manager functions
export {
@@ -47,5 +48,6 @@ export {
findTaskById,
taskExists,
generateSubtaskPrompt,
getSubtasksFromAI
getSubtasksFromAI,
isTaskDependentOn
};

View File

@@ -1,6 +1,8 @@
import path from 'path';
import { log, readJSON, writeJSON } from '../utils.js';
import { isTaskDependentOn } from '../task-manager.js';
import generateTaskFiles from './generate-task-files.js';
/**
* Add a subtask to a parent task

View File

@@ -2,6 +2,7 @@ import path from 'path';
import chalk from 'chalk';
import boxen from 'boxen';
import Table from 'cli-table3';
import { z } from 'zod';
import {
displayBanner,
@@ -10,16 +11,23 @@ import {
stopLoadingIndicator
} from '../ui.js';
import { log, readJSON, writeJSON, truncate } from '../utils.js';
import { _handleAnthropicStream } from '../ai-services.js';
import {
getDefaultPriority,
getResearchModelId,
getResearchTemperature,
getResearchMaxTokens,
getMainModelId,
getMainTemperature,
getMainMaxTokens
} from '../config-manager.js';
import { generateObjectService } from '../ai-services-unified.js';
import { getDefaultPriority } from '../config-manager.js';
import generateTaskFiles from './generate-task-files.js';
// Define Zod schema for the expected AI output object
const AiTaskDataSchema = z.object({
title: z.string().describe('Clear, concise title for the task'),
description: z
.string()
.describe('A one or two sentence description of the task'),
details: z
.string()
.describe('In-depth implementation details, considerations, and guidance'),
testStrategy: z
.string()
.describe('Detailed approach for verifying task completion')
});
/**
* Add a new task using AI
@@ -31,21 +39,32 @@ import {
* @param {Object} mcpLog - MCP logger object (optional)
* @param {Object} session - Session object from MCP server (optional)
* @param {string} outputFormat - Output format (text or json)
* @param {Object} customEnv - Custom environment variables (optional)
* @param {Object} customEnv - Custom environment variables (optional) - Note: AI params override deprecated
* @param {Object} manualTaskData - Manual task data (optional, for direct task creation without AI)
* @param {boolean} useResearch - Whether to use the research model (passed to unified service)
* @returns {number} The new task ID
*/
async function addTask(
tasksPath,
prompt,
dependencies = [],
priority = getDefaultPriority(), // Use getter
priority = getDefaultPriority(), // Keep getter for default priority
{ reportProgress, mcpLog, session } = {},
outputFormat = 'text',
customEnv = null,
manualTaskData = null
// customEnv = null, // Removed as AI param overrides are deprecated
manualTaskData = null,
useResearch = false // <-- Add useResearch parameter
) {
let loadingIndicator = null; // Keep indicator variable accessible
let loadingIndicator = null;
// Create custom reporter that checks for MCP log
const report = (message, level = 'info') => {
if (mcpLog) {
mcpLog[level](message);
} else if (outputFormat === 'text') {
log(level, message);
}
};
try {
// Only display banner and UI elements for text output (CLI)
@@ -65,12 +84,13 @@ async function addTask(
// Read the existing tasks
const data = readJSON(tasksPath);
if (!data || !data.tasks) {
log('error', 'Invalid or missing tasks.json.');
report('Invalid or missing tasks.json.', 'error');
throw new Error('Invalid or missing tasks.json.');
}
// Find the highest task ID to determine the next ID
const highestId = Math.max(...data.tasks.map((t) => t.id));
const highestId =
data.tasks.length > 0 ? Math.max(...data.tasks.map((t) => t.id)) : 0;
const newTaskId = highestId + 1;
// Only show UI box for CLI mode
@@ -87,251 +107,119 @@ async function addTask(
// Validate dependencies before proceeding
const invalidDeps = dependencies.filter((depId) => {
return !data.tasks.some((t) => t.id === depId);
// Ensure depId is parsed as a number for comparison
const numDepId = parseInt(depId, 10);
return isNaN(numDepId) || !data.tasks.some((t) => t.id === numDepId);
});
if (invalidDeps.length > 0) {
log(
'warn',
`The following dependencies do not exist: ${invalidDeps.join(', ')}`
report(
`The following dependencies do not exist or are invalid: ${invalidDeps.join(', ')}`,
'warn'
);
log('info', 'Removing invalid dependencies...');
report('Removing invalid dependencies...', 'info');
dependencies = dependencies.filter(
(depId) => !invalidDeps.includes(depId)
);
}
// Ensure dependencies are numbers
const numericDependencies = dependencies.map((dep) => parseInt(dep, 10));
let taskData;
// Check if manual task data is provided
if (manualTaskData) {
// Use manual task data directly
log('info', 'Using manually provided task data');
report('Using manually provided task data', 'info');
taskData = manualTaskData;
// Basic validation for manual data
if (
!taskData.title ||
typeof taskData.title !== 'string' ||
!taskData.description ||
typeof taskData.description !== 'string'
) {
throw new Error(
'Manual task data must include at least a title and description.'
);
}
} else {
// Use AI to generate task data
// --- Refactored AI Interaction ---
report('Generating task data with AI...', 'info');
// Create context string for task creation prompt
let contextTasks = '';
if (dependencies.length > 0) {
// Provide context for the dependent tasks
if (numericDependencies.length > 0) {
const dependentTasks = data.tasks.filter((t) =>
dependencies.includes(t.id)
numericDependencies.includes(t.id)
);
contextTasks = `\nThis task depends on the following tasks:\n${dependentTasks
.map((t) => `- Task ${t.id}: ${t.title} - ${t.description}`)
.join('\n')}`;
} else {
// Provide a few recent tasks as context
const recentTasks = [...data.tasks]
.sort((a, b) => b.id - a.id)
.slice(0, 3);
contextTasks = `\nRecent tasks in the project:\n${recentTasks
.map((t) => `- Task ${t.id}: ${t.title} - ${t.description}`)
.join('\n')}`;
if (recentTasks.length > 0) {
contextTasks = `\nRecent tasks in the project:\n${recentTasks
.map((t) => `- Task ${t.id}: ${t.title} - ${t.description}`)
.join('\n')}`;
}
}
// System Prompt
const systemPrompt =
"You are a helpful assistant that creates well-structured tasks for a software development project. Generate a single new task based on the user's description, adhering strictly to the provided JSON schema.";
// Task Structure Description (for user prompt)
const taskStructureDesc = `
{
"title": "Task title goes here",
"description": "A concise one or two sentence description of what the task involves",
"details": "In-depth implementation details, considerations, and guidance.",
"testStrategy": "Detailed approach for verifying task completion."
}`;
// User Prompt
const userPrompt = `Create a comprehensive new task (Task #${newTaskId}) for a software development project based on this description: "${prompt}"
${contextTasks}
Return your answer as a single JSON object matching the schema precisely.
Make sure the details and test strategy are thorough and specific.`;
// Start the loading indicator - only for text mode
if (outputFormat === 'text') {
loadingIndicator = startLoadingIndicator(
'Generating new task with Claude AI...'
`Generating new task with ${useResearch ? 'Research' : 'Main'} AI...`
);
}
try {
// Import the AI services - explicitly importing here to avoid circular dependencies
const {
_handleAnthropicStream,
_buildAddTaskPrompt,
parseTaskJsonResponse,
getAvailableAIModel
} = await import('./ai-services.js');
// Determine the service role based on the useResearch flag
const serviceRole = useResearch ? 'research' : 'main';
// Initialize model state variables
let claudeOverloaded = false;
let modelAttempts = 0;
const maxModelAttempts = 2; // Try up to 2 models before giving up
let aiGeneratedTaskData = null;
// Call the unified AI service
const aiGeneratedTaskData = await generateObjectService({
role: serviceRole, // <-- Use the determined role
session: session, // Pass session for API key resolution
schema: AiTaskDataSchema, // Pass the Zod schema
objectName: 'newTaskData', // Name for the object
systemPrompt: systemPrompt,
prompt: userPrompt,
reportProgress // Pass progress reporter if available
});
// Loop through model attempts
while (modelAttempts < maxModelAttempts && !aiGeneratedTaskData) {
modelAttempts++; // Increment attempt counter
const isLastAttempt = modelAttempts >= maxModelAttempts;
let modelType = null; // Track which model we're using
try {
// Get the best available model based on our current state
const result = getAvailableAIModel({
claudeOverloaded,
requiresResearch: false // We're not using the research flag here
});
modelType = result.type;
const client = result.client;
log(
'info',
`Attempt ${modelAttempts}/${maxModelAttempts}: Generating task using ${modelType}`
);
// Update loading indicator text - only for text output
if (outputFormat === 'text') {
if (loadingIndicator) {
stopLoadingIndicator(loadingIndicator); // Stop previous indicator
}
loadingIndicator = startLoadingIndicator(
`Attempt ${modelAttempts}: Using ${modelType.toUpperCase()}...`
);
}
// Build the prompts using the helper
const { systemPrompt, userPrompt } = _buildAddTaskPrompt(
prompt,
contextTasks,
{ newTaskId }
);
if (modelType === 'perplexity') {
// Use Perplexity AI
const response = await client.chat.completions.create({
model: getResearchModelId(session),
messages: [
{ role: 'system', content: systemPrompt },
{ role: 'user', content: userPrompt }
],
temperature: getResearchTemperature(session),
max_tokens: getResearchMaxTokens(session)
});
const responseText = response.choices[0].message.content;
aiGeneratedTaskData = parseTaskJsonResponse(responseText);
} else {
// Use Claude (default)
// Prepare API parameters using getters, preserving customEnv override
const apiParams = {
model: customEnv?.ANTHROPIC_MODEL || getMainModelId(session),
max_tokens: customEnv?.MAX_TOKENS || getMainMaxTokens(session),
temperature:
customEnv?.TEMPERATURE || getMainTemperature(session),
system: systemPrompt,
messages: [{ role: 'user', content: userPrompt }]
};
// Call the streaming API using our helper
try {
const fullResponse = await _handleAnthropicStream(
client,
apiParams,
{ reportProgress, mcpLog },
outputFormat === 'text' // CLI mode flag
);
log(
'debug',
`Streaming response length: ${fullResponse.length} characters`
);
// Parse the response using our helper
aiGeneratedTaskData = parseTaskJsonResponse(fullResponse);
} catch (streamError) {
// Process stream errors explicitly
log('error', `Stream error: ${streamError.message}`);
// Check if this is an overload error
let isOverload = false;
// Check 1: SDK specific property
if (streamError.type === 'overloaded_error') {
isOverload = true;
}
// Check 2: Check nested error property
else if (streamError.error?.type === 'overloaded_error') {
isOverload = true;
}
// Check 3: Check status code
else if (
streamError.status === 429 ||
streamError.status === 529
) {
isOverload = true;
}
// Check 4: Check message string
else if (
streamError.message?.toLowerCase().includes('overloaded')
) {
isOverload = true;
}
if (isOverload) {
claudeOverloaded = true;
log(
'warn',
'Claude overloaded. Will attempt fallback model if available.'
);
// Throw to continue to next model attempt
throw new Error('Claude overloaded');
} else {
// Re-throw non-overload errors
throw streamError;
}
}
}
// If we got here without errors and have task data, we're done
if (aiGeneratedTaskData) {
log(
'info',
`Successfully generated task data using ${modelType} on attempt ${modelAttempts}`
);
break;
}
} catch (modelError) {
const failedModel = modelType || 'unknown model';
log(
'warn',
`Attempt ${modelAttempts} failed using ${failedModel}: ${modelError.message}`
);
// Continue to next attempt if we have more attempts and this was specifically an overload error
const wasOverload = modelError.message
?.toLowerCase()
.includes('overload');
if (wasOverload && !isLastAttempt) {
if (modelType === 'claude') {
claudeOverloaded = true;
log('info', 'Will attempt with Perplexity AI next');
}
continue; // Continue to next attempt
} else if (isLastAttempt) {
log(
'error',
`Final attempt (${modelAttempts}/${maxModelAttempts}) failed. No fallback possible.`
);
throw modelError; // Re-throw on last attempt
} else {
throw modelError; // Re-throw for non-overload errors
}
}
}
// If we don't have task data after all attempts, throw an error
if (!aiGeneratedTaskData) {
throw new Error(
'Failed to generate task data after all model attempts'
);
}
// Set the AI-generated task data
taskData = aiGeneratedTaskData;
report('Successfully generated task data from AI.', 'success');
taskData = aiGeneratedTaskData; // Assign the validated object
} catch (error) {
// Handle AI errors
log('error', `Error generating task with AI: ${error.message}`);
// Stop any loading indicator
if (outputFormat === 'text' && loadingIndicator) {
stopLoadingIndicator(loadingIndicator);
}
throw error;
report(`Error generating task with AI: ${error.message}`, 'error');
if (loadingIndicator) stopLoadingIndicator(loadingIndicator);
throw error; // Re-throw error after logging
} finally {
if (loadingIndicator) stopLoadingIndicator(loadingIndicator); // Ensure indicator stops
}
// --- End Refactored AI Interaction ---
}
// Create the new task object
@@ -342,8 +230,9 @@ async function addTask(
details: taskData.details || '',
testStrategy: taskData.testStrategy || '',
status: 'pending',
dependencies: dependencies,
priority: priority
dependencies: numericDependencies, // Use validated numeric dependencies
priority: priority,
subtasks: [] // Initialize with empty subtasks array
};
// Add the task to the tasks array
@@ -353,13 +242,9 @@ async function addTask(
writeJSON(tasksPath, data);
// Generate markdown task files
log('info', 'Generating task files...');
await generateTaskFiles(tasksPath, path.dirname(tasksPath));
// Stop the loading indicator if it's still running
if (outputFormat === 'text' && loadingIndicator) {
stopLoadingIndicator(loadingIndicator);
}
report('Generating task files...', 'info');
// Pass mcpLog if available to generateTaskFiles
await generateTaskFiles(tasksPath, path.dirname(tasksPath), { mcpLog });
// Show success message - only for text output (CLI)
if (outputFormat === 'text') {
@@ -369,7 +254,7 @@ async function addTask(
chalk.cyan.bold('Title'),
chalk.cyan.bold('Description')
],
colWidths: [5, 30, 50]
colWidths: [5, 30, 50] // Adjust widths as needed
});
table.push([
@@ -381,7 +266,20 @@ async function addTask(
console.log(chalk.green('✅ New task created successfully:'));
console.log(table.toString());
// Show success message
// Helper to get priority color
const getPriorityColor = (p) => {
switch (p?.toLowerCase()) {
case 'high':
return 'red';
case 'low':
return 'gray';
case 'medium':
default:
return 'yellow';
}
};
// Show success message box
console.log(
boxen(
chalk.white.bold(`Task ${newTaskId} Created Successfully`) +
@@ -394,8 +292,9 @@ async function addTask(
`Priority: ${chalk.keyword(getPriorityColor(newTask.priority))(newTask.priority)}`
) +
'\n' +
(dependencies.length > 0
? chalk.white(`Dependencies: ${dependencies.join(', ')}`) + '\n'
(numericDependencies.length > 0
? chalk.white(`Dependencies: ${numericDependencies.join(', ')}`) +
'\n'
: '') +
'\n' +
chalk.white.bold('Next Steps:') +
@@ -419,15 +318,16 @@ async function addTask(
// Return the new task ID
return newTaskId;
} catch (error) {
// Stop any loading indicator
if (outputFormat === 'text' && loadingIndicator) {
// Stop any loading indicator on error
if (loadingIndicator) {
stopLoadingIndicator(loadingIndicator);
}
log('error', `Error adding task: ${error.message}`);
report(`Error adding task: ${error.message}`, 'error');
if (outputFormat === 'text') {
console.error(chalk.red(`Error: ${error.message}`));
}
// In MCP mode, we let the direct function handler catch and format
throw error;
}
}
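
A minimal sketch of the refactored core call after this change (paths and values assumed; a default export is assumed, mirroring the sibling task-manager modules). Passing `true` as the final argument selects the research role inside `generateObjectService`:

```js
import addTask from './scripts/modules/task-manager/add-task.js';

const newTaskId = await addTask(
	'tasks/tasks.json', // tasksPath
	'Implement OAuth2 login', // prompt (AI-driven creation)
	[3, 7], // dependencies as numeric IDs
	'high', // priority
	{ session: process.env }, // context; the CLI passes the environment as the session
	'text', // outputFormat ('json' when called via MCP)
	null, // manualTaskData (null => generate with AI)
	true // useResearch => use the 'research' role
);
console.log(`Created task #${newTaskId}`);
```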

View File

@@ -0,0 +1,42 @@
/**
* Check if a task is dependent on another task (directly or indirectly)
* Used to prevent circular dependencies
* @param {Array} allTasks - Array of all tasks
* @param {Object} task - The task to check
* @param {number} targetTaskId - The task ID to check dependency against
* @returns {boolean} Whether the task depends on the target task
*/
function isTaskDependentOn(allTasks, task, targetTaskId) {
// If the task is a subtask, check if its parent is the target
if (task.parentTaskId === targetTaskId) {
return true;
}
// Check direct dependencies
if (task.dependencies && task.dependencies.includes(targetTaskId)) {
return true;
}
// Check dependencies of dependencies (recursive)
if (task.dependencies) {
for (const depId of task.dependencies) {
const depTask = allTasks.find((t) => t.id === depId);
if (depTask && isTaskDependentOn(allTasks, depTask, targetTaskId)) {
return true;
}
}
}
// Check subtasks for dependencies
if (task.subtasks) {
for (const subtask of task.subtasks) {
if (isTaskDependentOn(allTasks, subtask, targetTaskId)) {
return true;
}
}
}
return false;
}
export default isTaskDependentOn;
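
A usage sketch for the newly extracted helper, guarding against a circular dependency before linking two tasks (data shape, IDs, and import paths assumed):

```js
import { readJSON } from '../utils.js'; // path assumed relative to the task-manager modules
import isTaskDependentOn from './is-task-dependent.js';

const data = readJSON('tasks/tasks.json');
const taskToLink = data.tasks.find((t) => t.id === 9);

// If task 9 already depends on task 5 (directly, transitively, or via a
// subtask), making task 5 depend on task 9 would create a cycle.
if (isTaskDependentOn(data.tasks, taskToLink, 5)) {
	throw new Error('Cannot add dependency 9 to task 5: it would be circular');
}
```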

View File

@@ -712,7 +712,7 @@ When implementing the refactored research processing logic, ensure the following
- How to verify configuration is correctly loaded
</info added on 2025-04-20T03:55:20.433Z>
## 11. Refactor PRD Parsing to use generateObjectService [in-progress]
## 11. Refactor PRD Parsing to use generateObjectService [done]
### Dependencies: 61.23
### Description: Update PRD processing logic (callClaude, processClaudeResponse, handleStreamingRequest in ai-services.js) to use the new `generateObjectService` from `ai-services-unified.js` with an appropriate Zod schema.
### Details:
@@ -747,7 +747,7 @@ const result = await generateObjectService({
5. Ensure any default values previously hardcoded are now retrieved from the configuration system.
</info added on 2025-04-20T03:55:01.707Z>
## 12. Refactor Basic Subtask Generation to use generateObjectService [pending]
## 12. Refactor Basic Subtask Generation to use generateObjectService [cancelled]
### Dependencies: 61.23
### Description: Update the `generateSubtasks` function in `ai-services.js` to use the new `generateObjectService` from `ai-services-unified.js` with a Zod schema for the subtask array.
### Details:
@@ -798,7 +798,7 @@ The refactoring should leverage the new configuration system:
```
</info added on 2025-04-20T03:54:45.542Z>
## 13. Refactor Research Subtask Generation to use generateObjectService [pending]
## 13. Refactor Research Subtask Generation to use generateObjectService [cancelled]
### Dependencies: 61.23
### Description: Update the `generateSubtasksWithPerplexity` function in `ai-services.js` to first perform research (potentially keeping the Perplexity call separate or adapting it) and then use `generateObjectService` from `ai-services-unified.js` with research results included in the prompt.
### Details:
@@ -828,7 +828,7 @@ const { verbose } = getLoggingConfig();
5. Ensure the transition to generateObjectService maintains all existing functionality while leveraging the new configuration system
</info added on 2025-04-20T03:54:26.882Z>
## 14. Refactor Research Task Description Generation to use generateObjectService [pending]
## 14. Refactor Research Task Description Generation to use generateObjectService [cancelled]
### Dependencies: 61.23
### Description: Update the `generateTaskDescriptionWithPerplexity` function in `ai-services.js` to first perform research and then use `generateObjectService` from `ai-services-unified.js` to generate the structured task description.
### Details:
@@ -869,7 +869,7 @@ return generateObjectService({
5. Remove any hardcoded configuration values, ensuring all settings are retrieved from the centralized configuration system.
</info added on 2025-04-20T03:54:04.420Z>
## 15. Refactor Complexity Analysis AI Call to use generateObjectService [pending]
## 15. Refactor Complexity Analysis AI Call to use generateObjectService [cancelled]
### Dependencies: 61.23
### Description: Update the logic that calls the AI after using `generateComplexityAnalysisPrompt` in `ai-services.js` to use the new `generateObjectService` from `ai-services-unified.js` with a Zod schema for the complexity report.
### Details:
@@ -916,7 +916,7 @@ The complexity analysis AI call should be updated to align with the new configur
```
</info added on 2025-04-20T03:53:46.120Z>
## 16. Refactor Task Addition AI Call to use generateObjectService [pending]
## 16. Refactor Task Addition AI Call to use generateObjectService [cancelled]
### Dependencies: 61.23
### Description: Update the logic that calls the AI after using `_buildAddTaskPrompt` in `ai-services.js` to use the new `generateObjectService` from `ai-services-unified.js` with a Zod schema for the single task object.
### Details:
@@ -1276,7 +1276,7 @@ When testing the non-streaming `generateTextService` call in `updateSubtaskById`
</info added on 2025-04-22T06:35:14.892Z>
</info added on 2025-04-22T06:23:23.247Z>
## 20. Implement `anthropic.js` Provider Module using Vercel AI SDK [in-progress]
## 20. Implement `anthropic.js` Provider Module using Vercel AI SDK [done]
### Dependencies: None
### Description: Create and implement the `anthropic.js` module within `src/ai-providers/`. This module should contain functions to interact with the Anthropic API (streaming and non-streaming) using the **Vercel AI SDK**, adhering to the standardized input/output format defined for `ai-services-unified.js`.
### Details:
@@ -1813,9 +1813,45 @@ This separation ensures security best practices for credentials while centralizi
This piecemeal approach aims to establish the refactoring pattern before tackling the entire codebase.
</info added on 2025-04-20T06:58:36.731Z>
## 35. Review/Refactor MCP Direct Functions for Explicit Config Root Passing [done]
## 35. Refactor add-task.js for Unified AI Service & Config [done]
### Dependencies: None
### Description: Review all functions in mcp-server/src/core/direct-functions/*.js. Ensure that any calls made from these functions to getters in scripts/modules/config-manager.js (e.g., getMainProvider, getDefaultPriority, getLogLevel, etc.) explicitly pass the projectRoot (obtained from the args object, which is derived from the session context) as the first argument to the getter. This prevents the getters from incorrectly falling back to using findProjectRoot() based on the server's cwd when running in an MCP context. This is crucial for loading the correct .taskmasterconfig settings based on the user's project.
### Description: Replace direct AI calls (old `ai-services.js` helpers) with `generateObjectService` or `generateTextService` from `ai-services-unified.js`. Pass `role` and `session`. Remove direct config getter usage (from `config-manager.js`) for AI parameters; use unified service instead. Keep `getDefaultPriority` usage.
### Details:
## 36. Refactor analyze-task-complexity.js for Unified AI Service & Config [pending]
### Dependencies: None
### Description: Replace direct AI calls with `generateObjectService` from `ai-services-unified.js`. Pass `role` and `session`. Remove direct config getter usage (from `config-manager.js`) for AI parameters; use unified service instead. Keep config getters needed for report metadata (`getProjectName`, `getDefaultSubtasks`).
### Details:
## 37. Refactor expand-task.js for Unified AI Service & Config [pending]
### Dependencies: None
### Description: Replace direct AI calls (old `ai-services.js` helpers like `generateSubtasksWithPerplexity`) with `generateObjectService` from `ai-services-unified.js`. Pass `role` and `session`. Remove direct config getter usage (from `config-manager.js`) for AI parameters; use unified service instead. Keep `getDefaultSubtasks` usage.
### Details:
## 38. Refactor expand-all-tasks.js for Unified AI Helpers & Config [pending]
### Dependencies: None
### Description: Ensure this file correctly calls the refactored `getSubtasksFromAI` helper. Update config usage to only use `getDefaultSubtasks` from `config-manager.js` directly. AI interaction itself is handled by the helper.
### Details:
## 39. Refactor get-subtasks-from-ai.js for Unified AI Service & Config [pending]
### Dependencies: None
### Description: Replace direct AI calls (old `ai-services.js` helpers) with `generateObjectService` or `generateTextService` from `ai-services-unified.js`. Pass `role` and `session`. Remove direct config getter usage (from `config-manager.js`) for AI parameters; use unified service instead.
### Details:
## 40. Refactor update-task-by-id.js for Unified AI Service & Config [pending]
### Dependencies: None
### Description: Replace direct AI calls (old `ai-services.js` helpers) with `generateObjectService` or `generateTextService` from `ai-services-unified.js`. Pass `role` and `session`. Remove direct config getter usage (from `config-manager.js`) for AI parameters and fallback logic; use unified service instead. Keep `getDebugFlag`.
### Details:
## 41. Refactor update-tasks.js for Unified AI Service & Config [pending]
### Dependencies: None
### Description: Replace direct AI calls (old `ai-services.js` helpers) with `generateObjectService` or `generateTextService` from `ai-services-unified.js`. Pass `role` and `session`. Remove direct config getter usage (from `config-manager.js`) for AI parameters and fallback logic; use unified service instead. Keep `getDebugFlag`.
### Details:
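
The common pattern subtasks 36-41 describe is the one already applied to add-task.js earlier in this commit: call `generateObjectService` with a `role` and the `session`, and let the unified layer resolve provider, model, and parameters. A condensed sketch with an illustrative schema:

```js
import { z } from 'zod';
import { generateObjectService } from './scripts/modules/ai-services-unified.js';

const ExampleSchema = z.object({
	summary: z.string().describe('One-sentence summary of the task')
});

const useResearch = false; // or true to use the research role
const session = process.env; // CLI-style session; MCP passes its own session object

const result = await generateObjectService({
	role: useResearch ? 'research' : 'main', // unified layer picks provider/model/params
	session, // used for API key resolution
	schema: ExampleSchema,
	objectName: 'exampleSummary',
	systemPrompt: 'You are a helpful assistant that summarizes software tasks.',
	prompt: 'Summarize: add OAuth2 login support.'
});
```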

View File

@@ -2874,7 +2874,7 @@
"title": "Refactor PRD Parsing to use generateObjectService",
"description": "Update PRD processing logic (callClaude, processClaudeResponse, handleStreamingRequest in ai-services.js) to use the new `generateObjectService` from `ai-services-unified.js` with an appropriate Zod schema.",
"details": "\n\n<info added on 2025-04-20T03:55:01.707Z>\nThe PRD parsing refactoring should align with the new configuration system architecture. When implementing this change:\n\n1. Replace direct environment variable access with `resolveEnvVariable` calls for API keys.\n\n2. Remove any hardcoded model names or parameters in the PRD processing functions. Instead, use the config-manager.js getters:\n - `getModelForRole('prd')` to determine the appropriate model\n - `getModelParameters('prd')` to retrieve temperature, maxTokens, etc.\n\n3. When constructing the generateObjectService call, ensure parameters are sourced from config:\n```javascript\nconst modelConfig = getModelParameters('prd');\nconst model = getModelForRole('prd');\n\nconst result = await generateObjectService({\n model,\n temperature: modelConfig.temperature,\n maxTokens: modelConfig.maxTokens,\n // other parameters as needed\n schema: prdSchema,\n // existing prompt/context parameters\n});\n```\n\n4. Update any logging to respect the logging configuration from config-manager (e.g., `isLoggingEnabled('ai')`)\n\n5. Ensure any default values previously hardcoded are now retrieved from the configuration system.\n</info added on 2025-04-20T03:55:01.707Z>",
"status": "in-progress",
"status": "done",
"dependencies": [
"61.23"
],
@@ -2885,7 +2885,7 @@
"title": "Refactor Basic Subtask Generation to use generateObjectService",
"description": "Update the `generateSubtasks` function in `ai-services.js` to use the new `generateObjectService` from `ai-services-unified.js` with a Zod schema for the subtask array.",
"details": "\n\n<info added on 2025-04-20T03:54:45.542Z>\nThe refactoring should leverage the new configuration system:\n\n1. Replace direct model references with calls to config-manager.js getters:\n ```javascript\n const { getModelForRole, getModelParams } = require('./config-manager');\n \n // Instead of hardcoded models/parameters:\n const model = getModelForRole('subtask-generator');\n const modelParams = getModelParams('subtask-generator');\n ```\n\n2. Update API key handling to use the resolveEnvVariable pattern:\n ```javascript\n const { resolveEnvVariable } = require('./utils');\n const apiKey = resolveEnvVariable('OPENAI_API_KEY');\n ```\n\n3. When calling generateObjectService, pass the configuration parameters:\n ```javascript\n const result = await generateObjectService({\n schema: subtasksArraySchema,\n prompt: subtaskPrompt,\n model: model,\n temperature: modelParams.temperature,\n maxTokens: modelParams.maxTokens,\n // Other parameters from config\n });\n ```\n\n4. Add error handling that respects logging configuration:\n ```javascript\n const { isLoggingEnabled } = require('./config-manager');\n \n try {\n // Generation code\n } catch (error) {\n if (isLoggingEnabled('errors')) {\n console.error('Subtask generation error:', error);\n }\n throw error;\n }\n ```\n</info added on 2025-04-20T03:54:45.542Z>",
"status": "pending",
"status": "cancelled",
"dependencies": [
"61.23"
],
@@ -2896,7 +2896,7 @@
"title": "Refactor Research Subtask Generation to use generateObjectService",
"description": "Update the `generateSubtasksWithPerplexity` function in `ai-services.js` to first perform research (potentially keeping the Perplexity call separate or adapting it) and then use `generateObjectService` from `ai-services-unified.js` with research results included in the prompt.",
"details": "\n\n<info added on 2025-04-20T03:54:26.882Z>\nThe refactoring should align with the new configuration system by:\n\n1. Replace direct environment variable access with `resolveEnvVariable` for API keys\n2. Use the config-manager.js getters to retrieve model parameters:\n - Replace hardcoded model names with `getModelForRole('research')`\n - Use `getParametersForRole('research')` to get temperature, maxTokens, etc.\n3. Implement proper error handling that respects the `getLoggingConfig()` settings\n4. Example implementation pattern:\n```javascript\nconst { getModelForRole, getParametersForRole, getLoggingConfig } = require('./config-manager');\nconst { resolveEnvVariable } = require('./environment-utils');\n\n// In the refactored function:\nconst researchModel = getModelForRole('research');\nconst { temperature, maxTokens } = getParametersForRole('research');\nconst apiKey = resolveEnvVariable('PERPLEXITY_API_KEY');\nconst { verbose } = getLoggingConfig();\n\n// Then use these variables in the API call configuration\n```\n5. Ensure the transition to generateObjectService maintains all existing functionality while leveraging the new configuration system\n</info added on 2025-04-20T03:54:26.882Z>",
"status": "pending",
"status": "cancelled",
"dependencies": [
"61.23"
],
@@ -2907,7 +2907,7 @@
"title": "Refactor Research Task Description Generation to use generateObjectService",
"description": "Update the `generateTaskDescriptionWithPerplexity` function in `ai-services.js` to first perform research and then use `generateObjectService` from `ai-services-unified.js` to generate the structured task description.",
"details": "\n\n<info added on 2025-04-20T03:54:04.420Z>\nThe refactoring should incorporate the new configuration management system:\n\n1. Update imports to include the config-manager:\n```javascript\nconst { getModelForRole, getParametersForRole } = require('./config-manager');\n```\n\n2. Replace any hardcoded model selections or parameters with config-manager calls:\n```javascript\n// Replace direct model references like:\n// const model = \"perplexity-model-7b-online\" \n// With:\nconst model = getModelForRole('research');\nconst parameters = getParametersForRole('research');\n```\n\n3. For API key handling, use the resolveEnvVariable pattern:\n```javascript\nconst apiKey = resolveEnvVariable('PERPLEXITY_API_KEY');\n```\n\n4. When calling generateObjectService, pass the configuration-derived parameters:\n```javascript\nreturn generateObjectService({\n prompt: researchResults,\n schema: taskDescriptionSchema,\n role: 'taskDescription',\n // Config-driven parameters will be applied within generateObjectService\n});\n```\n\n5. Remove any hardcoded configuration values, ensuring all settings are retrieved from the centralized configuration system.\n</info added on 2025-04-20T03:54:04.420Z>",
"status": "pending",
"status": "cancelled",
"dependencies": [
"61.23"
],
@@ -2918,7 +2918,7 @@
"title": "Refactor Complexity Analysis AI Call to use generateObjectService",
"description": "Update the logic that calls the AI after using `generateComplexityAnalysisPrompt` in `ai-services.js` to use the new `generateObjectService` from `ai-services-unified.js` with a Zod schema for the complexity report.",
"details": "\n\n<info added on 2025-04-20T03:53:46.120Z>\nThe complexity analysis AI call should be updated to align with the new configuration system architecture. When refactoring to use `generateObjectService`, implement the following changes:\n\n1. Replace direct model references with calls to the appropriate config getter:\n ```javascript\n const modelName = getComplexityAnalysisModel(); // Use the specific getter from config-manager.js\n ```\n\n2. Retrieve AI parameters from the config system:\n ```javascript\n const temperature = getAITemperature('complexityAnalysis');\n const maxTokens = getAIMaxTokens('complexityAnalysis');\n ```\n\n3. When constructing the call to `generateObjectService`, pass these configuration values:\n ```javascript\n const result = await generateObjectService({\n prompt,\n schema: complexityReportSchema,\n modelName,\n temperature,\n maxTokens,\n sessionEnv: session?.env\n });\n ```\n\n4. Ensure API key resolution uses the `resolveEnvVariable` helper:\n ```javascript\n // Don't hardcode API keys or directly access process.env\n // The generateObjectService should handle this internally with resolveEnvVariable\n ```\n\n5. Add logging configuration based on settings:\n ```javascript\n const enableLogging = getAILoggingEnabled('complexityAnalysis');\n if (enableLogging) {\n // Use the logging mechanism defined in the configuration\n }\n ```\n</info added on 2025-04-20T03:53:46.120Z>",
"status": "pending",
"status": "cancelled",
"dependencies": [
"61.23"
],
@@ -2929,7 +2929,7 @@
"title": "Refactor Task Addition AI Call to use generateObjectService",
"description": "Update the logic that calls the AI after using `_buildAddTaskPrompt` in `ai-services.js` to use the new `generateObjectService` from `ai-services-unified.js` with a Zod schema for the single task object.",
"details": "\n\n<info added on 2025-04-20T03:53:27.455Z>\nTo implement this refactoring, you'll need to:\n\n1. Replace direct AI calls with the new `generateObjectService` approach:\n ```javascript\n // OLD approach\n const aiResponse = await callLLM(prompt, modelName, temperature, maxTokens);\n const task = parseAIResponseToTask(aiResponse);\n \n // NEW approach using generateObjectService with config-manager\n import { generateObjectService } from '../services/ai-services-unified.js';\n import { getAIModelForRole, getAITemperature, getAIMaxTokens } from '../config/config-manager.js';\n import { taskSchema } from '../schemas/task-schema.js'; // Create this Zod schema for a single task\n \n const modelName = getAIModelForRole('taskCreation');\n const temperature = getAITemperature('taskCreation');\n const maxTokens = getAIMaxTokens('taskCreation');\n \n const task = await generateObjectService({\n prompt: _buildAddTaskPrompt(...),\n schema: taskSchema,\n modelName,\n temperature,\n maxTokens\n });\n ```\n\n2. Create a Zod schema for the task object in a new file `schemas/task-schema.js` that defines the expected structure.\n\n3. Ensure API key resolution uses the new pattern:\n ```javascript\n // This happens inside generateObjectService, but verify it uses:\n import { resolveEnvVariable } from '../config/config-manager.js';\n // Instead of direct process.env access\n ```\n\n4. Update any error handling to match the new service's error patterns.\n</info added on 2025-04-20T03:53:27.455Z>",
"status": "pending",
"status": "cancelled",
"dependencies": [
"61.23"
],
@@ -2973,7 +2973,7 @@
"title": "Implement `anthropic.js` Provider Module using Vercel AI SDK",
"description": "Create and implement the `anthropic.js` module within `src/ai-providers/`. This module should contain functions to interact with the Anthropic API (streaming and non-streaming) using the **Vercel AI SDK**, adhering to the standardized input/output format defined for `ai-services-unified.js`.",
"details": "\n\n<info added on 2025-04-24T02:54:40.326Z>\n- Use the `@ai-sdk/anthropic` package to implement the provider module. You can import the default provider instance with `import { anthropic } from '@ai-sdk/anthropic'`, or create a custom instance using `createAnthropic` if you need to specify custom headers, API key, or base URL (such as for beta features or proxying)[1][4].\n\n- To address persistent 'Not Found' errors, ensure the model name matches the latest Anthropic model IDs (e.g., `claude-3-haiku-20240307`, `claude-3-5-sonnet-20241022`). Model naming is case-sensitive and must match Anthropic's published versions[4][5].\n\n- If you require custom headers (such as for beta features), use the `createAnthropic` function and pass a `headers` object. For example:\n ```js\n import { createAnthropic } from '@ai-sdk/anthropic';\n const anthropic = createAnthropic({\n apiKey: process.env.ANTHROPIC_API_KEY,\n headers: { 'anthropic-beta': 'tools-2024-04-04' }\n });\n ```\n\n- For streaming and non-streaming support, the Vercel AI SDK provides both `generateText` (non-streaming) and `streamText` (streaming) functions. Use these with the Anthropic provider instance as the `model` parameter[5].\n\n- Example usage for non-streaming:\n ```js\n import { generateText } from 'ai';\n import { anthropic } from '@ai-sdk/anthropic';\n\n const result = await generateText({\n model: anthropic('claude-3-haiku-20240307'),\n messages: [{ role: 'user', content: [{ type: 'text', text: 'Hello!' }] }]\n });\n ```\n\n- Example usage for streaming:\n ```js\n import { streamText } from 'ai';\n import { anthropic } from '@ai-sdk/anthropic';\n\n const stream = await streamText({\n model: anthropic('claude-3-haiku-20240307'),\n messages: [{ role: 'user', content: [{ type: 'text', text: 'Hello!' }] }]\n });\n ```\n\n- Ensure that your implementation adheres to the standardized input/output format defined for `ai-services-unified.js`, mapping the SDK's response structure to your unified format.\n\n- If you continue to encounter 'Not Found' errors, verify:\n - The API key is valid and has access to the requested models.\n - The model name is correct and available to your Anthropic account.\n - Any required beta headers are included if using beta features or models[1].\n\n- Prefer direct provider instantiation with explicit headers and API key configuration for maximum compatibility and to avoid SDK-level abstraction issues[1].\n</info added on 2025-04-24T02:54:40.326Z>",
"status": "in-progress",
"status": "done",
"dependencies": [],
"parentTaskId": 61
},
@@ -3114,12 +3114,66 @@
},
{
"id": 35,
"title": "Review/Refactor MCP Direct Functions for Explicit Config Root Passing",
"description": "Review all functions in mcp-server/src/core/direct-functions/*.js. Ensure that any calls made from these functions to getters in scripts/modules/config-manager.js (e.g., getMainProvider, getDefaultPriority, getLogLevel, etc.) explicitly pass the projectRoot (obtained from the args object, which is derived from the session context) as the first argument to the getter. This prevents the getters from incorrectly falling back to using findProjectRoot() based on the server's cwd when running in an MCP context. This is crucial for loading the correct .taskmasterconfig settings based on the user's project.",
"title": "Refactor add-task.js for Unified AI Service & Config",
"description": "Replace direct AI calls (old `ai-services.js` helpers) with `generateObjectService` or `generateTextService` from `ai-services-unified.js`. Pass `role` and `session`. Remove direct config getter usage (from `config-manager.js`) for AI parameters; use unified service instead. Keep `getDefaultPriority` usage.",
"details": "",
"status": "done",
"dependencies": [],
"parentTaskId": 61
},
{
"id": 36,
"title": "Refactor analyze-task-complexity.js for Unified AI Service & Config",
"description": "Replace direct AI calls with `generateObjectService` from `ai-services-unified.js`. Pass `role` and `session`. Remove direct config getter usage (from `config-manager.js`) for AI parameters; use unified service instead. Keep config getters needed for report metadata (`getProjectName`, `getDefaultSubtasks`).",
"details": "",
"status": "pending",
"dependencies": [],
"parentTaskId": 61
},
{
"id": 37,
"title": "Refactor expand-task.js for Unified AI Service & Config",
"description": "Replace direct AI calls (old `ai-services.js` helpers like `generateSubtasksWithPerplexity`) with `generateObjectService` from `ai-services-unified.js`. Pass `role` and `session`. Remove direct config getter usage (from `config-manager.js`) for AI parameters; use unified service instead. Keep `getDefaultSubtasks` usage.",
"details": "",
"status": "pending",
"dependencies": [],
"parentTaskId": 61
},
{
"id": 38,
"title": "Refactor expand-all-tasks.js for Unified AI Helpers & Config",
"description": "Ensure this file correctly calls the refactored `getSubtasksFromAI` helper. Update config usage to only use `getDefaultSubtasks` from `config-manager.js` directly. AI interaction itself is handled by the helper.",
"details": "",
"status": "pending",
"dependencies": [],
"parentTaskId": 61
},
{
"id": 39,
"title": "Refactor get-subtasks-from-ai.js for Unified AI Service & Config",
"description": "Replace direct AI calls (old `ai-services.js` helpers) with `generateObjectService` or `generateTextService` from `ai-services-unified.js`. Pass `role` and `session`. Remove direct config getter usage (from `config-manager.js`) for AI parameters; use unified service instead.",
"details": "",
"status": "pending",
"dependencies": [],
"parentTaskId": 61
},
{
"id": 40,
"title": "Refactor update-task-by-id.js for Unified AI Service & Config",
"description": "Replace direct AI calls (old `ai-services.js` helpers) with `generateObjectService` or `generateTextService` from `ai-services-unified.js`. Pass `role` and `session`. Remove direct config getter usage (from `config-manager.js`) for AI parameters and fallback logic; use unified service instead. Keep `getDebugFlag`.",
"details": "",
"status": "pending",
"dependencies": [],
"parentTaskId": 61
},
{
"id": 41,
"title": "Refactor update-tasks.js for Unified AI Service & Config",
"description": "Replace direct AI calls (old `ai-services.js` helpers) with `generateObjectService` or `generateTextService` from `ai-services-unified.js`. Pass `role` and `session`. Remove direct config getter usage (from `config-manager.js`) for AI parameters and fallback logic; use unified service instead. Keep `getDebugFlag`.",
"details": "",
"status": "pending",
"dependencies": [],
"parentTaskId": 61
}
]
}