fix merge conflicts to prep for merge with branch next
- Enhance E2E testing and the LLM analysis report:
  - Add an --analyze-log flag to run_e2e.sh to re-run LLM analysis on existing logs.
  - Add test:e2e and analyze-log scripts to package.json for easier execution.
- Correct display errors and dependency validation output:
  - Update chalk usage in add-task.js to use bracket notation (chalk[color]) compatible with v5, resolving the 'chalk.keyword is not a function' error.
  - Modify the fix-dependencies command output to show a red failure box with the issue count instead of a green success box when validation fails.
- Refactor interactive model setup:
  - Verify inclusion of the 'No change' option during the interactive model setup flow (task-master models --setup).
- Update model definitions:
  - Add a max_tokens field for gpt-4o in supported-models.json.
- Remove unused scripts:
  - Delete prepare-package.js and rule-transformer.test.js.

Release candidate
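For context on the chalk change below: chalk v5 removed the keyword() color-model helper, but named colors are still exposed as properties on the chalk object, so a dynamic color name can be applied with bracket notation. A minimal sketch of the before/after, using hypothetical stand-in values for the priority and color name (in add-task.js the color comes from getPriorityColor()):

    import chalk from 'chalk';

    // Hypothetical example values for illustration only.
    const priority = 'high';
    const color = 'red'; // e.g. what getPriorityColor(priority) might return

    // chalk v4 style: keyword() no longer exists in chalk v5 and throws
    // "chalk.keyword is not a function":
    // console.log(`Priority: ${chalk.keyword(color)(priority)}`);

    // chalk v5-compatible style: named colors are properties, so bracket
    // notation works for dynamic color names:
    console.log(`Priority: ${chalk[color](priority)}`);

Bracket notation keeps the color dynamic without depending on the removed color-model helpers.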
@@ -215,6 +215,7 @@ async function addTask(
 		// Determine the service role based on the useResearch flag
 		const serviceRole = useResearch ? 'research' : 'main';
 
+		report('DEBUG: Calling generateObjectService...', 'debug');
 		// Call the unified AI service
 		const aiGeneratedTaskData = await generateObjectService({
 			role: serviceRole, // <-- Use the determined role
@@ -225,14 +226,20 @@ async function addTask(
 			prompt: userPrompt,
 			reportProgress // Pass progress reporter if available
 		});
+		report('DEBUG: generateObjectService returned successfully.', 'debug');
 
 		report('Successfully generated task data from AI.', 'success');
 		taskData = aiGeneratedTaskData; // Assign the validated object
 	} catch (error) {
+		report(
+			`DEBUG: generateObjectService caught error: ${error.message}`,
+			'debug'
+		);
 		report(`Error generating task with AI: ${error.message}`, 'error');
 		if (loadingIndicator) stopLoadingIndicator(loadingIndicator);
 		throw error; // Re-throw error after logging
 	} finally {
+		report('DEBUG: generateObjectService finally block reached.', 'debug');
 		if (loadingIndicator) stopLoadingIndicator(loadingIndicator); // Ensure indicator stops
 	}
 	// --- End Refactored AI Interaction ---
@@ -254,13 +261,17 @@ async function addTask(
 		// Add the task to the tasks array
 		data.tasks.push(newTask);
 
+		report('DEBUG: Writing tasks.json...', 'debug');
 		// Write the updated tasks to the file
 		writeJSON(tasksPath, data);
+		report('DEBUG: tasks.json written.', 'debug');
 
 		// Generate markdown task files
 		report('Generating task files...', 'info');
+		report('DEBUG: Calling generateTaskFiles...', 'debug');
 		// Pass mcpLog if available to generateTaskFiles
 		await generateTaskFiles(tasksPath, path.dirname(tasksPath), { mcpLog });
+		report('DEBUG: generateTaskFiles finished.', 'debug');
 
 		// Show success message - only for text output (CLI)
 		if (outputFormat === 'text') {
@@ -305,7 +316,7 @@ async function addTask(
 				chalk.white(`Status: ${getStatusWithColor(newTask.status)}`) +
 				'\n' +
 				chalk.white(
-					`Priority: ${chalk.keyword(getPriorityColor(newTask.priority))(newTask.priority)}`
+					`Priority: ${chalk[getPriorityColor(newTask.priority)](newTask.priority)}`
 				) +
 				'\n' +
 				(numericDependencies.length > 0
@@ -332,6 +343,7 @@ async function addTask(
 		}
 
 		// Return the new task ID
+		report(`DEBUG: Returning new task ID: ${newTaskId}`, 'debug');
 		return newTaskId;
 	} catch (error) {
 		// Stop any loading indicator on error
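The bracket-notation change above assumes getPriorityColor() returns a valid chalk color name. As a hypothetical illustration only (not the repository's actual implementation), such a helper just needs to map each priority to a string that exists as a chalk property:

    // Hypothetical sketch; the real getPriorityColor in this repo may differ.
    // The returned string must be a valid chalk color property ('red',
    // 'yellow', 'gray', ...) for chalk[color](...) to work in chalk v5.
    function getPriorityColor(priority) {
    	switch (priority) {
    		case 'high':
    			return 'red';
    		case 'low':
    			return 'gray';
    		case 'medium':
    		default:
    			return 'yellow';
    	}
    }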