feat: Migrate Task Master to generateObject for structured AI responses (#1262)
Co-authored-by: coderabbitai[bot] <136622811+coderabbitai[bot]@users.noreply.github.com>
Co-authored-by: Ben Vargas <ben@example.com>
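
The change is mechanical across the five task-manager modules touched below: calls to `generateTextService` plus hand-rolled JSON cleanup are replaced by `generateObjectService` calls that pass a Zod schema from the shared `COMMAND_SCHEMAS` registry, and the structured result is read straight off `mainResult`. The sketch below illustrates the before/after call shape using parameters that appear in the hunks; the registry contents live in `src/schemas/registry.js` and are not part of this diff, so the specific schema key is shown only as an example.

```js
// Sketch of the migration pattern (not a drop-in excerpt from any one file).
import { generateObjectService } from '../ai-services-unified.js';
import { COMMAND_SCHEMAS } from '../../../src/schemas/registry.js';

async function runStructuredCommand({ systemPrompt, prompt, role, session, projectRoot }) {
	// Before: generateTextService returned raw text that each command had to
	// clean (strip Markdown fences, slice to the outermost braces/brackets)
	// and then JSON.parse and validate by hand.
	// After: generateObjectService takes a Zod schema and returns the
	// already-validated object on mainResult.
	const aiServiceResponse = await generateObjectService({
		role,
		session,
		projectRoot,
		systemPrompt,
		prompt,
		schema: COMMAND_SCHEMAS['add-task'],
		objectName: 'newTaskData',
		commandName: 'add-task',
		outputType: 'cli'
	});
	return aiServiceResponse.mainResult; // structured data, no manual parsing
}
```
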
@@ -2,7 +2,6 @@ import path from 'path';
import chalk from 'chalk';
import boxen from 'boxen';
import Table from 'cli-table3';
import { z } from 'zod';
import Fuse from 'fuse.js'; // Import Fuse.js for advanced fuzzy search

import {
@@ -29,6 +28,7 @@ import { getDefaultPriority, hasCodebaseAnalysis } from '../config-manager.js';
import { getPromptManager } from '../prompt-manager.js';
import ContextGatherer from '../utils/contextGatherer.js';
import generateTaskFiles from './generate-task-files.js';
import { COMMAND_SCHEMAS } from '../../../src/schemas/registry.js';
import {
TASK_PRIORITY_OPTIONS,
DEFAULT_TASK_PRIORITY,
@@ -36,26 +36,6 @@ import {
normalizeTaskPriority
} from '../../../src/constants/task-priority.js';

// Define Zod schema for the expected AI output object
const AiTaskDataSchema = z.object({
title: z.string().describe('Clear, concise title for the task'),
description: z
.string()
.describe('A one or two sentence description of the task'),
details: z
.string()
.describe('In-depth implementation details, considerations, and guidance'),
testStrategy: z
.string()
.describe('Detailed approach for verifying task completion'),
dependencies: z
.array(z.number())
.nullable()
.describe(
'Array of task IDs that this task depends on (must be completed before this task can start)'
)
});

/**
 * Get all tasks from all tags
 * @param {Object} rawData - The raw tagged data object
@@ -451,7 +431,7 @@ async function addTask(
role: serviceRole,
session: session,
projectRoot: projectRoot,
schema: AiTaskDataSchema,
schema: COMMAND_SCHEMAS['add-task'],
objectName: 'newTaskData',
systemPrompt: systemPrompt,
prompt: userPrompt,
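
The local `AiTaskDataSchema` deleted above is replaced by `COMMAND_SCHEMAS['add-task']`. The registry itself is not shown in this diff; a compatible entry would look roughly like the following, reconstructed from the deleted schema, so treat it as an assumption rather than the actual `src/schemas/registry.js` contents.

```js
// Hypothetical shape of the 'add-task' registry entry, inferred from the
// AiTaskDataSchema removed above. The real registry may differ.
import { z } from 'zod';

const aiTaskDataSchema = z.object({
	title: z.string().describe('Clear, concise title for the task'),
	description: z.string().describe('A one or two sentence description of the task'),
	details: z.string().describe('In-depth implementation details, considerations, and guidance'),
	testStrategy: z.string().describe('Detailed approach for verifying task completion'),
	dependencies: z.array(z.number()).nullable().describe('Array of task IDs that this task depends on')
});

export const COMMAND_SCHEMAS = {
	'add-task': aiTaskDataSchema
	// ...one entry per command, keyed by command name
};
```
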
@@ -11,7 +11,8 @@ import {
displayAiUsageSummary
} from '../ui.js';

import { generateTextService } from '../ai-services-unified.js';
import { generateObjectService } from '../ai-services-unified.js';
import { COMMAND_SCHEMAS } from '../../../src/schemas/registry.js';

import {
getDebugFlag,
@@ -29,46 +30,6 @@ import { ContextGatherer } from '../utils/contextGatherer.js';
import { FuzzyTaskSearch } from '../utils/fuzzyTaskSearch.js';
import { flattenTasksWithSubtasks } from '../utils.js';

/**
 * Generates the prompt for complexity analysis.
 * (Moved from ai-services.js and simplified)
 * @param {Object} tasksData - The tasks data object.
 * @param {string} [gatheredContext] - The gathered context for the analysis.
 * @returns {string} The generated prompt.
 */
function generateInternalComplexityAnalysisPrompt(
tasksData,
gatheredContext = ''
) {
const tasksString = JSON.stringify(tasksData.tasks, null, 2);
let prompt = `Analyze the following tasks to determine their complexity (1-10 scale) and recommend the number of subtasks for expansion. Provide a brief reasoning and an initial expansion prompt for each.

Tasks:
${tasksString}`;

if (gatheredContext) {
prompt += `\n\n# Project Context\n\n${gatheredContext}`;
}

prompt += `

Respond ONLY with a valid JSON array matching the schema:
[
{
"taskId": <number>,
"taskTitle": "<string>",
"complexityScore": <number 1-10>,
"recommendedSubtasks": <number>,
"expansionPrompt": "<string>",
"reasoning": "<string>"
},
...
]

Do not include any explanatory text, markdown formatting, or code block markers before or after the JSON array.`;
return prompt;
}

/**
 * Analyzes task complexity and generates expansion recommendations
 * @param {Object} options Command options
@@ -446,12 +407,14 @@ async function analyzeTaskComplexity(options, context = {}) {
try {
const role = useResearch ? 'research' : 'main';

aiServiceResponse = await generateTextService({
aiServiceResponse = await generateObjectService({
prompt,
systemPrompt,
role,
session,
projectRoot,
schema: COMMAND_SCHEMAS['analyze-complexity'],
objectName: 'complexityAnalysis',
commandName: 'analyze-complexity',
outputType: mcpLog ? 'mcp' : 'cli'
});
@@ -463,63 +426,15 @@ async function analyzeTaskComplexity(options, context = {}) {
if (outputFormat === 'text') {
readline.clearLine(process.stdout, 0);
readline.cursorTo(process.stdout, 0);
console.log(
chalk.green('AI service call complete. Parsing response...')
);
console.log(chalk.green('AI service call complete.'));
}

reportLog('Parsing complexity analysis from text response...', 'info');
try {
let cleanedResponse = aiServiceResponse.mainResult;
cleanedResponse = cleanedResponse.trim();

const codeBlockMatch = cleanedResponse.match(
/```(?:json)?\s*([\s\S]*?)\s*```/
);
if (codeBlockMatch) {
cleanedResponse = codeBlockMatch[1].trim();
} else {
const firstBracket = cleanedResponse.indexOf('[');
const lastBracket = cleanedResponse.lastIndexOf(']');
if (firstBracket !== -1 && lastBracket > firstBracket) {
cleanedResponse = cleanedResponse.substring(
firstBracket,
lastBracket + 1
);
} else {
reportLog(
'Warning: Response does not appear to be a JSON array.',
'warn'
);
}
}

if (outputFormat === 'text' && getDebugFlag(session)) {
console.log(chalk.gray('Attempting to parse cleaned JSON...'));
console.log(chalk.gray('Cleaned response (first 100 chars):'));
console.log(chalk.gray(cleanedResponse.substring(0, 100)));
console.log(chalk.gray('Last 100 chars:'));
console.log(
chalk.gray(cleanedResponse.substring(cleanedResponse.length - 100))
);
}

complexityAnalysis = JSON.parse(cleanedResponse);
} catch (parseError) {
if (loadingIndicator) stopLoadingIndicator(loadingIndicator);
reportLog(
`Error parsing complexity analysis JSON: ${parseError.message}`,
'error'
);
if (outputFormat === 'text') {
console.error(
chalk.red(
`Error parsing complexity analysis JSON: ${parseError.message}`
)
);
}
throw parseError;
}
// With generateObject, we get structured data directly
complexityAnalysis = aiServiceResponse.mainResult.complexityAnalysis;
reportLog(
`Received ${complexityAnalysis.length} complexity analyses from AI.`,
'info'
);

const taskIds = tasksData.tasks.map((t) => t.id);
const analysisTaskIds = complexityAnalysis.map((a) => a.taskId);
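
With the schema enforced at generation time, the former fence-stripping, bracket-slicing and `JSON.parse` block above collapses to a property read on `mainResult`. A minimal defensive version of that read is sketched below; the array guard is an assumption borrowed from the expand-task change later in this commit, not code present in this hunk.

```js
// Sketch: consume the structured analyze-complexity result directly.
const mainResult = aiServiceResponse?.mainResult;
if (!mainResult || !Array.isArray(mainResult.complexityAnalysis)) {
	throw new Error('AI response did not include a valid complexityAnalysis array.');
}
const complexityAnalysis = mainResult.complexityAnalysis;
reportLog(
	`Received ${complexityAnalysis.length} complexity analyses from AI.`,
	'info'
);
```
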
@@ -1,22 +1,22 @@
import fs from 'fs';
import path from 'path';
import { z } from 'zod';

import {
getTagAwareFilePath,
isSilentMode,
log,
readJSON,
writeJSON,
isSilentMode,
getTagAwareFilePath
writeJSON
} from '../utils.js';

import {
displayAiUsageSummary,
startLoadingIndicator,
stopLoadingIndicator,
displayAiUsageSummary
stopLoadingIndicator
} from '../ui.js';

import { generateTextService } from '../ai-services-unified.js';
import { COMMAND_SCHEMAS } from '../../../src/schemas/registry.js';
import { generateObjectService } from '../ai-services-unified.js';

import {
getDefaultSubtasks,
@@ -24,259 +24,12 @@ import {
hasCodebaseAnalysis
} from '../config-manager.js';
import { getPromptManager } from '../prompt-manager.js';
import generateTaskFiles from './generate-task-files.js';
import { COMPLEXITY_REPORT_FILE } from '../../../src/constants/paths.js';
import { findProjectRoot, flattenTasksWithSubtasks } from '../utils.js';
import { ContextGatherer } from '../utils/contextGatherer.js';
import { FuzzyTaskSearch } from '../utils/fuzzyTaskSearch.js';
import { flattenTasksWithSubtasks, findProjectRoot } from '../utils.js';

// --- Zod Schemas (Keep from previous step) ---
const subtaskSchema = z.strictObject({
id: z.int().positive().describe('Sequential subtask ID starting from 1'),
title: z.string().min(5).describe('Clear, specific title for the subtask'),
description: z
.string()
.min(10)
.describe('Detailed description of the subtask'),
dependencies: z
.array(z.string())
.describe(
'Array of subtask dependencies within the same parent task. Use format ["parentTaskId.1", "parentTaskId.2"]. Subtasks can only depend on siblings, not external tasks.'
),
details: z.string().min(20).describe('Implementation details and guidance'),
status: z
.string()
.describe(
'The current status of the subtask (should be pending initially)'
),
testStrategy: z
.string()
.nullable()
.describe('Approach for testing this subtask')
.prefault('')
});
const subtaskArraySchema = z.array(subtaskSchema);
const subtaskWrapperSchema = z.object({
subtasks: subtaskArraySchema.describe('The array of generated subtasks.')
});
// --- End Zod Schemas ---

/**
 * Parse subtasks from AI's text response. Includes basic cleanup.
 * @param {string} text - Response text from AI.
 * @param {number} startId - Starting subtask ID expected.
 * @param {number} expectedCount - Expected number of subtasks.
 * @param {number} parentTaskId - Parent task ID for context.
 * @param {Object} logger - Logging object (mcpLog or console log).
 * @returns {Array} Parsed and potentially corrected subtasks array.
 * @throws {Error} If parsing fails or JSON is invalid/malformed.
 */
function parseSubtasksFromText(
text,
startId,
expectedCount,
parentTaskId,
logger
) {
if (typeof text !== 'string') {
logger.error(
`AI response text is not a string. Received type: ${typeof text}, Value: ${text}`
);
throw new Error('AI response text is not a string.');
}

if (!text || text.trim() === '') {
throw new Error('AI response text is empty after trimming.');
}

const originalTrimmedResponse = text.trim(); // Store the original trimmed response
let jsonToParse = originalTrimmedResponse; // Initialize jsonToParse with it

logger.debug(
`Original AI Response for parsing (full length: ${jsonToParse.length}): ${jsonToParse.substring(0, 1000)}...`
);

// --- Pre-emptive cleanup for known AI JSON issues ---
// Fix for "dependencies": , or "dependencies":,
if (jsonToParse.includes('"dependencies":')) {
const malformedPattern = /"dependencies":\s*,/g;
if (malformedPattern.test(jsonToParse)) {
logger.warn('Attempting to fix malformed "dependencies": , issue.');
jsonToParse = jsonToParse.replace(
malformedPattern,
'"dependencies": [],'
);
logger.debug(
`JSON after fixing "dependencies": ${jsonToParse.substring(0, 500)}...`
);
}
}
// --- End pre-emptive cleanup ---

let parsedObject;
let primaryParseAttemptFailed = false;

// --- Attempt 1: Simple Parse (with optional Markdown cleanup) ---
logger.debug('Attempting simple parse...');
try {
// Check for markdown code block
const codeBlockMatch = jsonToParse.match(/```(?:json)?\s*([\s\S]*?)\s*```/);
let contentToParseDirectly = jsonToParse;
if (codeBlockMatch && codeBlockMatch[1]) {
contentToParseDirectly = codeBlockMatch[1].trim();
logger.debug('Simple parse: Extracted content from markdown code block.');
} else {
logger.debug(
'Simple parse: No markdown code block found, using trimmed original.'
);
}

parsedObject = JSON.parse(contentToParseDirectly);
logger.debug('Simple parse successful!');

// Quick check if it looks like our target object
if (
!parsedObject ||
typeof parsedObject !== 'object' ||
!Array.isArray(parsedObject.subtasks)
) {
logger.warn(
'Simple parse succeeded, but result is not the expected {"subtasks": []} structure. Will proceed to advanced extraction.'
);
primaryParseAttemptFailed = true;
parsedObject = null; // Reset parsedObject so we enter the advanced logic
}
// If it IS the correct structure, we'll skip advanced extraction.
} catch (e) {
logger.warn(
`Simple parse failed: ${e.message}. Proceeding to advanced extraction logic.`
);
primaryParseAttemptFailed = true;
// jsonToParse is already originalTrimmedResponse if simple parse failed before modifying it for markdown
}

// --- Attempt 2: Advanced Extraction (if simple parse failed or produced wrong structure) ---
if (primaryParseAttemptFailed || !parsedObject) {
// Ensure we try advanced if simple parse gave wrong structure
logger.debug('Attempting advanced extraction logic...');
// Reset jsonToParse to the original full trimmed response for advanced logic
jsonToParse = originalTrimmedResponse;

// (Insert the more complex extraction logic here - the one we worked on with:
// - targetPattern = '{"subtasks":';
// - careful brace counting for that targetPattern
// - fallbacks to last '{' and '}' if targetPattern logic fails)
// This was the logic from my previous message. Let's assume it's here.
// This block should ultimately set `jsonToParse` to the best candidate string.

// Example snippet of that advanced logic's start:
const targetPattern = '{"subtasks":';
const patternStartIndex = jsonToParse.indexOf(targetPattern);

if (patternStartIndex !== -1) {
const openBraces = 0;
const firstBraceFound = false;
const extractedJsonBlock = '';
// ... (loop for brace counting as before) ...
// ... (if successful, jsonToParse = extractedJsonBlock) ...
// ... (if that fails, fallbacks as before) ...
} else {
// ... (fallback to last '{' and '}' if targetPattern not found) ...
}
// End of advanced logic excerpt

logger.debug(
`Advanced extraction: JSON string that will be parsed: ${jsonToParse.substring(0, 500)}...`
);
try {
parsedObject = JSON.parse(jsonToParse);
logger.debug('Advanced extraction parse successful!');
} catch (parseError) {
logger.error(
`Advanced extraction: Failed to parse JSON object: ${parseError.message}`
);
logger.error(
`Advanced extraction: Problematic JSON string for parse (first 500 chars): ${jsonToParse.substring(0, 500)}`
);
throw new Error(
// Re-throw a more specific error if advanced also fails
`Failed to parse JSON response object after both simple and advanced attempts: ${parseError.message}`
);
}
}

// --- Validation (applies to successfully parsedObject from either attempt) ---
if (
!parsedObject ||
typeof parsedObject !== 'object' ||
!Array.isArray(parsedObject.subtasks)
) {
logger.error(
`Final parsed content is not an object or missing 'subtasks' array. Content: ${JSON.stringify(parsedObject).substring(0, 200)}`
);
throw new Error(
'Parsed AI response is not a valid object containing a "subtasks" array after all attempts.'
);
}
const parsedSubtasks = parsedObject.subtasks;

if (expectedCount && parsedSubtasks.length !== expectedCount) {
logger.warn(
`Expected ${expectedCount} subtasks, but parsed ${parsedSubtasks.length}.`
);
}

let currentId = startId;
const validatedSubtasks = [];
const validationErrors = [];

for (const rawSubtask of parsedSubtasks) {
const correctedSubtask = {
...rawSubtask,
id: currentId,
dependencies: Array.isArray(rawSubtask.dependencies)
? rawSubtask.dependencies.filter(
(dep) =>
typeof dep === 'string' && dep.startsWith(`${parentTaskId}.`)
)
: [],
status: 'pending'
};

const result = subtaskSchema.safeParse(correctedSubtask);

if (result.success) {
validatedSubtasks.push(result.data);
} else {
logger.warn(
`Subtask validation failed for raw data: ${JSON.stringify(rawSubtask).substring(0, 100)}...`
);
result.error.errors.forEach((err) => {
const errorMessage = ` - Field '${err.path.join('.')}': ${err.message}`;
logger.warn(errorMessage);
validationErrors.push(`Subtask ${currentId}: ${errorMessage}`);
});
}
currentId++;
}

if (validationErrors.length > 0) {
logger.error(
`Found ${validationErrors.length} validation errors in the generated subtasks.`
);
logger.warn('Proceeding with only the successfully validated subtasks.');
}

if (validatedSubtasks.length === 0 && parsedSubtasks.length > 0) {
throw new Error(
'AI response contained potential subtasks, but none passed validation.'
);
}
return validatedSubtasks.slice(0, expectedCount || validatedSubtasks.length);
}

/**
 * Expand a task into subtasks using the unified AI service (generateTextService).
 * Expand a task into subtasks using the unified AI service (generateObjectService).
 * Appends new subtasks by default. Replaces existing subtasks if force=true.
 * Integrates complexity report to determine subtask count and prompt if available,
 * unless numSubtasks is explicitly provided.
@@ -444,6 +197,10 @@ async function expandTask(
}

// Determine prompt content AND system prompt
// Calculate the next subtask ID to match current behavior:
// - Start from the number of existing subtasks + 1
// - This creates sequential IDs: 1, 2, 3, 4...
// - Display format shows as parentTaskId.subtaskId (e.g., "1.1", "1.2", "2.1")
const nextSubtaskId = (task.subtasks?.length || 0) + 1;

// Load prompts using PromptManager
@@ -504,7 +261,6 @@ async function expandTask(
hasCodebaseAnalysis: hasCodebaseAnalysisCapability,
projectRoot: projectRoot || ''
};

let variantKey = 'default';
if (expansionPromptText) {
variantKey = 'complexity-report';
@@ -534,7 +290,7 @@ async function expandTask(
);
// --- End Complexity Report / Prompt Logic ---

// --- AI Subtask Generation using generateTextService ---
// --- AI Subtask Generation using generateObjectService ---
let generatedSubtasks = [];
let loadingIndicator = null;
if (outputFormat === 'text') {
@@ -543,48 +299,36 @@ async function expandTask(
);
}

let responseText = '';
let aiServiceResponse = null;

try {
const role = useResearch ? 'research' : 'main';

// Call generateTextService with the determined prompts and telemetry params
aiServiceResponse = await generateTextService({
// Call generateObjectService with the determined prompts and telemetry params
aiServiceResponse = await generateObjectService({
prompt: promptContent,
systemPrompt: systemPrompt,
role,
session,
projectRoot,
schema: COMMAND_SCHEMAS['expand-task'],
objectName: 'subtasks',
commandName: 'expand-task',
outputType: outputFormat
});
responseText = aiServiceResponse.mainResult;

// Parse Subtasks
generatedSubtasks = parseSubtasksFromText(
responseText,
nextSubtaskId,
finalSubtaskCount,
task.id,
logger
);
logger.info(
`Successfully parsed ${generatedSubtasks.length} subtasks from AI response.`
);
// With generateObject, we expect structured data – verify it before use
const mainResult = aiServiceResponse?.mainResult;
if (!mainResult || !Array.isArray(mainResult.subtasks)) {
throw new Error('AI response did not include a valid subtasks array.');
}
generatedSubtasks = mainResult.subtasks;
logger.info(`Received ${generatedSubtasks.length} subtasks from AI.`);
} catch (error) {
if (loadingIndicator) stopLoadingIndicator(loadingIndicator);
logger.error(
`Error during AI call or parsing for task ${taskId}: ${error.message}`, // Added task ID context
'error'
);
// Log raw response in debug mode if parsing failed
if (
error.message.includes('Failed to parse valid subtasks') &&
getDebugFlag(session)
) {
logger.error(`Raw AI Response that failed parsing:\n${responseText}`);
}
throw error;
} finally {
if (loadingIndicator) stopLoadingIndicator(loadingIndicator);
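
The removed `subtaskSchema` above leans on Zod v4 behavior, notably `z.strictObject`, `z.int()` and `.prefault('')`. Assuming the registry's expand-task schema keeps the same shape, a subtask that omits `testStrategy` still validates because the prefault supplies an empty string before parsing. The snippet below is a standalone illustration of that behavior under those assumptions, not code from this diff.

```js
// Zod v4 '.prefault' in the shape used by the removed subtaskSchema:
// an omitted testStrategy is replaced with '' before validation runs.
import { z } from 'zod';

const subtaskSchema = z.strictObject({
	id: z.int().positive(),
	title: z.string().min(5),
	description: z.string().min(10),
	dependencies: z.array(z.string()),
	details: z.string().min(20),
	status: z.string(),
	testStrategy: z.string().nullable().prefault('')
});

const result = subtaskSchema.safeParse({
	id: 1,
	title: 'Implement config loader',
	description: 'Load and validate the project configuration file.',
	dependencies: [],
	details: 'Read config.json, validate required keys, and expose a typed getter.',
	status: 'pending'
	// testStrategy intentionally omitted
});

console.log(result.success, result.data?.testStrategy); // true, ''
```
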
@@ -3,7 +3,6 @@ import path from 'path';
import chalk from 'chalk';
import boxen from 'boxen';
import Table from 'cli-table3';
import { z } from 'zod'; // Keep Zod for post-parse validation

import {
log as consoleLog,
@@ -22,7 +21,11 @@ import {
displayAiUsageSummary
} from '../ui.js';

import { generateTextService } from '../ai-services-unified.js';
import {
generateTextService,
generateObjectService
} from '../ai-services-unified.js';
import { COMMAND_SCHEMAS } from '../../../src/schemas/registry.js';
import {
getDebugFlag,
isApiKeySet,
@@ -32,229 +35,6 @@ import { getPromptManager } from '../prompt-manager.js';
import { ContextGatherer } from '../utils/contextGatherer.js';
import { FuzzyTaskSearch } from '../utils/fuzzyTaskSearch.js';

// Zod schema for post-parsing validation of the updated task object
const updatedTaskSchema = z
.object({
id: z.number().int(),
title: z.string(), // Title should be preserved, but check it exists
description: z.string(),
status: z.string(),
dependencies: z.array(z.union([z.number().int(), z.string()])),
priority: z.string().nullable().prefault('medium'),
details: z.string().nullable().prefault(''),
testStrategy: z.string().nullable().prefault(''),
subtasks: z
.array(
z.object({
id: z
.number()
.int()
.positive()
.describe('Sequential subtask ID starting from 1'),
title: z.string(),
description: z.string(),
status: z.string(),
dependencies: z.array(z.number().int()).nullable().prefault([]),
details: z.string().nullable().prefault(''),
testStrategy: z.string().nullable().prefault('')
})
)
.nullable()
.prefault([])
})
.strip(); // Enforce the canonical task shape and drop unknown fields

/**
 * Parses a single updated task object from AI's text response.
 * @param {string} text - Response text from AI.
 * @param {number} expectedTaskId - The ID of the task expected.
 * @param {Function | Object} logFn - Logging function or MCP logger.
 * @param {boolean} isMCP - Flag indicating MCP context.
 * @returns {Object} Parsed and validated task object.
 * @throws {Error} If parsing or validation fails.
 */
function parseUpdatedTaskFromText(text, expectedTaskId, logFn, isMCP) {
// Report helper consistent with the established pattern
const report = (level, ...args) => {
if (isMCP) {
if (typeof logFn[level] === 'function') logFn[level](...args);
else logFn.info(...args);
} else if (!isSilentMode()) {
logFn(level, ...args);
}
};

report(
'info',
'Attempting to parse updated task object from text response...'
);
if (!text || text.trim() === '')
throw new Error('AI response text is empty.');

let cleanedResponse = text.trim();
const originalResponseForDebug = cleanedResponse;
let parseMethodUsed = 'raw'; // Keep track of which method worked

// --- NEW Step 1: Try extracting between {} first ---
const firstBraceIndex = cleanedResponse.indexOf('{');
const lastBraceIndex = cleanedResponse.lastIndexOf('}');
let potentialJsonFromBraces = null;

if (firstBraceIndex !== -1 && lastBraceIndex > firstBraceIndex) {
potentialJsonFromBraces = cleanedResponse.substring(
firstBraceIndex,
lastBraceIndex + 1
);
if (potentialJsonFromBraces.length <= 2) {
potentialJsonFromBraces = null; // Ignore empty braces {}
}
}

// If {} extraction yielded something, try parsing it immediately
if (potentialJsonFromBraces) {
try {
const testParse = JSON.parse(potentialJsonFromBraces);
// It worked! Use this as the primary cleaned response.
cleanedResponse = potentialJsonFromBraces;
parseMethodUsed = 'braces';
} catch (e) {
report(
'info',
'Content between {} looked promising but failed initial parse. Proceeding to other methods.'
);
// Reset cleanedResponse to original if brace parsing failed
cleanedResponse = originalResponseForDebug;
}
}

// --- Step 2: If brace parsing didn't work or wasn't applicable, try code block extraction ---
if (parseMethodUsed === 'raw') {
const codeBlockMatch = cleanedResponse.match(
/```(?:json|javascript)?\s*([\s\S]*?)\s*```/i
);
if (codeBlockMatch) {
cleanedResponse = codeBlockMatch[1].trim();
parseMethodUsed = 'codeblock';
report('info', 'Extracted JSON content from Markdown code block.');
} else {
// --- Step 3: If code block failed, try stripping prefixes ---
const commonPrefixes = [
'json\n',
'javascript\n'
// ... other prefixes ...
];
let prefixFound = false;
for (const prefix of commonPrefixes) {
if (cleanedResponse.toLowerCase().startsWith(prefix)) {
cleanedResponse = cleanedResponse.substring(prefix.length).trim();
parseMethodUsed = 'prefix';
report('info', `Stripped prefix: "${prefix.trim()}"`);
prefixFound = true;
break;
}
}
if (!prefixFound) {
report(
'warn',
'Response does not appear to contain {}, code block, or known prefix. Attempting raw parse.'
);
}
}
}

// --- Step 4: Attempt final parse ---
let parsedTask;
try {
parsedTask = JSON.parse(cleanedResponse);
} catch (parseError) {
report('error', `Failed to parse JSON object: ${parseError.message}`);
report(
'error',
`Problematic JSON string (first 500 chars): ${cleanedResponse.substring(0, 500)}`
);
report(
'error',
`Original Raw Response (first 500 chars): ${originalResponseForDebug.substring(0, 500)}`
);
throw new Error(
`Failed to parse JSON response object: ${parseError.message}`
);
}

if (!parsedTask || typeof parsedTask !== 'object') {
report(
'error',
`Parsed content is not an object. Type: ${typeof parsedTask}`
);
report(
'error',
`Parsed content sample: ${JSON.stringify(parsedTask).substring(0, 200)}`
);
throw new Error('Parsed AI response is not a valid JSON object.');
}

// Preprocess the task to ensure subtasks have proper structure
const preprocessedTask = {
...parsedTask,
status: parsedTask.status || 'pending',
dependencies: Array.isArray(parsedTask.dependencies)
? parsedTask.dependencies
: [],
details:
typeof parsedTask.details === 'string'
? parsedTask.details
: String(parsedTask.details || ''),
testStrategy:
typeof parsedTask.testStrategy === 'string'
? parsedTask.testStrategy
: String(parsedTask.testStrategy || ''),
// Ensure subtasks is an array and each subtask has required fields
subtasks: Array.isArray(parsedTask.subtasks)
? parsedTask.subtasks.map((subtask) => ({
...subtask,
title: subtask.title || '',
description: subtask.description || '',
status: subtask.status || 'pending',
dependencies: Array.isArray(subtask.dependencies)
? subtask.dependencies
: [],
details:
typeof subtask.details === 'string'
? subtask.details
: String(subtask.details || ''),
testStrategy:
typeof subtask.testStrategy === 'string'
? subtask.testStrategy
: String(subtask.testStrategy || '')
}))
: []
};

// Validate the parsed task object using Zod
const validationResult = updatedTaskSchema.safeParse(preprocessedTask);
if (!validationResult.success) {
report('error', 'Parsed task object failed Zod validation.');
validationResult.error.errors.forEach((err) => {
report('error', ` - Field '${err.path.join('.')}': ${err.message}`);
});
throw new Error(
`AI response failed task structure validation: ${validationResult.error.message}`
);
}

// Final check: ensure ID matches expected ID (AI might hallucinate)
if (validationResult.data.id !== expectedTaskId) {
report(
'warn',
`AI returned task with ID ${validationResult.data.id}, but expected ${expectedTaskId}. Overwriting ID.`
);
validationResult.data.id = expectedTaskId; // Enforce correct ID
}

report('info', 'Successfully validated updated task structure.');
return validationResult.data; // Return the validated task data
}

/**
 * Update a task by ID with new information using the unified AI service.
 * @param {string} tasksPath - Path to the tasks.json file
@@ -522,15 +302,32 @@ async function updateTaskById(

try {
const serviceRole = useResearch ? 'research' : 'main';
aiServiceResponse = await generateTextService({
role: serviceRole,
session: session,
projectRoot: projectRoot,
systemPrompt: systemPrompt,
prompt: userPrompt,
commandName: 'update-task',
outputType: isMCP ? 'mcp' : 'cli'
});

if (appendMode) {
// Append mode still uses generateTextService since it returns plain text
aiServiceResponse = await generateTextService({
role: serviceRole,
session: session,
projectRoot: projectRoot,
systemPrompt: systemPrompt,
prompt: userPrompt,
commandName: 'update-task',
outputType: isMCP ? 'mcp' : 'cli'
});
} else {
// Full update mode uses generateObjectService for structured output
aiServiceResponse = await generateObjectService({
role: serviceRole,
session: session,
projectRoot: projectRoot,
systemPrompt: systemPrompt,
prompt: userPrompt,
schema: COMMAND_SCHEMAS['update-task-by-id'],
objectName: 'task',
commandName: 'update-task',
outputType: isMCP ? 'mcp' : 'cli'
});
}

if (loadingIndicator)
stopLoadingIndicator(loadingIndicator, 'AI update complete.');
@@ -600,13 +397,8 @@ async function updateTaskById(
};
}

// Full update mode: Use mainResult (text) for parsing
const updatedTask = parseUpdatedTaskFromText(
aiServiceResponse.mainResult,
taskId,
logFn,
isMCP
);
// Full update mode: Use structured data directly
const updatedTask = aiServiceResponse.mainResult.task;

// --- Task Validation/Correction (Keep existing logic) ---
if (!updatedTask || typeof updatedTask !== 'object')
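
One convention worth noting in these hunks: every `generateObjectService` call passes an `objectName`, and the structured payload is then read from the matching property on `mainResult` (`task` here, `tasks`, `subtasks` and `complexityAnalysis` elsewhere). The sketch below condenses the append/full-update branch introduced above into a hypothetical helper; it is a simplified restatement for orientation, not a drop-in replacement.

```js
// Condensed sketch of the branch above: append mode keeps generateTextService
// (its result is prose to append), full updates use generateObjectService.
async function requestTaskUpdate({ appendMode, serviceRole, session, projectRoot, systemPrompt, userPrompt, isMCP }) {
	const common = {
		role: serviceRole,
		session,
		projectRoot,
		systemPrompt,
		prompt: userPrompt,
		commandName: 'update-task',
		outputType: isMCP ? 'mcp' : 'cli'
	};
	if (appendMode) {
		const res = await generateTextService(common);
		return res.mainResult; // plain text
	}
	const res = await generateObjectService({
		...common,
		schema: COMMAND_SCHEMAS['update-task-by-id'],
		objectName: 'task'
	});
	return res.mainResult.task; // structured task matching the schema
}
```
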
@@ -2,7 +2,6 @@ import path from 'path';
import chalk from 'chalk';
import boxen from 'boxen';
import Table from 'cli-table3';
import { z } from 'zod'; // Keep Zod for post-parsing validation

import {
log as consoleLog,
@@ -22,258 +21,13 @@ import {
import { getDebugFlag, hasCodebaseAnalysis } from '../config-manager.js';
import { getPromptManager } from '../prompt-manager.js';
import generateTaskFiles from './generate-task-files.js';
import { generateTextService } from '../ai-services-unified.js';
import { generateObjectService } from '../ai-services-unified.js';
import { COMMAND_SCHEMAS } from '../../../src/schemas/registry.js';
import { getModelConfiguration } from './models.js';
import { ContextGatherer } from '../utils/contextGatherer.js';
import { FuzzyTaskSearch } from '../utils/fuzzyTaskSearch.js';
import { flattenTasksWithSubtasks, findProjectRoot } from '../utils.js';

// Zod schema for validating the structure of tasks AFTER parsing
const updatedTaskSchema = z
.object({
id: z.int(),
title: z.string(),
description: z.string(),
status: z.string(),
dependencies: z.array(z.union([z.int(), z.string()])),
priority: z.string().nullable(),
details: z.string().nullable(),
testStrategy: z.string().nullable(),
subtasks: z.array(z.any()).nullable() // Keep subtasks flexible for now
})
.strip(); // Enforce the canonical task shape and drop unknown fields

// Preprocessing schema that adds defaults before validation
const preprocessTaskSchema = z.preprocess((task) => {
// Ensure task is an object
if (typeof task !== 'object' || task === null) {
return {};
}

// Return task with defaults for missing fields
return {
...task,
// Add defaults for required fields if missing
id: task.id ?? 0,
title: task.title ?? 'Untitled Task',
description: task.description ?? '',
status: task.status ?? 'pending',
dependencies: Array.isArray(task.dependencies) ? task.dependencies : [],
// Optional fields - preserve undefined/null distinction
priority: task.hasOwnProperty('priority') ? task.priority : null,
details: task.hasOwnProperty('details') ? task.details : null,
testStrategy: task.hasOwnProperty('testStrategy')
? task.testStrategy
: null,
subtasks: Array.isArray(task.subtasks)
? task.subtasks
: task.subtasks === null
? null
: []
};
}, updatedTaskSchema);

const updatedTaskArraySchema = z.array(updatedTaskSchema);
const preprocessedTaskArraySchema = z.array(preprocessTaskSchema);

/**
 * Parses an array of task objects from AI's text response.
 * @param {string} text - Response text from AI.
 * @param {number} expectedCount - Expected number of tasks.
 * @param {Function | Object} logFn - The logging function or MCP log object.
 * @param {boolean} isMCP - Flag indicating if logFn is MCP logger.
 * @returns {Array} Parsed and validated tasks array.
 * @throws {Error} If parsing or validation fails.
 */
function parseUpdatedTasksFromText(text, expectedCount, logFn, isMCP) {
const report = (level, ...args) => {
if (isMCP) {
if (typeof logFn[level] === 'function') logFn[level](...args);
else logFn.info(...args);
} else if (!isSilentMode()) {
// Check silent mode for consoleLog
consoleLog(level, ...args);
}
};

report(
'info',
'Attempting to parse updated tasks array from text response...'
);
if (!text || text.trim() === '')
throw new Error('AI response text is empty.');

let cleanedResponse = text.trim();
const originalResponseForDebug = cleanedResponse;
let parseMethodUsed = 'raw'; // Track which method worked

// --- NEW Step 1: Try extracting between [] first ---
const firstBracketIndex = cleanedResponse.indexOf('[');
const lastBracketIndex = cleanedResponse.lastIndexOf(']');
let potentialJsonFromArray = null;

if (firstBracketIndex !== -1 && lastBracketIndex > firstBracketIndex) {
potentialJsonFromArray = cleanedResponse.substring(
firstBracketIndex,
lastBracketIndex + 1
);
// Basic check to ensure it's not just "[]" or malformed
if (potentialJsonFromArray.length <= 2) {
potentialJsonFromArray = null; // Ignore empty array
}
}

// If [] extraction yielded something, try parsing it immediately
if (potentialJsonFromArray) {
try {
const testParse = JSON.parse(potentialJsonFromArray);
// It worked! Use this as the primary cleaned response.
cleanedResponse = potentialJsonFromArray;
parseMethodUsed = 'brackets';
} catch (e) {
report(
'info',
'Content between [] looked promising but failed initial parse. Proceeding to other methods.'
);
// Reset cleanedResponse to original if bracket parsing failed
cleanedResponse = originalResponseForDebug;
}
}

// --- Step 2: If bracket parsing didn't work or wasn't applicable, try code block extraction ---
if (parseMethodUsed === 'raw') {
// Only look for ```json blocks now
const codeBlockMatch = cleanedResponse.match(
/```json\s*([\s\S]*?)\s*```/i // Only match ```json
);
if (codeBlockMatch) {
cleanedResponse = codeBlockMatch[1].trim();
parseMethodUsed = 'codeblock';
report('info', 'Extracted JSON content from JSON Markdown code block.');
} else {
report('info', 'No JSON code block found.');
// --- Step 3: If code block failed, try stripping prefixes ---
const commonPrefixes = [
'json\n',
'javascript\n', // Keep checking common prefixes just in case
'python\n',
'here are the updated tasks:',
'here is the updated json:',
'updated tasks:',
'updated json:',
'response:',
'output:'
];
let prefixFound = false;
for (const prefix of commonPrefixes) {
if (cleanedResponse.toLowerCase().startsWith(prefix)) {
cleanedResponse = cleanedResponse.substring(prefix.length).trim();
parseMethodUsed = 'prefix';
report('info', `Stripped prefix: "${prefix.trim()}"`);
prefixFound = true;
break;
}
}
if (!prefixFound) {
report(
'warn',
'Response does not appear to contain [], JSON code block, or known prefix. Attempting raw parse.'
);
}
}
}

// --- Step 4: Attempt final parse ---
let parsedTasks;
try {
parsedTasks = JSON.parse(cleanedResponse);
} catch (parseError) {
report('error', `Failed to parse JSON array: ${parseError.message}`);
report(
'error',
`Extraction method used: ${parseMethodUsed}` // Log which method failed
);
report(
'error',
`Problematic JSON string (first 500 chars): ${cleanedResponse.substring(0, 500)}`
);
report(
'error',
`Original Raw Response (first 500 chars): ${originalResponseForDebug.substring(0, 500)}`
);
throw new Error(
`Failed to parse JSON response array: ${parseError.message}`
);
}

// --- Step 5 & 6: Validate Array structure and Zod schema ---
if (!Array.isArray(parsedTasks)) {
report(
'error',
`Parsed content is not an array. Type: ${typeof parsedTasks}`
);
report(
'error',
`Parsed content sample: ${JSON.stringify(parsedTasks).substring(0, 200)}`
);
throw new Error('Parsed AI response is not a valid JSON array.');
}

report('info', `Successfully parsed ${parsedTasks.length} potential tasks.`);
if (expectedCount && parsedTasks.length !== expectedCount) {
report(
'warn',
`Expected ${expectedCount} tasks, but parsed ${parsedTasks.length}.`
);
}

// Log missing fields for debugging before preprocessing
let hasWarnings = false;
parsedTasks.forEach((task, index) => {
const missingFields = [];
if (!task.hasOwnProperty('id')) missingFields.push('id');
if (!task.hasOwnProperty('status')) missingFields.push('status');
if (!task.hasOwnProperty('dependencies'))
missingFields.push('dependencies');

if (missingFields.length > 0) {
hasWarnings = true;
report(
'warn',
`Task ${index} is missing fields: ${missingFields.join(', ')} - will use defaults`
);
}
});

if (hasWarnings) {
report(
'warn',
'Some tasks were missing required fields. Applying defaults...'
);
}

// Use the preprocessing schema to add defaults and validate
const preprocessResult = preprocessedTaskArraySchema.safeParse(parsedTasks);

if (!preprocessResult.success) {
// This should rarely happen now since preprocessing adds defaults
report('error', 'Failed to validate task array even after preprocessing.');
preprocessResult.error.errors.forEach((err) => {
report('error', ` - Path '${err.path.join('.')}': ${err.message}`);
});

throw new Error(
`AI response failed validation: ${preprocessResult.error.message}`
);
}

report('info', 'Successfully validated and transformed task structure.');
return preprocessResult.data.slice(
0,
expectedCount || preprocessResult.data.length
);
}

/**
 * Update tasks based on new context using the unified AI service.
 * @param {string} tasksPath - Path to the tasks.json file
@@ -458,13 +212,15 @@ async function updateTasks(
// Determine role based on research flag
const serviceRole = useResearch ? 'research' : 'main';

// Call the unified AI service
aiServiceResponse = await generateTextService({
// Call the unified AI service with generateObject
aiServiceResponse = await generateObjectService({
role: serviceRole,
session: session,
projectRoot: projectRoot,
systemPrompt: systemPrompt,
prompt: userPrompt,
schema: COMMAND_SCHEMAS['update-tasks'],
objectName: 'tasks',
commandName: 'update-tasks',
outputType: isMCP ? 'mcp' : 'cli'
});
@@ -472,13 +228,8 @@ async function updateTasks(
if (loadingIndicator)
stopLoadingIndicator(loadingIndicator, 'AI update complete.');

// Use the mainResult (text) for parsing
const parsedUpdatedTasks = parseUpdatedTasksFromText(
aiServiceResponse.mainResult,
tasksToUpdate.length,
logFn,
isMCP
);
// With generateObject, we get structured data directly
const parsedUpdatedTasks = aiServiceResponse.mainResult.tasks;

// --- Update Tasks Data (Updated writeJSON call) ---
if (!Array.isArray(parsedUpdatedTasks)) {
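
The removed `preprocessTaskSchema` above is a useful pattern on its own: `z.preprocess` fills in defaults for missing fields before the strict task schema validates them. A minimal standalone illustration of that technique follows; field names mirror the removed schema, but this is not the registry implementation.

```js
// Minimal z.preprocess illustration: apply defaults, then validate.
import { z } from 'zod';

const taskSchema = z.object({
	id: z.number().int(),
	title: z.string(),
	status: z.string(),
	dependencies: z.array(z.union([z.number().int(), z.string()]))
});

const taskWithDefaults = z.preprocess((task) => {
	if (typeof task !== 'object' || task === null) return {};
	return {
		...task,
		id: task.id ?? 0,
		title: task.title ?? 'Untitled Task',
		status: task.status ?? 'pending',
		dependencies: Array.isArray(task.dependencies) ? task.dependencies : []
	};
}, taskSchema);

// status and dependencies are defaulted to 'pending' and [] before validation.
console.log(taskWithDefaults.parse({ id: 7, title: 'Refactor parser' }));
```
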