feat(telemetry): Integrate usage telemetry for expand-task, fix return types

This commit integrates AI usage telemetry for the `expand-task` command/tool and resolves issues related to incorrect return type handling and logging.

Key Changes:

1.  **Telemetry Integration for `expand-task` (Subtask 77.7):**
    -   Applied the standard telemetry pattern to the `expandTask` core logic (`scripts/modules/task-manager/expand-task.js`) and the `expandTaskDirect` wrapper (`mcp-server/src/core/direct-functions/expand-task.js`).
    -   AI service calls now pass `commandName` and `outputType`.
    -   Core function returns `{ task, telemetryData }`.
    -   Direct function correctly extracts `task` and passes `telemetryData` in the MCP response `data` field.
    -   Telemetry summary is now displayed in the CLI output for the `expand` command.

2.  **Fix AI Service Return Type Handling (`ai-services-unified.js`):**
    -   Corrected the `_unifiedServiceRunner` function to properly handle the return objects from provider-specific functions (`generateText`, `generateObject`).
    -   It now correctly extracts `providerResponse.text` or `providerResponse.object` into the `mainResult` field based on `serviceType`, resolving the "text.trim is not a function" error encountered during `expand-task`.

3.  **Log Cleanup:**
    -   Removed various redundant or excessive `console.log` statements across multiple files (as indicated by recent changes) to reduce noise and improve clarity, particularly for MCP interactions.
This commit is contained in:
Eyal Toledano
2025-05-08 16:02:23 -04:00
parent f89d2aacc0
commit ab84afd036
10 changed files with 269 additions and 70 deletions

View File

@@ -209,7 +209,6 @@ async function analyzeTaskComplexity(options, context = {}) {
try {
const role = useResearch ? 'research' : 'main';
reportLog(`Using AI service with role: ${role}`, 'info');
fullResponse = await generateTextService({
prompt,
@@ -219,11 +218,6 @@ async function analyzeTaskComplexity(options, context = {}) {
projectRoot
});
reportLog(
'Successfully received text response via AI service',
'success'
);
// --- Stop Loading Indicator (Unchanged) ---
if (loadingIndicator) {
stopLoadingIndicator(loadingIndicator);

View File

@@ -4,7 +4,11 @@ import { z } from 'zod';
import { log, readJSON, writeJSON, isSilentMode } from '../utils.js';
import { startLoadingIndicator, stopLoadingIndicator } from '../ui.js';
import {
startLoadingIndicator,
stopLoadingIndicator,
displayAiUsageSummary
} from '../ui.js';
import { generateTextService } from '../ai-services-unified.js';
@@ -182,9 +186,16 @@ function parseSubtasksFromText(
parentTaskId,
logger
) {
logger.info('Attempting to parse subtasks object from text response...');
// Add a type check for 'text' before attempting to call .trim()
if (typeof text !== 'string') {
logger.error(
`AI response text is not a string. Received type: ${typeof text}, Value: ${text}`
);
throw new Error('AI response text is not a string.');
}
if (!text || text.trim() === '') {
throw new Error('AI response text is empty.');
throw new Error('AI response text is empty after trimming.'); // Clarified error message
}
let cleanedResponse = text.trim();
@@ -196,14 +207,12 @@ function parseSubtasksFromText(
);
if (codeBlockMatch) {
cleanedResponse = codeBlockMatch[1].trim();
logger.info('Extracted JSON content from Markdown code block.');
} else {
// 2. If no code block, find first '{' and last '}' for the object
const firstBrace = cleanedResponse.indexOf('{');
const lastBrace = cleanedResponse.lastIndexOf('}');
if (firstBrace !== -1 && lastBrace > firstBrace) {
cleanedResponse = cleanedResponse.substring(firstBrace, lastBrace + 1);
logger.info('Extracted content between first { and last }.');
} else {
logger.warn(
'Response does not appear to contain a JSON object structure. Parsing raw response.'
@@ -243,9 +252,6 @@ function parseSubtasksFromText(
}
const parsedSubtasks = parsedObject.subtasks; // Extract the array
logger.info(
`Successfully parsed ${parsedSubtasks.length} potential subtasks from the object.`
);
if (expectedCount && parsedSubtasks.length !== expectedCount) {
logger.warn(
`Expected ${expectedCount} subtasks, but parsed ${parsedSubtasks.length}.`
@@ -336,9 +342,13 @@ async function expandTask(
context = {},
force = false
) {
const { session, mcpLog } = context;
const { session, mcpLog, projectRoot: contextProjectRoot } = context;
const outputFormat = mcpLog ? 'json' : 'text';
// Determine projectRoot: Use from context if available, otherwise derive from tasksPath
const projectRoot =
contextProjectRoot || path.dirname(path.dirname(tasksPath));
// Use mcpLog if available, otherwise use the default console log wrapper
const logger = mcpLog || {
info: (msg) => !isSilentMode() && log('info', msg),
@@ -381,7 +391,6 @@ async function expandTask(
let complexityReasoningContext = '';
let systemPrompt; // Declare systemPrompt here
const projectRoot = path.dirname(path.dirname(tasksPath));
const complexityReportPath = path.join(
projectRoot,
'scripts/task-complexity-report.json'
@@ -488,28 +497,27 @@ async function expandTask(
let loadingIndicator = null;
if (outputFormat === 'text') {
loadingIndicator = startLoadingIndicator(
`Generating ${finalSubtaskCount} subtasks...`
`Generating ${finalSubtaskCount} subtasks...\n`
);
}
let responseText = '';
let aiServiceResponse = null;
try {
const role = useResearch ? 'research' : 'main';
logger.info(`Using AI service with role: ${role}`);
// Call generateTextService with the determined prompts
responseText = await generateTextService({
// Call generateTextService with the determined prompts and telemetry params
aiServiceResponse = await generateTextService({
prompt: promptContent,
systemPrompt: systemPrompt, // Use the determined system prompt
systemPrompt: systemPrompt,
role,
session,
projectRoot
projectRoot,
commandName: 'expand-task',
outputType: outputFormat
});
logger.info(
'Successfully received text response from AI service',
'success'
);
responseText = aiServiceResponse.mainResult;
// Parse Subtasks
generatedSubtasks = parseSubtasksFromText(
@@ -550,14 +558,23 @@ async function expandTask(
// --- End Change: Append instead of replace ---
data.tasks[taskIndex] = task; // Assign the modified task back
logger.info(`Writing updated tasks to ${tasksPath}`);
writeJSON(tasksPath, data);
logger.info(`Generating individual task files...`);
await generateTaskFiles(tasksPath, path.dirname(tasksPath));
logger.info(`Task files generated.`);
// --- End Task Update & File Writing ---
return task; // Return the updated task object
// Display AI Usage Summary for CLI
if (
outputFormat === 'text' &&
aiServiceResponse &&
aiServiceResponse.telemetryData
) {
displayAiUsageSummary(aiServiceResponse.telemetryData, 'cli');
}
// Return the updated task object AND telemetry data
return {
task,
telemetryData: aiServiceResponse?.telemetryData
};
} catch (error) {
// Catches errors from file reading, parsing, AI call etc.
logger.error(`Error expanding task ${taskId}: ${error.message}`, 'error');

View File

@@ -19,8 +19,6 @@ function generateTaskFiles(tasksPath, outputDir, options = {}) {
// Determine if we're in MCP mode by checking for mcpLog
const isMcpMode = !!options?.mcpLog;
log('info', `Preparing to regenerate task files in ${tasksPath}`);
const data = readJSON(tasksPath);
if (!data || !data.tasks) {
throw new Error(`No valid tasks found in ${tasksPath}`);
@@ -31,7 +29,7 @@ function generateTaskFiles(tasksPath, outputDir, options = {}) {
fs.mkdirSync(outputDir, { recursive: true });
}
log('info', `Found ${data.tasks.length} tasks to regenerate`);
log('info', `Preparing to regenerate ${data.tasks.length} task files`);
// Validate and fix dependencies before generating files
log('info', `Validating and fixing dependencies`);

View File

@@ -388,7 +388,6 @@ The changes described in the prompt should be thoughtfully applied to make the t
try {
// --- Call Unified AI Service (generateTextService) ---
const role = useResearch ? 'research' : 'main';
report('info', `Using AI service with role: ${role}`);
responseText = await generateTextService({
prompt: userPrompt,
@@ -397,7 +396,6 @@ The changes described in the prompt should be thoughtfully applied to make the t
session,
projectRoot
});
report('success', 'Successfully received text response from AI service');
// --- End AI Service Call ---
} catch (error) {
// Catch errors from generateTextService

View File

@@ -361,8 +361,6 @@ The changes described in the prompt should be applied to ALL tasks in the list.`
try {
// --- Call Unified AI Service ---
const role = useResearch ? 'research' : 'main';
if (isMCP) logFn.info(`Using AI service with role: ${role}`);
else logFn('info', `Using AI service with role: ${role}`);
responseText = await generateTextService({
prompt: userPrompt,
@@ -371,9 +369,6 @@ The changes described in the prompt should be applied to ALL tasks in the list.`
session,
projectRoot
});
if (isMCP) logFn.info('Successfully received text response');
else
logFn('success', 'Successfully received text response via AI service');
// --- End AI Service Call ---
} catch (error) {
if (loadingIndicator) stopLoadingIndicator(loadingIndicator);