feat(telemetry): Integrate AI usage telemetry into parse-prd

Implements AI usage telemetry capture and propagation for the `parse-prd` command and MCP tool, following the established telemetry pattern.

Key changes:

-   **Core (`scripts/modules/task-manager/parse-prd.js`):**
    -   Modified the `generateObjectService` call to include `commandName: 'parse-prd'` and `outputType`.
    -   Updated to receive the full `aiServiceResponse` (containing `mainResult` and `telemetryData`) from `generateObjectService`.
    -   Adjusted to return an object containing `success`, `tasksPath`, and `telemetryData`.
    -   Added a call to `displayAiUsageSummary` to show telemetry data in the CLI output when not in MCP mode.

-   **Direct Function (`mcp-server/src/core/direct-functions/parse-prd.js`):**
    -   Updated the call to the core `parsePRD` function to pass `commandName: 'parse-prd'`, `outputType: 'mcp'`, and the `'json'` output format.
    -   Modified the result handling to match the new return structure from the core function.
    -   Ensures the `telemetryData` received from the core function is included in the `data` field of the successful MCP response.

-   **MCP Tool (`mcp-server/src/tools/parse-prd.js`):**
    -   No changes required; the existing `handleApiResult` handling correctly passes through the `data` object containing `telemetryData`.

-   **CLI Command (`parse-prd`):**
    -   The `parse-prd` command's action now relies on the core `parsePRD` function to handle CLI success messages and telemetry display.

This ensures that AI usage for the `parse-prd` functionality is tracked and can be displayed or logged as appropriate for both CLI and MCP interactions; a minimal sketch of the flow follows below.
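
A minimal, self-contained sketch of the propagation pattern summarized above. The `generateObjectService` and `displayAiUsageSummary` functions here are stubbed stand-ins with made-up bodies (the real implementations live in `ai-services-unified.js` and `ui.js`), and the output path and token numbers are illustrative rather than taken from this commit:

```javascript
// Sketch only: stubbed stand-ins for the real modules, used to show how
// commandName/outputType flow down and telemetryData flows back up.
async function generateObjectService({ commandName, outputType, prompt }) {
	// Assumed response shape: { mainResult, telemetryData }, per the commit message.
	return {
		mainResult: { object: { tasks: [{ id: 1, title: `Task from: ${prompt}` }] } },
		telemetryData: {
			commandName,
			outputType,
			inputTokens: 1200,
			outputTokens: 800,
			totalCost: 0.0042
		}
	};
}

// Stand-in for displayAiUsageSummary from ui.js (real formatting differs).
function displayAiUsageSummary(telemetryData, outputType) {
	if (outputType === 'cli') {
		console.log(
			`AI usage: ${telemetryData.inputTokens} in / ${telemetryData.outputTokens} out, ~$${telemetryData.totalCost}`
		);
	}
}

// Core pattern: tag the AI call, keep the whole response, return telemetryData upward.
async function parsePRDSketch(prdText, { isMCP = false } = {}) {
	const aiServiceResponse = await generateObjectService({
		commandName: 'parse-prd',
		outputType: isMCP ? 'mcp' : 'cli',
		prompt: prdText
	});

	if (!isMCP) {
		// CLI path: the core function itself prints the usage summary.
		displayAiUsageSummary(aiServiceResponse.telemetryData, 'cli');
	}

	return {
		success: true,
		tasksPath: 'tasks/tasks.json', // illustrative path
		telemetryData: aiServiceResponse?.telemetryData
	};
}

// Direct-function pattern: forward telemetryData inside the MCP `data` payload.
async function parsePRDDirectSketch(prdText) {
	const result = await parsePRDSketch(prdText, { isMCP: true });
	return {
		success: true,
		data: {
			message: `Successfully parsed PRD and generated tasks in ${result.tasksPath}`,
			outputPath: result.tasksPath,
			telemetryData: result.telemetryData
		}
	};
}

parsePRDDirectSketch('Example PRD text').then((res) =>
	console.log(JSON.stringify(res.data, null, 2))
);
```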
Author: Eyal Toledano
Date: 2025-05-07 14:22:42 -04:00
Parent: 0288311965
Commit: f89d2aacc0
4 changed files with 54 additions and 44 deletions

View File: mcp-server/src/core/direct-functions/parse-prd.js

@@ -131,21 +131,28 @@ export async function parsePRDDirect(args, log, context = {}) {
inputPath,
outputPath,
numTasks,
{ session, mcpLog: logWrapper, projectRoot, useForce, useAppend },
{
session,
mcpLog: logWrapper,
projectRoot,
useForce,
useAppend,
commandName: 'parse-prd',
outputType: 'mcp'
},
'json'
);
// parsePRD returns { success: true, tasks: processedTasks } on success
if (result && result.success && Array.isArray(result.tasks)) {
logWrapper.success(
`Successfully parsed PRD. Generated ${result.tasks.length} tasks.`
);
// Adjust check for the new return structure
if (result && result.success) {
const successMsg = `Successfully parsed PRD and generated tasks in ${result.tasksPath}`;
logWrapper.success(successMsg);
return {
success: true,
data: {
message: `Successfully parsed PRD and generated ${result.tasks.length} tasks.`,
outputPath: outputPath,
taskCount: result.tasks.length
message: successMsg,
outputPath: result.tasksPath,
telemetryData: result.telemetryData
}
};
} else {

View File: scripts/modules/task-manager/parse-prd.js

@@ -17,6 +17,7 @@ import {
import { generateObjectService } from '../ai-services-unified.js';
import { getDebugFlag } from '../config-manager.js';
import generateTaskFiles from './generate-task-files.js';
import { displayAiUsageSummary } from '../ui.js';
// Define the Zod schema for a SINGLE task object
const prdSingleTaskSchema = z.object({
@@ -95,6 +96,7 @@ async function parsePRD(prdPath, tasksPath, numTasks, options = {}) {
let existingTasks = [];
let nextId = 1;
let aiServiceResponse = null;
try {
// Handle file existence and overwrite/append logic
@@ -206,8 +208,8 @@ Guidelines:
// Call the unified AI service
report('Calling AI service to generate tasks from PRD...', 'info');
// Call generateObjectService with the CORRECT schema
const generatedData = await generateObjectService({
// Call generateObjectService with the CORRECT schema and additional telemetry params
aiServiceResponse = await generateObjectService({
role: 'main',
session: session,
projectRoot: projectRoot,
@@ -215,7 +217,8 @@ Guidelines:
objectName: 'tasks_data',
systemPrompt: systemPrompt,
prompt: userPrompt,
reportProgress
commandName: 'parse-prd',
outputType: isMCP ? 'mcp' : 'cli'
});
// Create the directory if it doesn't exist
@@ -223,12 +226,12 @@ Guidelines:
if (!fs.existsSync(tasksDir)) {
fs.mkdirSync(tasksDir, { recursive: true });
}
logFn.success('Successfully parsed PRD via AI service.'); // Assumes generateObjectService validated
logFn.success('Successfully parsed PRD via AI service.');
// Validate and Process Tasks
const generatedData = aiServiceResponse?.mainResult?.object;
if (!generatedData || !Array.isArray(generatedData.tasks)) {
// This error *shouldn't* happen if generateObjectService enforced prdResponseSchema
// But keep it as a safeguard
logFn.error(
`Internal Error: generateObjectService returned unexpected data structure: ${JSON.stringify(generatedData)}`
);
@@ -265,36 +268,27 @@ Guidelines:
);
});
const allTasks = useAppend
const finalTasks = useAppend
? [...existingTasks, ...processedNewTasks]
: processedNewTasks;
const outputData = { tasks: finalTasks };
const finalTaskData = { tasks: allTasks }; // Use the combined list
// Write the tasks to the file
writeJSON(tasksPath, finalTaskData);
// Write the final tasks to the file
writeJSON(tasksPath, outputData);
report(
`Successfully wrote ${allTasks.length} total tasks to ${tasksPath} (${processedNewTasks.length} new).`,
`Successfully ${useAppend ? 'appended' : 'generated'} ${processedNewTasks.length} tasks in ${tasksPath}`,
'success'
);
report(`Tasks saved to: ${tasksPath}`, 'info');
// Generate individual task files
if (reportProgress && mcpLog) {
// Enable silent mode when being called from MCP server
enableSilentMode();
await generateTaskFiles(tasksPath, path.dirname(tasksPath));
disableSilentMode();
} else {
await generateTaskFiles(tasksPath, path.dirname(tasksPath));
}
// Generate markdown task files after writing tasks.json
await generateTaskFiles(tasksPath, path.dirname(tasksPath), { mcpLog });
// Only show success boxes for text output (CLI)
// Handle CLI output (e.g., success message)
if (outputFormat === 'text') {
console.log(
boxen(
chalk.green(
`Successfully generated ${processedNewTasks.length} new tasks. Total tasks in ${tasksPath}: ${allTasks.length}`
`Successfully generated ${processedNewTasks.length} new tasks. Total tasks in ${tasksPath}: ${finalTasks.length}`
),
{ padding: 1, borderColor: 'green', borderStyle: 'round' }
)
@@ -314,9 +308,18 @@ Guidelines:
}
)
);
if (aiServiceResponse && aiServiceResponse.telemetryData) {
displayAiUsageSummary(aiServiceResponse.telemetryData, 'cli');
}
}
return { success: true, tasks: processedNewTasks };
// Return telemetry data
return {
success: true,
tasksPath,
telemetryData: aiServiceResponse?.telemetryData
};
} catch (error) {
report(`Error parsing PRD: ${error.message}`, 'error');

View File

@@ -24,7 +24,7 @@
# Subtasks:
## 1. Implement telemetry utility and data collection [in-progress]
## 1. Implement telemetry utility and data collection [done]
### Dependencies: None
### Description: Create the logAiUsage utility in ai-services.js that captures all required telemetry data fields
### Details:
@@ -96,7 +96,7 @@ Implement HTTPS POST request functionality to securely send the telemetry payloa
### Details:
Design and implement a clear privacy notice explaining what data is collected and how it's used. Create a user consent prompt during initial installation/setup that requires explicit opt-in. Store the consent status in the .taskmasterconfig file and respect this setting throughout the application.
## 4. Integrate telemetry into Taskmaster commands [in-progress]
## 4. Integrate telemetry into Taskmaster commands [done]
### Dependencies: 77.1, 77.3
### Description: Integrate the telemetry utility across all relevant Taskmaster commands
### Details:
@@ -107,13 +107,13 @@ Modify each Taskmaster command (expand, parse-prd, research, etc.) to call the l
Successfully integrated telemetry calls into `addTask` (core) and `addTaskDirect` (MCP) functions by passing `commandName` and `outputType` parameters to the telemetry system. The `ai-services-unified.js` module now logs basic telemetry data, including calculated cost information, whenever the `add-task` command or tool is invoked. This integration respects user consent settings and maintains performance standards.
</info added on 2025-05-06T17:57:13.980Z>
## 5. Implement usage summary display [pending]
## 5. Implement usage summary display [done]
### Dependencies: 77.1, 77.4
### Description: Create an optional feature to display AI usage summary in the CLI output
### Details:
Develop functionality to display a concise summary of AI usage (tokens used, estimated cost) directly in the CLI output after command execution. Make this feature configurable through Taskmaster settings. Ensure the display is formatted clearly and doesn't clutter the main command output.
## 6. Telemetry Integration for parse-prd [pending]
## 6. Telemetry Integration for parse-prd [done]
### Dependencies: None
### Description: Integrate AI usage telemetry capture and propagation for the parse-prd functionality.
### Details:
@@ -136,7 +136,7 @@ Apply telemetry pattern from telemetry.mdc:
* Verify `handleApiResult` correctly passes `data.telemetryData` through.
## 7. Telemetry Integration for expand-task [pending]
## 7. Telemetry Integration for expand-task [in-progress]
### Dependencies: None
### Description: Integrate AI usage telemetry capture and propagation for the expand-task functionality.
### Details:

View File

@@ -4017,7 +4017,7 @@
"description": "Create the logAiUsage utility in ai-services.js that captures all required telemetry data fields",
"dependencies": [],
"details": "Develop the logAiUsage function that collects timestamp, userId, commandName, modelUsed, inputTokens, outputTokens, totalTokens, and totalCost. Implement token counting logic and cost calculation using pricing from supported_models.json. Ensure proper error handling and data validation.\n<info added on 2025-05-05T21:08:51.413Z>\nDevelop the logAiUsage function that collects timestamp, userId, commandName, modelUsed, inputTokens, outputTokens, totalTokens, and totalCost. Implement token counting logic and cost calculation using pricing from supported_models.json. Ensure proper error handling and data validation.\n\nImplementation Plan:\n1. Define `logAiUsage` function in `ai-services-unified.js` that accepts parameters: userId, commandName, providerName, modelId, inputTokens, and outputTokens.\n\n2. Implement data collection and calculation logic:\n - Generate timestamp using `new Date().toISOString()`\n - Calculate totalTokens by adding inputTokens and outputTokens\n - Create a helper function `_getCostForModel(providerName, modelId)` that:\n - Loads pricing data from supported-models.json\n - Finds the appropriate provider/model entry\n - Returns inputCost and outputCost rates or defaults if not found\n - Calculate totalCost using the formula: ((inputTokens/1,000,000) * inputCost) + ((outputTokens/1,000,000) * outputCost)\n - Assemble complete telemetryData object with all required fields\n\n3. Add initial logging functionality:\n - Use existing log utility to record telemetry data at 'info' level\n - Implement proper error handling with try/catch blocks\n\n4. Integrate with `_unifiedServiceRunner`:\n - Modify to accept commandName and userId parameters\n - After successful API calls, extract usage data from results\n - Call logAiUsage with the appropriate parameters\n\n5. Update provider functions in src/ai-providers/*.js:\n - Ensure all provider functions return both the primary result and usage statistics\n - Standardize the return format to include a usage object with inputTokens and outputTokens\n</info added on 2025-05-05T21:08:51.413Z>\n<info added on 2025-05-07T17:28:57.361Z>\nTo implement the AI usage telemetry effectively, we need to update each command across our different stacks. Let's create a structured approach for this implementation:\n\nCommand Integration Plan:\n1. Core Function Commands:\n - Identify all AI-utilizing commands in the core function library\n - For each command, modify to pass commandName and userId to _unifiedServiceRunner\n - Update return handling to process and forward usage statistics\n\n2. Direct Function Commands:\n - Map all direct function commands that leverage AI capabilities\n - Implement telemetry collection at the appropriate execution points\n - Ensure consistent error handling and telemetry reporting\n\n3. MCP Tool Stack Commands:\n - Inventory all MCP commands with AI dependencies\n - Standardize the telemetry collection approach across the tool stack\n - Add telemetry hooks that maintain backward compatibility\n\nFor each command category, we'll need to:\n- Document current implementation details\n- Define specific code changes required\n- Create tests to verify telemetry is being properly collected\n- Establish validation procedures to ensure data accuracy\n</info added on 2025-05-07T17:28:57.361Z>",
"status": "in-progress",
"status": "done",
"testStrategy": "Unit test the utility with mock AI usage data to verify all fields are correctly captured and calculated"
},
{
@@ -4049,7 +4049,7 @@
3
],
"details": "Modify each Taskmaster command (expand, parse-prd, research, etc.) to call the logAiUsage utility after AI interactions. Ensure telemetry is only sent if user has provided consent. Implement the integration in a way that doesn't impact command performance or user experience.\n<info added on 2025-05-06T17:57:13.980Z>\nModify each Taskmaster command (expand, parse-prd, research, etc.) to call the logAiUsage utility after AI interactions. Ensure telemetry is only sent if user has provided consent. Implement the integration in a way that doesn't impact command performance or user experience.\n\nSuccessfully integrated telemetry calls into `addTask` (core) and `addTaskDirect` (MCP) functions by passing `commandName` and `outputType` parameters to the telemetry system. The `ai-services-unified.js` module now logs basic telemetry data, including calculated cost information, whenever the `add-task` command or tool is invoked. This integration respects user consent settings and maintains performance standards.\n</info added on 2025-05-06T17:57:13.980Z>",
"status": "in-progress",
"status": "done",
"testStrategy": "Integration tests to verify telemetry is correctly triggered across different commands with proper data"
},
{
@@ -4061,7 +4061,7 @@
4
],
"details": "Develop functionality to display a concise summary of AI usage (tokens used, estimated cost) directly in the CLI output after command execution. Make this feature configurable through Taskmaster settings. Ensure the display is formatted clearly and doesn't clutter the main command output.",
"status": "pending",
"status": "done",
"testStrategy": "User acceptance testing to verify the summary display is clear, accurate, and properly configurable"
},
{
@@ -4069,7 +4069,7 @@
"title": "Telemetry Integration for parse-prd",
"description": "Integrate AI usage telemetry capture and propagation for the parse-prd functionality.",
"details": "\\\nApply telemetry pattern from telemetry.mdc:\n\n1. **Core (`scripts/modules/task-manager/parse-prd.js`):**\n * Modify AI service call to include `commandName: \\'parse-prd\\'` and `outputType`.\n * Receive `{ mainResult, telemetryData }`.\n * Return object including `telemetryData`.\n * Handle CLI display via `displayAiUsageSummary` if applicable.\n\n2. **Direct (`mcp-server/src/core/direct-functions/parse-prd.js`):**\n * Pass `commandName`, `outputType: \\'mcp\\'` to core.\n * Pass `outputFormat: \\'json\\'` if applicable.\n * Receive `{ ..., telemetryData }` from core.\n * Return `{ success: true, data: { ..., telemetryData } }`.\n\n3. **Tool (`mcp-server/src/tools/parse-prd.js`):**\n * Verify `handleApiResult` correctly passes `data.telemetryData` through.\n",
"status": "pending",
"status": "done",
"dependencies": [],
"parentTaskId": 77
},
@@ -4078,7 +4078,7 @@
"title": "Telemetry Integration for expand-task",
"description": "Integrate AI usage telemetry capture and propagation for the expand-task functionality.",
"details": "\\\nApply telemetry pattern from telemetry.mdc:\n\n1. **Core (`scripts/modules/task-manager/expand-task.js`):**\n * Modify AI service call to include `commandName: \\'expand-task\\'` and `outputType`.\n * Receive `{ mainResult, telemetryData }`.\n * Return object including `telemetryData`.\n * Handle CLI display via `displayAiUsageSummary` if applicable.\n\n2. **Direct (`mcp-server/src/core/direct-functions/expand-task.js`):**\n * Pass `commandName`, `outputType: \\'mcp\\'` to core.\n * Pass `outputFormat: \\'json\\'` if applicable.\n * Receive `{ ..., telemetryData }` from core.\n * Return `{ success: true, data: { ..., telemetryData } }`.\n\n3. **Tool (`mcp-server/src/tools/expand-task.js`):**\n * Verify `handleApiResult` correctly passes `data.telemetryData` through.\n",
"status": "pending",
"status": "in-progress",
"dependencies": [],
"parentTaskId": 77
},
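
The implementation plan recorded in subtask 77.1 above spells out the cost formula behind `telemetryData.totalCost`: `((inputTokens / 1,000,000) * inputCost) + ((outputTokens / 1,000,000) * outputCost)`, with per-model rates looked up from the supported models pricing file. A standalone sketch of that arithmetic, assuming a hard-coded sample pricing table in place of the real `supported-models.json` lookup (the model id and rates are illustrative):

```javascript
// Sketch of the cost calculation described in subtask 77.1. SAMPLE_PRICING is a
// hard-coded stand-in for supported-models.json; the rates are illustrative only.
const SAMPLE_PRICING = {
	anthropic: {
		'claude-3-7-sonnet': { inputCost: 3.0, outputCost: 15.0 } // USD per 1M tokens (assumed)
	}
};

function getCostForModel(providerName, modelId) {
	// Fall back to zero-cost defaults when the provider/model is not in the table.
	return SAMPLE_PRICING[providerName]?.[modelId] ?? { inputCost: 0, outputCost: 0 };
}

function buildTelemetryData({ userId, commandName, providerName, modelId, inputTokens, outputTokens }) {
	const { inputCost, outputCost } = getCostForModel(providerName, modelId);
	// Formula from the subtask details: per-million-token rates applied to each side of the call.
	const totalCost =
		(inputTokens / 1_000_000) * inputCost + (outputTokens / 1_000_000) * outputCost;

	return {
		timestamp: new Date().toISOString(),
		userId,
		commandName,
		modelUsed: modelId,
		inputTokens,
		outputTokens,
		totalTokens: inputTokens + outputTokens,
		totalCost
	};
}

console.log(
	buildTelemetryData({
		userId: 'local-user',
		commandName: 'parse-prd',
		providerName: 'anthropic',
		modelId: 'claude-3-7-sonnet',
		inputTokens: 1200,
		outputTokens: 800
	})
);
```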