feat(telemetry): Integrate usage telemetry for expand-task, fix return types

This commit integrates AI usage telemetry for the `expand-task` command/tool and resolves issues related to incorrect return type handling and logging.

Key Changes:

1.  **Telemetry Integration for `expand-task` (Subtask 77.7):**
    -   Applied the standard telemetry pattern to the `expandTask` core logic (`scripts/modules/task-manager/expand-task.js`) and the `expandTaskDirect` wrapper (`mcp-server/src/core/direct-functions/expand-task.js`).
    -   AI service calls now pass `commandName` and `outputType`.
    -   Core function returns `{ task, telemetryData }`.
    -   Direct function correctly extracts `task` and passes `telemetryData` in the MCP response `data` field.
    -   Telemetry summary is now displayed in the CLI output for the `expand` command.

2.  **Fix AI Service Return Type Handling (`ai-services-unified.js`):**
    -   Corrected the `_unifiedServiceRunner` function to properly handle the return objects from provider-specific functions (`generateText`, `generateObject`).
    -   It now correctly extracts `providerResponse.text` or `providerResponse.object` into the `mainResult` field based on `serviceType`, resolving the "text.trim is not a function" error encountered during `expand-task`.

3.  **Log Cleanup:**
    -   Removed various redundant or excessive `console.log` statements across multiple files (as indicated by recent changes) to reduce noise and improve clarity, particularly for MCP interactions.
This commit is contained in:
Eyal Toledano
2025-05-08 16:02:23 -04:00
parent f89d2aacc0
commit ab84afd036
10 changed files with 269 additions and 70 deletions

View File

@@ -16,7 +16,8 @@ import {
getFallbackModelId,
getParametersForRole,
getUserId,
MODEL_MAP
MODEL_MAP,
getDebugFlag
} from './config-manager.js';
import { log, resolveEnvVariable, isSilentMode } from './utils.js';
@@ -298,12 +299,14 @@ async function _unifiedServiceRunner(serviceType, params) {
outputType,
...restApiParams
} = params;
log('info', `${serviceType}Service called`, {
role: initialRole,
commandName,
outputType,
projectRoot
});
if (getDebugFlag()) {
log('info', `${serviceType}Service called`, {
role: initialRole,
commandName,
outputType,
projectRoot
});
}
// Determine the effective project root (passed in or detected if needed by config getters)
const { findProjectRoot: detectProjectRoot } = await import('./utils.js'); // Dynamically import if needed
@@ -333,7 +336,7 @@ async function _unifiedServiceRunner(serviceType, params) {
for (const currentRole of sequence) {
let providerName, modelId, apiKey, roleParams, providerFnSet, providerApiFn;
let aiCallResult;
let providerResponse;
let telemetryData = null;
try {
@@ -456,7 +459,7 @@ async function _unifiedServiceRunner(serviceType, params) {
};
// 6. Attempt the call with retries
aiCallResult = await _attemptProviderCallWithRetries(
providerResponse = await _attemptProviderCallWithRetries(
providerApiFn,
callParams,
providerName,
@@ -464,36 +467,51 @@ async function _unifiedServiceRunner(serviceType, params) {
currentRole
);
log('info', `${serviceType}Service succeeded using role: ${currentRole}`);
// --- Log Telemetry & Capture Data ---
// TODO: Add telemetry logic gate in case user doesn't accept telemetry
if (userId && aiCallResult && aiCallResult.usage) {
// Use providerResponse which contains the usage data directly for text/object
if (userId && providerResponse && providerResponse.usage) {
try {
telemetryData = await logAiUsage({
userId,
commandName,
providerName,
modelId,
inputTokens: aiCallResult.usage.inputTokens,
outputTokens: aiCallResult.usage.outputTokens,
inputTokens: providerResponse.usage.inputTokens,
outputTokens: providerResponse.usage.outputTokens,
outputType
});
} catch (telemetryError) {
// logAiUsage already logs its own errors and returns null on failure
// No need to log again here, telemetryData will remain null
}
} else if (userId && aiCallResult && !aiCallResult.usage) {
} else if (userId && providerResponse && !providerResponse.usage) {
log(
'warn',
`Cannot log telemetry for ${commandName} (${providerName}/${modelId}): AI result missing 'usage' data.`
`Cannot log telemetry for ${commandName} (${providerName}/${modelId}): AI result missing 'usage' data. (May be expected for streams)`
);
}
// --- End Log Telemetry ---
// Return a composite object including the main AI result and telemetry data
// --- Extract the correct main result based on serviceType ---
let finalMainResult;
if (serviceType === 'generateText') {
finalMainResult = providerResponse.text;
} else if (serviceType === 'generateObject') {
finalMainResult = providerResponse.object;
} else if (serviceType === 'streamText') {
finalMainResult = providerResponse; // Return the whole stream object
} else {
log(
'error',
`Unknown serviceType in _unifiedServiceRunner: ${serviceType}`
);
finalMainResult = providerResponse; // Default to returning the whole object as fallback
}
// --- End Main Result Extraction ---
// Return a composite object including the extracted main result and telemetry data
return {
mainResult: aiCallResult,
mainResult: finalMainResult,
telemetryData: telemetryData
};
} catch (error) {
@@ -653,7 +671,9 @@ async function logAiUsage({
currency // Add currency to the telemetry data
};
log('info', 'AI Usage Telemetry:', telemetryData);
if (getDebugFlag()) {
log('info', 'AI Usage Telemetry:', telemetryData);
}
// TODO (Subtask 77.2): Send telemetryData securely to the external endpoint.