feat(ai): Enhance Google provider telemetry and AI object response handling

This commit introduces two key improvements:

1.  **Google Provider Telemetry:**
    - Updated  to include token usage data (, ) in the responses from  and .
    - This aligns the Google provider with others for consistent AI usage telemetry.

2.  **Robust AI Object Response Handling:**
    - Modified  to more flexibly handle responses from .
    - The add-task module now check for the AI-generated object in both  and , improving compatibility with different AI provider response structures (e.g., Gemini).

These changes enhance the reliability of AI interactions, particularly with the Google provider, and ensure accurate telemetry collection.
This commit is contained in:
Eyal Toledano
2025-05-13 12:13:35 -04:00
parent 59230c4d91
commit e53d5e1577
5 changed files with 72 additions and 59 deletions

View File

@@ -1,32 +1,32 @@
{
"models": {
"main": {
"provider": "anthropic",
"modelId": "claude-3-7-sonnet-20250219",
"maxTokens": 100000,
"temperature": 0.2
},
"research": {
"provider": "perplexity",
"modelId": "sonar-pro",
"maxTokens": 8700,
"temperature": 0.1
},
"fallback": {
"provider": "anthropic",
"modelId": "claude-3-7-sonnet-20250219",
"maxTokens": 120000,
"temperature": 0.2
}
},
"global": {
"userId": "1234567890",
"logLevel": "info",
"debug": false,
"defaultSubtasks": 5,
"defaultPriority": "medium",
"projectName": "Taskmaster",
"ollamaBaseUrl": "http://localhost:11434/api",
"azureOpenaiBaseUrl": "https://your-endpoint.openai.azure.com/"
}
"models": {
"main": {
"provider": "google",
"modelId": "gemini-2.5-pro-exp-03-25",
"maxTokens": 100000,
"temperature": 0.2
},
"research": {
"provider": "perplexity",
"modelId": "sonar-pro",
"maxTokens": 8700,
"temperature": 0.1
},
"fallback": {
"provider": "anthropic",
"modelId": "claude-3-7-sonnet-20250219",
"maxTokens": 120000,
"temperature": 0.2
}
},
"global": {
"logLevel": "info",
"debug": false,
"defaultSubtasks": 5,
"defaultPriority": "medium",
"projectName": "Taskmaster",
"ollamaBaseUrl": "http://localhost:11434/api",
"userId": "1234567890",
"azureOpenaiBaseUrl": "https://your-endpoint.openai.azure.com/"
}
}

View File

@@ -93,20 +93,6 @@ async function addTask(
};
try {
// Only display banner and UI elements for text output (CLI)
if (outputFormat === 'text') {
displayBanner();
console.log(
boxen(chalk.white.bold(`Creating New Task`), {
padding: 1,
borderColor: 'blue',
borderStyle: 'round',
margin: { top: 1, bottom: 1 }
})
);
}
// Read the existing tasks
const data = readJSON(tasksPath);
if (!data || !data.tasks) {
@@ -173,7 +159,7 @@ async function addTask(
} else {
report('DEBUG: Taking AI task generation path.', 'debug');
// --- Refactored AI Interaction ---
report('Generating task data with AI...', 'info');
report(`Generating task data with AI with prompt:\n${prompt}`, 'info');
// Create context string for task creation prompt
let contextTasks = '';
@@ -233,7 +219,7 @@ async function addTask(
// Start the loading indicator - only for text mode
if (outputFormat === 'text') {
loadingIndicator = startLoadingIndicator(
`Generating new task with ${useResearch ? 'Research' : 'Main'} AI..\n`
`Generating new task with ${useResearch ? 'Research' : 'Main'} AI...\n`
);
}
@@ -255,16 +241,27 @@ async function addTask(
});
report('DEBUG: generateObjectService returned successfully.', 'debug');
if (
!aiServiceResponse ||
!aiServiceResponse.mainResult ||
!aiServiceResponse.mainResult.object
) {
if (!aiServiceResponse || !aiServiceResponse.mainResult) {
throw new Error(
'AI service did not return the expected object structure.'
);
}
taskData = aiServiceResponse.mainResult.object; // Extract the AI-generated task data
// Prefer mainResult if it looks like a valid task object, otherwise try mainResult.object
if (
aiServiceResponse.mainResult.title &&
aiServiceResponse.mainResult.description
) {
taskData = aiServiceResponse.mainResult;
} else if (
aiServiceResponse.mainResult.object &&
aiServiceResponse.mainResult.object.title &&
aiServiceResponse.mainResult.object.description
) {
taskData = aiServiceResponse.mainResult.object;
} else {
throw new Error('AI service did not return a valid task object.');
}
report('Successfully generated task data from AI.', 'success');
} catch (error) {

View File

@@ -9,7 +9,7 @@ import { generateText, streamText, generateObject } from 'ai'; // Import from ma
import { log } from '../../scripts/modules/utils.js'; // Import logging utility
// Consider making model configurable via config-manager.js later
const DEFAULT_MODEL = 'gemini-2.0-pro'; // Or a suitable default
const DEFAULT_MODEL = 'gemini-2.5-pro-exp-03-25'; // Or a suitable default
const DEFAULT_TEMPERATURE = 0.2; // Or a suitable default
/**
@@ -52,7 +52,15 @@ async function generateGoogleText({
});
// Assuming result structure provides text directly or within a property
return result.text; // Adjust based on actual SDK response
// return result.text; // Adjust based on actual SDK response
// Return both text and usage
return {
text: result.text,
usage: {
inputTokens: result.usage.promptTokens,
outputTokens: result.usage.completionTokens
}
};
} catch (error) {
log(
'error',
@@ -143,7 +151,7 @@ async function generateGoogleObject({
// const model = google.getGenerativeModel({ model: modelId }); // Incorrect model retrieval
const model = googleProvider(modelId); // Correct model retrieval
const { object } = await generateObject({
const result = await generateObject({
model, // Pass the model instance
schema,
messages,
@@ -154,7 +162,15 @@ async function generateGoogleObject({
// Check SDK docs if specific tool calling/JSON mode needs explicit setup.
});
return object; // Return the parsed object
// return object; // Return the parsed object
// Return both object and usage
return {
object: result.object,
usage: {
inputTokens: result.usage.promptTokens,
outputTokens: result.usage.completionTokens
}
};
} catch (error) {
log(
'error',

View File

@@ -381,7 +381,7 @@ async function callAiService(params) {
```
</info added on 2025-05-09T04:02:44.847Z>
## 13. Update google.js for Telemetry Compatibility [pending]
## 13. Update google.js for Telemetry Compatibility [done]
### Dependencies: None
### Description: Modify src/ai-providers/google.js functions to return usage data.
### Details:

View File

@@ -5039,7 +5039,7 @@
"title": "Update google.js for Telemetry Compatibility",
"description": "Modify src/ai-providers/google.js functions to return usage data.",
"details": "Update the provider functions in `src/ai-providers/google.js` to ensure they return telemetry-compatible results:\\n\\n1. **`generateGoogleText`**: Return `{ text: ..., usage: { inputTokens: ..., outputTokens: ... } }`. Extract token counts from the Vercel AI SDK result.\\n2. **`generateGoogleObject`**: Return `{ object: ..., usage: { inputTokens: ..., outputTokens: ... } }`. Extract token counts.\\n3. **`streamGoogleText`**: Return the *full stream result object* returned by the Vercel AI SDK's `streamText`, not just the `textStream` property. The full object contains usage information.\\n\\nReference `anthropic.js` for the pattern.",
"status": "pending",
"status": "done",
"dependencies": [],
"parentTaskId": 77
},