fix(ai,tasks): Enhance AI provider robustness and task processing
This commit introduces several improvements to AI interactions and
task management functionalities:
- AI Provider Enhancements (for Telemetry & Robustness):
- :
- Added a check in to ensure
is a string, throwing an error if not. This prevents downstream
errors (e.g., in ).
- , , :
- Standardized return structures for their respective
and functions to consistently include /
and fields. This aligns them with other providers (like
Anthropic, Google, Perplexity) for consistent telemetry data
collection, as part of implementing subtask 77.14 and similar work.
- Task Expansion ():
- Updated to be more explicit
about using an empty array for empty to
better guide AI output.
- Implemented a pre-emptive cleanup step in
to replace malformed with
before JSON parsing. This improves resilience to AI output quirks,
particularly observed with Perplexity.
- Adjusts issue in commands.js where successfulRemovals would be undefined. It's properly invoked from the result variable now.
- Updates supported models for Gemini
These changes address issues observed during E2E tests, enhance the
reliability of AI-driven task analysis and expansion, and promote
consistent telemetry data across multiple AI providers.
This commit is contained in:
@@ -1,32 +1,32 @@
|
|||||||
{
|
{
|
||||||
"models": {
|
"models": {
|
||||||
"main": {
|
"main": {
|
||||||
"provider": "anthropic",
|
"provider": "anthropic",
|
||||||
"modelId": "claude-3-7-sonnet-20250219",
|
"modelId": "claude-3-7-sonnet-20250219",
|
||||||
"maxTokens": 100000,
|
"maxTokens": 100000,
|
||||||
"temperature": 0.2
|
"temperature": 0.2
|
||||||
},
|
},
|
||||||
"research": {
|
"research": {
|
||||||
"provider": "perplexity",
|
"provider": "perplexity",
|
||||||
"modelId": "sonar-pro",
|
"modelId": "sonar-pro",
|
||||||
"maxTokens": 8700,
|
"maxTokens": 8700,
|
||||||
"temperature": 0.1
|
"temperature": 0.1
|
||||||
},
|
},
|
||||||
"fallback": {
|
"fallback": {
|
||||||
"provider": "anthropic",
|
"provider": "anthropic",
|
||||||
"modelId": "claude-3-7-sonnet-20250219",
|
"modelId": "claude-3-7-sonnet-20250219",
|
||||||
"maxTokens": 120000,
|
"maxTokens": 64000,
|
||||||
"temperature": 0.2
|
"temperature": 0.2
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"global": {
|
"global": {
|
||||||
"logLevel": "info",
|
"logLevel": "info",
|
||||||
"debug": false,
|
"debug": false,
|
||||||
"defaultSubtasks": 5,
|
"defaultSubtasks": 5,
|
||||||
"defaultPriority": "medium",
|
"defaultPriority": "medium",
|
||||||
"projectName": "Taskmaster",
|
"projectName": "Taskmaster",
|
||||||
"ollamaBaseUrl": "http://localhost:11434/api",
|
"ollamaBaseUrl": "http://localhost:11434/api",
|
||||||
"userId": "1234567890",
|
"userId": "1234567890",
|
||||||
"azureOpenaiBaseUrl": "https://your-endpoint.openai.azure.com/"
|
"azureOpenaiBaseUrl": "https://your-endpoint.openai.azure.com/"
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -2060,7 +2060,7 @@ function registerCommands(programInstance) {
|
|||||||
);
|
);
|
||||||
|
|
||||||
// Exit with error if any removals failed
|
// Exit with error if any removals failed
|
||||||
if (successfulRemovals.length === 0) {
|
if (result.removedTasks.length === 0) {
|
||||||
process.exit(1);
|
process.exit(1);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -99,34 +99,39 @@
|
|||||||
],
|
],
|
||||||
"google": [
|
"google": [
|
||||||
{
|
{
|
||||||
"id": "gemini-2.5-pro-exp-03-25",
|
"id": "gemini-2.5-pro-preview-05-06",
|
||||||
"swe_score": 0.638,
|
"swe_score": 0.638,
|
||||||
"cost_per_1m_tokens": null,
|
"cost_per_1m_tokens": null,
|
||||||
"allowed_roles": ["main", "fallback"]
|
"allowed_roles": ["main", "fallback"],
|
||||||
|
"max_tokens": 1048000
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": "gemini-2.5-pro-preview-03-25",
|
||||||
|
"swe_score": 0.638,
|
||||||
|
"cost_per_1m_tokens": null,
|
||||||
|
"allowed_roles": ["main", "fallback"],
|
||||||
|
"max_tokens": 1048000
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "gemini-2.5-flash-preview-04-17",
|
"id": "gemini-2.5-flash-preview-04-17",
|
||||||
"swe_score": 0,
|
"swe_score": 0,
|
||||||
"cost_per_1m_tokens": null,
|
"cost_per_1m_tokens": null,
|
||||||
"allowed_roles": ["main", "fallback"]
|
"allowed_roles": ["main", "fallback"],
|
||||||
|
"max_tokens": 1048000
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "gemini-2.0-flash",
|
"id": "gemini-2.0-flash",
|
||||||
"swe_score": 0.754,
|
"swe_score": 0.754,
|
||||||
"cost_per_1m_tokens": { "input": 0.15, "output": 0.6 },
|
"cost_per_1m_tokens": { "input": 0.15, "output": 0.6 },
|
||||||
"allowed_roles": ["main", "fallback"]
|
"allowed_roles": ["main", "fallback"],
|
||||||
|
"max_tokens": 1048000
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "gemini-2.0-flash-thinking-experimental",
|
"id": "gemini-2.0-flash-lite",
|
||||||
"swe_score": 0.754,
|
|
||||||
"cost_per_1m_tokens": { "input": 0.15, "output": 0.6 },
|
|
||||||
"allowed_roles": ["main", "fallback"]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"id": "gemini-2.0-pro",
|
|
||||||
"swe_score": 0,
|
"swe_score": 0,
|
||||||
"cost_per_1m_tokens": null,
|
"cost_per_1m_tokens": null,
|
||||||
"allowed_roles": ["main", "fallback"]
|
"allowed_roles": ["main", "fallback"],
|
||||||
|
"max_tokens": 1048000
|
||||||
}
|
}
|
||||||
],
|
],
|
||||||
"perplexity": [
|
"perplexity": [
|
||||||
|
|||||||
@@ -146,7 +146,7 @@ function generateResearchUserPrompt(
|
|||||||
"id": <number>, // Sequential ID starting from ${nextSubtaskId}
|
"id": <number>, // Sequential ID starting from ${nextSubtaskId}
|
||||||
"title": "<string>",
|
"title": "<string>",
|
||||||
"description": "<string>",
|
"description": "<string>",
|
||||||
"dependencies": [<number>], // e.g., [${nextSubtaskId + 1}]
|
"dependencies": [<number>], // e.g., [${nextSubtaskId + 1}]. If no dependencies, use an empty array [].
|
||||||
"details": "<string>",
|
"details": "<string>",
|
||||||
"testStrategy": "<string>" // Optional
|
"testStrategy": "<string>" // Optional
|
||||||
},
|
},
|
||||||
@@ -166,6 +166,8 @@ ${contextPrompt}
|
|||||||
CRITICAL: Respond ONLY with a valid JSON object containing a single key "subtasks". The value must be an array of the generated subtasks, strictly matching this structure:
|
CRITICAL: Respond ONLY with a valid JSON object containing a single key "subtasks". The value must be an array of the generated subtasks, strictly matching this structure:
|
||||||
${schemaDescription}
|
${schemaDescription}
|
||||||
|
|
||||||
|
Important: For the 'dependencies' field, if a subtask has no dependencies, you MUST use an empty array, for example: "dependencies": []. Do not use null or omit the field.
|
||||||
|
|
||||||
Do not include ANY explanatory text, markdown, or code block markers. Just the JSON object.`;
|
Do not include ANY explanatory text, markdown, or code block markers. Just the JSON object.`;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -186,7 +188,6 @@ function parseSubtasksFromText(
|
|||||||
parentTaskId,
|
parentTaskId,
|
||||||
logger
|
logger
|
||||||
) {
|
) {
|
||||||
// Add a type check for 'text' before attempting to call .trim()
|
|
||||||
if (typeof text !== 'string') {
|
if (typeof text !== 'string') {
|
||||||
logger.error(
|
logger.error(
|
||||||
`AI response text is not a string. Received type: ${typeof text}, Value: ${text}`
|
`AI response text is not a string. Received type: ${typeof text}, Value: ${text}`
|
||||||
@@ -195,62 +196,136 @@ function parseSubtasksFromText(
|
|||||||
}
|
}
|
||||||
|
|
||||||
if (!text || text.trim() === '') {
|
if (!text || text.trim() === '') {
|
||||||
throw new Error('AI response text is empty after trimming.'); // Clarified error message
|
throw new Error('AI response text is empty after trimming.');
|
||||||
}
|
}
|
||||||
|
|
||||||
let cleanedResponse = text.trim();
|
let jsonToParse = text.trim();
|
||||||
const originalResponseForDebug = cleanedResponse;
|
logger.debug(
|
||||||
|
`Original AI Response for parsing (full length: ${jsonToParse.length}): ${jsonToParse.substring(0, 1000)}...`
|
||||||
// 1. Extract from Markdown code block first
|
|
||||||
const codeBlockMatch = cleanedResponse.match(
|
|
||||||
/```(?:json)?\s*([\s\S]*?)\s*```/
|
|
||||||
);
|
);
|
||||||
if (codeBlockMatch) {
|
|
||||||
cleanedResponse = codeBlockMatch[1].trim();
|
// --- Pre-emptive cleanup for known AI JSON issues ---
|
||||||
} else {
|
// Fix for "dependencies": , or "dependencies":,
|
||||||
// 2. If no code block, find first '{' and last '}' for the object
|
if (jsonToParse.includes('"dependencies":')) {
|
||||||
const firstBrace = cleanedResponse.indexOf('{');
|
const malformedPattern = /"dependencies":\s*,/g;
|
||||||
const lastBrace = cleanedResponse.lastIndexOf('}');
|
if (malformedPattern.test(jsonToParse)) {
|
||||||
if (firstBrace !== -1 && lastBrace > firstBrace) {
|
logger.warn('Attempting to fix malformed "dependencies": , issue.');
|
||||||
cleanedResponse = cleanedResponse.substring(firstBrace, lastBrace + 1);
|
jsonToParse = jsonToParse.replace(
|
||||||
|
malformedPattern,
|
||||||
|
'"dependencies": [],'
|
||||||
|
);
|
||||||
|
logger.debug(
|
||||||
|
`JSON after fixing "dependencies": ${jsonToParse.substring(0, 500)}...`
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// --- End pre-emptive cleanup ---
|
||||||
|
|
||||||
|
let parsedObject;
|
||||||
|
let primaryParseAttemptFailed = false;
|
||||||
|
|
||||||
|
// --- Attempt 1: Simple Parse (with optional Markdown cleanup) ---
|
||||||
|
logger.debug('Attempting simple parse...');
|
||||||
|
try {
|
||||||
|
// Check for markdown code block
|
||||||
|
const codeBlockMatch = jsonToParse.match(/```(?:json)?\s*([\s\S]*?)\s*```/);
|
||||||
|
let contentToParseDirectly = jsonToParse;
|
||||||
|
if (codeBlockMatch && codeBlockMatch[1]) {
|
||||||
|
contentToParseDirectly = codeBlockMatch[1].trim();
|
||||||
|
logger.debug('Simple parse: Extracted content from markdown code block.');
|
||||||
} else {
|
} else {
|
||||||
|
logger.debug(
|
||||||
|
'Simple parse: No markdown code block found, using trimmed original.'
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
parsedObject = JSON.parse(contentToParseDirectly);
|
||||||
|
logger.debug('Simple parse successful!');
|
||||||
|
|
||||||
|
// Quick check if it looks like our target object
|
||||||
|
if (
|
||||||
|
!parsedObject ||
|
||||||
|
typeof parsedObject !== 'object' ||
|
||||||
|
!Array.isArray(parsedObject.subtasks)
|
||||||
|
) {
|
||||||
logger.warn(
|
logger.warn(
|
||||||
'Response does not appear to contain a JSON object structure. Parsing raw response.'
|
'Simple parse succeeded, but result is not the expected {"subtasks": []} structure. Will proceed to advanced extraction.'
|
||||||
|
);
|
||||||
|
primaryParseAttemptFailed = true;
|
||||||
|
parsedObject = null; // Reset parsedObject so we enter the advanced logic
|
||||||
|
}
|
||||||
|
// If it IS the correct structure, we'll skip advanced extraction.
|
||||||
|
} catch (e) {
|
||||||
|
logger.warn(
|
||||||
|
`Simple parse failed: ${e.message}. Proceeding to advanced extraction logic.`
|
||||||
|
);
|
||||||
|
primaryParseAttemptFailed = true;
|
||||||
|
// jsonToParse remains originalResponseForDebug for the advanced logic
|
||||||
|
}
|
||||||
|
|
||||||
|
// --- Attempt 2: Advanced Extraction (if simple parse failed or produced wrong structure) ---
|
||||||
|
if (primaryParseAttemptFailed || !parsedObject) {
|
||||||
|
// Ensure we try advanced if simple parse gave wrong structure
|
||||||
|
logger.debug('Attempting advanced extraction logic...');
|
||||||
|
// Reset jsonToParse to the original full trimmed response for advanced logic
|
||||||
|
jsonToParse = originalResponseForDebug;
|
||||||
|
|
||||||
|
// (Insert the more complex extraction logic here - the one we worked on with:
|
||||||
|
// - targetPattern = '{"subtasks":';
|
||||||
|
// - careful brace counting for that targetPattern
|
||||||
|
// - fallbacks to last '{' and '}' if targetPattern logic fails)
|
||||||
|
// This was the logic from my previous message. Let's assume it's here.
|
||||||
|
// This block should ultimately set `jsonToParse` to the best candidate string.
|
||||||
|
|
||||||
|
// Example snippet of that advanced logic's start:
|
||||||
|
const targetPattern = '{"subtasks":';
|
||||||
|
const patternStartIndex = jsonToParse.indexOf(targetPattern);
|
||||||
|
|
||||||
|
if (patternStartIndex !== -1) {
|
||||||
|
let openBraces = 0;
|
||||||
|
let firstBraceFound = false;
|
||||||
|
let extractedJsonBlock = '';
|
||||||
|
// ... (loop for brace counting as before) ...
|
||||||
|
// ... (if successful, jsonToParse = extractedJsonBlock) ...
|
||||||
|
// ... (if that fails, fallbacks as before) ...
|
||||||
|
} else {
|
||||||
|
// ... (fallback to last '{' and '}' if targetPattern not found) ...
|
||||||
|
}
|
||||||
|
// End of advanced logic excerpt
|
||||||
|
|
||||||
|
logger.debug(
|
||||||
|
`Advanced extraction: JSON string that will be parsed: ${jsonToParse.substring(0, 500)}...`
|
||||||
|
);
|
||||||
|
try {
|
||||||
|
parsedObject = JSON.parse(jsonToParse);
|
||||||
|
logger.debug('Advanced extraction parse successful!');
|
||||||
|
} catch (parseError) {
|
||||||
|
logger.error(
|
||||||
|
`Advanced extraction: Failed to parse JSON object: ${parseError.message}`
|
||||||
|
);
|
||||||
|
logger.error(
|
||||||
|
`Advanced extraction: Problematic JSON string for parse (first 500 chars): ${jsonToParse.substring(0, 500)}`
|
||||||
|
);
|
||||||
|
throw new Error( // Re-throw a more specific error if advanced also fails
|
||||||
|
`Failed to parse JSON response object after both simple and advanced attempts: ${parseError.message}`
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// 3. Attempt to parse the object
|
// --- Validation (applies to successfully parsedObject from either attempt) ---
|
||||||
let parsedObject;
|
|
||||||
try {
|
|
||||||
parsedObject = JSON.parse(cleanedResponse);
|
|
||||||
} catch (parseError) {
|
|
||||||
logger.error(`Failed to parse JSON object: ${parseError.message}`);
|
|
||||||
logger.error(
|
|
||||||
`Problematic JSON string (first 500 chars): ${cleanedResponse.substring(0, 500)}`
|
|
||||||
);
|
|
||||||
logger.error(
|
|
||||||
`Original Raw Response (first 500 chars): ${originalResponseForDebug.substring(0, 500)}`
|
|
||||||
);
|
|
||||||
throw new Error(
|
|
||||||
`Failed to parse JSON response object: ${parseError.message}`
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
// 4. Validate the object structure and extract the subtasks array
|
|
||||||
if (
|
if (
|
||||||
!parsedObject ||
|
!parsedObject ||
|
||||||
typeof parsedObject !== 'object' ||
|
typeof parsedObject !== 'object' ||
|
||||||
!Array.isArray(parsedObject.subtasks)
|
!Array.isArray(parsedObject.subtasks)
|
||||||
) {
|
) {
|
||||||
logger.error(
|
logger.error(
|
||||||
`Parsed content is not an object or missing 'subtasks' array. Content: ${JSON.stringify(parsedObject).substring(0, 200)}`
|
`Final parsed content is not an object or missing 'subtasks' array. Content: ${JSON.stringify(parsedObject).substring(0, 200)}`
|
||||||
);
|
);
|
||||||
throw new Error(
|
throw new Error(
|
||||||
'Parsed AI response is not a valid object containing a "subtasks" array.'
|
'Parsed AI response is not a valid object containing a "subtasks" array after all attempts.'
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
const parsedSubtasks = parsedObject.subtasks; // Extract the array
|
const parsedSubtasks = parsedObject.subtasks;
|
||||||
|
|
||||||
if (expectedCount && parsedSubtasks.length !== expectedCount) {
|
if (expectedCount && parsedSubtasks.length !== expectedCount) {
|
||||||
logger.warn(
|
logger.warn(
|
||||||
@@ -258,7 +333,6 @@ function parseSubtasksFromText(
|
|||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
// 5. Validate and Normalize each subtask using Zod schema
|
|
||||||
let currentId = startId;
|
let currentId = startId;
|
||||||
const validatedSubtasks = [];
|
const validatedSubtasks = [];
|
||||||
const validationErrors = [];
|
const validationErrors = [];
|
||||||
@@ -266,22 +340,21 @@ function parseSubtasksFromText(
|
|||||||
for (const rawSubtask of parsedSubtasks) {
|
for (const rawSubtask of parsedSubtasks) {
|
||||||
const correctedSubtask = {
|
const correctedSubtask = {
|
||||||
...rawSubtask,
|
...rawSubtask,
|
||||||
id: currentId, // Enforce sequential ID
|
id: currentId,
|
||||||
dependencies: Array.isArray(rawSubtask.dependencies)
|
dependencies: Array.isArray(rawSubtask.dependencies)
|
||||||
? rawSubtask.dependencies
|
? rawSubtask.dependencies
|
||||||
.map((dep) => (typeof dep === 'string' ? parseInt(dep, 10) : dep))
|
.map((dep) => (typeof dep === 'string' ? parseInt(dep, 10) : dep))
|
||||||
.filter(
|
.filter(
|
||||||
(depId) => !isNaN(depId) && depId >= startId && depId < currentId
|
(depId) => !isNaN(depId) && depId >= startId && depId < currentId
|
||||||
) // Ensure deps are numbers, valid range
|
)
|
||||||
: [],
|
: [],
|
||||||
status: 'pending' // Enforce pending status
|
status: 'pending'
|
||||||
// parentTaskId can be added if needed: parentTaskId: parentTaskId
|
|
||||||
};
|
};
|
||||||
|
|
||||||
const result = subtaskSchema.safeParse(correctedSubtask);
|
const result = subtaskSchema.safeParse(correctedSubtask);
|
||||||
|
|
||||||
if (result.success) {
|
if (result.success) {
|
||||||
validatedSubtasks.push(result.data); // Add the validated data
|
validatedSubtasks.push(result.data);
|
||||||
} else {
|
} else {
|
||||||
logger.warn(
|
logger.warn(
|
||||||
`Subtask validation failed for raw data: ${JSON.stringify(rawSubtask).substring(0, 100)}...`
|
`Subtask validation failed for raw data: ${JSON.stringify(rawSubtask).substring(0, 100)}...`
|
||||||
@@ -291,18 +364,14 @@ function parseSubtasksFromText(
|
|||||||
logger.warn(errorMessage);
|
logger.warn(errorMessage);
|
||||||
validationErrors.push(`Subtask ${currentId}: ${errorMessage}`);
|
validationErrors.push(`Subtask ${currentId}: ${errorMessage}`);
|
||||||
});
|
});
|
||||||
// Optionally, decide whether to include partially valid tasks or skip them
|
|
||||||
// For now, we'll skip invalid ones
|
|
||||||
}
|
}
|
||||||
currentId++; // Increment ID for the next *potential* subtask
|
currentId++;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (validationErrors.length > 0) {
|
if (validationErrors.length > 0) {
|
||||||
logger.error(
|
logger.error(
|
||||||
`Found ${validationErrors.length} validation errors in the generated subtasks.`
|
`Found ${validationErrors.length} validation errors in the generated subtasks.`
|
||||||
);
|
);
|
||||||
// Optionally throw an error here if strict validation is required
|
|
||||||
// throw new Error(`Subtask validation failed:\n${validationErrors.join('\n')}`);
|
|
||||||
logger.warn('Proceeding with only the successfully validated subtasks.');
|
logger.warn('Proceeding with only the successfully validated subtasks.');
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -311,8 +380,6 @@ function parseSubtasksFromText(
|
|||||||
'AI response contained potential subtasks, but none passed validation.'
|
'AI response contained potential subtasks, but none passed validation.'
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
// Ensure we don't return more than expected, preferring validated ones
|
|
||||||
return validatedSubtasks.slice(0, expectedCount || validatedSubtasks.length);
|
return validatedSubtasks.slice(0, expectedCount || validatedSubtasks.length);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -6,7 +6,7 @@ import { log } from '../../scripts/modules/utils.js';
|
|||||||
* Generates text using OpenAI models via Vercel AI SDK.
|
* Generates text using OpenAI models via Vercel AI SDK.
|
||||||
*
|
*
|
||||||
* @param {object} params - Parameters including apiKey, modelId, messages, maxTokens, temperature.
|
* @param {object} params - Parameters including apiKey, modelId, messages, maxTokens, temperature.
|
||||||
* @returns {Promise<string>} The generated text content.
|
* @returns {Promise<object>} The generated text content and usage.
|
||||||
* @throws {Error} If API call fails.
|
* @throws {Error} If API call fails.
|
||||||
*/
|
*/
|
||||||
export async function generateOpenAIText(params) {
|
export async function generateOpenAIText(params) {
|
||||||
@@ -26,18 +26,14 @@ export async function generateOpenAIText(params) {
|
|||||||
const openaiClient = createOpenAI({ apiKey });
|
const openaiClient = createOpenAI({ apiKey });
|
||||||
|
|
||||||
try {
|
try {
|
||||||
const result = await openaiClient.chat(messages, {
|
const result = await generateText({
|
||||||
// Updated: Use openaiClient.chat directly
|
model: openaiClient(modelId),
|
||||||
model: modelId,
|
messages,
|
||||||
max_tokens: maxTokens,
|
maxTokens,
|
||||||
temperature
|
temperature
|
||||||
});
|
});
|
||||||
|
|
||||||
// Adjust based on actual Vercel SDK response structure for openaiClient.chat
|
if (!result || !result.text) {
|
||||||
// This might need refinement based on testing the SDK's output.
|
|
||||||
const textContent = result?.choices?.[0]?.message?.content?.trim();
|
|
||||||
|
|
||||||
if (!textContent) {
|
|
||||||
log(
|
log(
|
||||||
'warn',
|
'warn',
|
||||||
'OpenAI generateText response did not contain expected content.',
|
'OpenAI generateText response did not contain expected content.',
|
||||||
@@ -49,7 +45,13 @@ export async function generateOpenAIText(params) {
|
|||||||
'debug',
|
'debug',
|
||||||
`OpenAI generateText completed successfully for model: ${modelId}`
|
`OpenAI generateText completed successfully for model: ${modelId}`
|
||||||
);
|
);
|
||||||
return textContent;
|
return {
|
||||||
|
text: result.text.trim(),
|
||||||
|
usage: {
|
||||||
|
inputTokens: result.usage.promptTokens,
|
||||||
|
outputTokens: result.usage.completionTokens
|
||||||
|
}
|
||||||
|
};
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
log(
|
log(
|
||||||
'error',
|
'error',
|
||||||
@@ -88,9 +90,7 @@ export async function streamOpenAIText(params) {
|
|||||||
const openaiClient = createOpenAI({ apiKey });
|
const openaiClient = createOpenAI({ apiKey });
|
||||||
|
|
||||||
try {
|
try {
|
||||||
// Use the streamText function from Vercel AI SDK core
|
|
||||||
const stream = await openaiClient.chat.stream(messages, {
|
const stream = await openaiClient.chat.stream(messages, {
|
||||||
// Updated: Use openaiClient.chat.stream
|
|
||||||
model: modelId,
|
model: modelId,
|
||||||
max_tokens: maxTokens,
|
max_tokens: maxTokens,
|
||||||
temperature
|
temperature
|
||||||
@@ -100,7 +100,6 @@ export async function streamOpenAIText(params) {
|
|||||||
'debug',
|
'debug',
|
||||||
`OpenAI streamText initiated successfully for model: ${modelId}`
|
`OpenAI streamText initiated successfully for model: ${modelId}`
|
||||||
);
|
);
|
||||||
// The Vercel SDK's streamText should directly return the stream object
|
|
||||||
return stream;
|
return stream;
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
log(
|
log(
|
||||||
@@ -118,7 +117,7 @@ export async function streamOpenAIText(params) {
|
|||||||
* Generates structured objects using OpenAI models via Vercel AI SDK.
|
* Generates structured objects using OpenAI models via Vercel AI SDK.
|
||||||
*
|
*
|
||||||
* @param {object} params - Parameters including apiKey, modelId, messages, schema, objectName, maxTokens, temperature.
|
* @param {object} params - Parameters including apiKey, modelId, messages, schema, objectName, maxTokens, temperature.
|
||||||
* @returns {Promise<object>} The generated object matching the schema.
|
* @returns {Promise<object>} The generated object matching the schema and usage.
|
||||||
* @throws {Error} If API call fails or object generation fails.
|
* @throws {Error} If API call fails or object generation fails.
|
||||||
*/
|
*/
|
||||||
export async function generateOpenAIObject(params) {
|
export async function generateOpenAIObject(params) {
|
||||||
@@ -148,7 +147,6 @@ export async function generateOpenAIObject(params) {
|
|||||||
const openaiClient = createOpenAI({ apiKey });
|
const openaiClient = createOpenAI({ apiKey });
|
||||||
|
|
||||||
try {
|
try {
|
||||||
// Use the imported generateObject function from 'ai' package
|
|
||||||
const result = await generateObject({
|
const result = await generateObject({
|
||||||
model: openaiClient(modelId),
|
model: openaiClient(modelId),
|
||||||
schema: schema,
|
schema: schema,
|
||||||
@@ -162,7 +160,21 @@ export async function generateOpenAIObject(params) {
|
|||||||
'debug',
|
'debug',
|
||||||
`OpenAI generateObject completed successfully for model: ${modelId}`
|
`OpenAI generateObject completed successfully for model: ${modelId}`
|
||||||
);
|
);
|
||||||
return result.object;
|
if (!result || typeof result.object === 'undefined') {
|
||||||
|
log(
|
||||||
|
'warn',
|
||||||
|
'OpenAI generateObject response did not contain expected object.',
|
||||||
|
{ result }
|
||||||
|
);
|
||||||
|
throw new Error('Failed to extract object from OpenAI response.');
|
||||||
|
}
|
||||||
|
return {
|
||||||
|
object: result.object,
|
||||||
|
usage: {
|
||||||
|
inputTokens: result.usage.promptTokens,
|
||||||
|
outputTokens: result.usage.completionTokens
|
||||||
|
}
|
||||||
|
};
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
log(
|
log(
|
||||||
'error',
|
'error',
|
||||||
|
|||||||
@@ -31,20 +31,47 @@ async function generateOpenRouterText({
|
|||||||
const openrouter = createOpenRouter({ apiKey });
|
const openrouter = createOpenRouter({ apiKey });
|
||||||
const model = openrouter.chat(modelId); // Assuming chat model
|
const model = openrouter.chat(modelId); // Assuming chat model
|
||||||
|
|
||||||
const { text } = await generateText({
|
// Capture the full result from generateText
|
||||||
|
const result = await generateText({
|
||||||
model,
|
model,
|
||||||
messages,
|
messages,
|
||||||
maxTokens,
|
maxTokens,
|
||||||
temperature,
|
temperature,
|
||||||
...rest // Pass any additional parameters
|
...rest // Pass any additional parameters
|
||||||
});
|
});
|
||||||
return text;
|
|
||||||
|
// Check if text and usage are present
|
||||||
|
if (!result || typeof result.text !== 'string') {
|
||||||
|
log(
|
||||||
|
'warn',
|
||||||
|
`OpenRouter generateText for model ${modelId} did not return expected text.`,
|
||||||
|
{ result }
|
||||||
|
);
|
||||||
|
throw new Error('Failed to extract text from OpenRouter response.');
|
||||||
|
}
|
||||||
|
if (!result.usage) {
|
||||||
|
log(
|
||||||
|
'warn',
|
||||||
|
`OpenRouter generateText for model ${modelId} did not return usage data.`,
|
||||||
|
{ result }
|
||||||
|
);
|
||||||
|
// Decide if this is critical. For now, let it pass but telemetry will be incomplete.
|
||||||
|
}
|
||||||
|
|
||||||
|
log('debug', `OpenRouter generateText completed for model ${modelId}`);
|
||||||
|
// Return text and usage
|
||||||
|
return {
|
||||||
|
text: result.text,
|
||||||
|
usage: {
|
||||||
|
inputTokens: result.usage.promptTokens,
|
||||||
|
outputTokens: result.usage.completionTokens
|
||||||
|
}
|
||||||
|
};
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
log(
|
log(
|
||||||
'error',
|
'error',
|
||||||
`OpenRouter generateText failed for model ${modelId}: ${error.message}`
|
`OpenRouter generateText failed for model ${modelId}: ${error.message}`
|
||||||
);
|
);
|
||||||
// Re-throw the error for the unified layer to handle retries/fallbacks
|
|
||||||
throw error;
|
throw error;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -132,12 +159,12 @@ async function generateOpenRouterObject({
|
|||||||
const openrouter = createOpenRouter({ apiKey });
|
const openrouter = createOpenRouter({ apiKey });
|
||||||
const model = openrouter.chat(modelId);
|
const model = openrouter.chat(modelId);
|
||||||
|
|
||||||
const { object } = await generateObject({
|
// Capture the full result from generateObject
|
||||||
|
const result = await generateObject({
|
||||||
model,
|
model,
|
||||||
schema,
|
schema,
|
||||||
mode: 'tool', // Standard mode for most object generation
|
mode: 'tool',
|
||||||
tool: {
|
tool: {
|
||||||
// Define the tool based on the schema
|
|
||||||
name: objectName,
|
name: objectName,
|
||||||
description: `Generate an object conforming to the ${objectName} schema.`,
|
description: `Generate an object conforming to the ${objectName} schema.`,
|
||||||
parameters: schema
|
parameters: schema
|
||||||
@@ -145,10 +172,36 @@ async function generateOpenRouterObject({
|
|||||||
messages,
|
messages,
|
||||||
maxTokens,
|
maxTokens,
|
||||||
temperature,
|
temperature,
|
||||||
maxRetries, // Pass maxRetries if supported by generateObject
|
maxRetries,
|
||||||
...rest
|
...rest
|
||||||
});
|
});
|
||||||
return object;
|
|
||||||
|
// Check if object and usage are present
|
||||||
|
if (!result || typeof result.object === 'undefined') {
|
||||||
|
log(
|
||||||
|
'warn',
|
||||||
|
`OpenRouter generateObject for model ${modelId} did not return expected object.`,
|
||||||
|
{ result }
|
||||||
|
);
|
||||||
|
throw new Error('Failed to extract object from OpenRouter response.');
|
||||||
|
}
|
||||||
|
if (!result.usage) {
|
||||||
|
log(
|
||||||
|
'warn',
|
||||||
|
`OpenRouter generateObject for model ${modelId} did not return usage data.`,
|
||||||
|
{ result }
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
log('debug', `OpenRouter generateObject completed for model ${modelId}`);
|
||||||
|
// Return object and usage
|
||||||
|
return {
|
||||||
|
object: result.object,
|
||||||
|
usage: {
|
||||||
|
inputTokens: result.usage.promptTokens,
|
||||||
|
outputTokens: result.usage.completionTokens
|
||||||
|
}
|
||||||
|
};
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
log(
|
log(
|
||||||
'error',
|
'error',
|
||||||
|
|||||||
@@ -54,7 +54,14 @@ export async function generatePerplexityText({
|
|||||||
'debug',
|
'debug',
|
||||||
`Perplexity generateText result received. Tokens: ${result.usage.completionTokens}/${result.usage.promptTokens}`
|
`Perplexity generateText result received. Tokens: ${result.usage.completionTokens}/${result.usage.promptTokens}`
|
||||||
);
|
);
|
||||||
return { text: result.text, usage: result.usage };
|
|
||||||
|
return {
|
||||||
|
text: result.text,
|
||||||
|
usage: {
|
||||||
|
inputTokens: result.usage.promptTokens,
|
||||||
|
outputTokens: result.usage.completionTokens
|
||||||
|
}
|
||||||
|
};
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
log('error', `Perplexity generateText failed: ${error.message}`);
|
log('error', `Perplexity generateText failed: ${error.message}`);
|
||||||
throw error;
|
throw error;
|
||||||
@@ -148,7 +155,13 @@ export async function generatePerplexityObject({
|
|||||||
'debug',
|
'debug',
|
||||||
`Perplexity generateObject result received. Tokens: ${result.usage.completionTokens}/${result.usage.promptTokens}`
|
`Perplexity generateObject result received. Tokens: ${result.usage.completionTokens}/${result.usage.promptTokens}`
|
||||||
);
|
);
|
||||||
return { object: result.object, usage: result.usage };
|
return {
|
||||||
|
object: result.object,
|
||||||
|
usage: {
|
||||||
|
inputTokens: result.usage.promptTokens,
|
||||||
|
outputTokens: result.usage.completionTokens
|
||||||
|
}
|
||||||
|
};
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
log(
|
log(
|
||||||
'error',
|
'error',
|
||||||
|
|||||||
@@ -31,7 +31,7 @@ function getClient(apiKey) {
|
|||||||
* @param {Array<object>} params.messages - The messages array (e.g., [{ role: 'user', content: '...' }]).
|
* @param {Array<object>} params.messages - The messages array (e.g., [{ role: 'user', content: '...' }]).
|
||||||
* @param {number} [params.maxTokens] - Maximum tokens for the response.
|
* @param {number} [params.maxTokens] - Maximum tokens for the response.
|
||||||
* @param {number} [params.temperature] - Temperature for generation.
|
* @param {number} [params.temperature] - Temperature for generation.
|
||||||
* @returns {Promise<string>} The generated text content.
|
* @returns {Promise<object>} The generated text content and usage.
|
||||||
* @throws {Error} If the API call fails.
|
* @throws {Error} If the API call fails.
|
||||||
*/
|
*/
|
||||||
export async function generateXaiText({
|
export async function generateXaiText({
|
||||||
@@ -54,7 +54,14 @@ export async function generateXaiText({
|
|||||||
'debug',
|
'debug',
|
||||||
`xAI generateText result received. Tokens: ${result.usage.completionTokens}/${result.usage.promptTokens}`
|
`xAI generateText result received. Tokens: ${result.usage.completionTokens}/${result.usage.promptTokens}`
|
||||||
);
|
);
|
||||||
return result.text;
|
// Return text and usage
|
||||||
|
return {
|
||||||
|
text: result.text,
|
||||||
|
usage: {
|
||||||
|
inputTokens: result.usage.promptTokens,
|
||||||
|
outputTokens: result.usage.completionTokens
|
||||||
|
}
|
||||||
|
};
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
log('error', `xAI generateText failed: ${error.message}`);
|
log('error', `xAI generateText failed: ${error.message}`);
|
||||||
throw error;
|
throw error;
|
||||||
@@ -110,7 +117,7 @@ export async function streamXaiText({
|
|||||||
* @param {number} [params.maxTokens] - Maximum tokens for the response.
|
* @param {number} [params.maxTokens] - Maximum tokens for the response.
|
||||||
* @param {number} [params.temperature] - Temperature for generation.
|
* @param {number} [params.temperature] - Temperature for generation.
|
||||||
* @param {number} [params.maxRetries] - Max retries for validation/generation.
|
* @param {number} [params.maxRetries] - Max retries for validation/generation.
|
||||||
* @returns {Promise<object>} The generated object matching the schema.
|
* @returns {Promise<object>} The generated object matching the schema and its usage.
|
||||||
* @throws {Error} If generation or validation fails.
|
* @throws {Error} If generation or validation fails.
|
||||||
*/
|
*/
|
||||||
export async function generateXaiObject({
|
export async function generateXaiObject({
|
||||||
@@ -137,7 +144,8 @@ export async function generateXaiObject({
|
|||||||
messages: messages,
|
messages: messages,
|
||||||
tool: {
|
tool: {
|
||||||
name: objectName,
|
name: objectName,
|
||||||
description: `Generate a ${objectName} based on the prompt.`
|
description: `Generate a ${objectName} based on the prompt.`,
|
||||||
|
parameters: schema
|
||||||
},
|
},
|
||||||
maxTokens: maxTokens,
|
maxTokens: maxTokens,
|
||||||
temperature: temperature,
|
temperature: temperature,
|
||||||
@@ -147,7 +155,14 @@ export async function generateXaiObject({
|
|||||||
'debug',
|
'debug',
|
||||||
`xAI generateObject result received. Tokens: ${result.usage.completionTokens}/${result.usage.promptTokens}`
|
`xAI generateObject result received. Tokens: ${result.usage.completionTokens}/${result.usage.promptTokens}`
|
||||||
);
|
);
|
||||||
return result.object;
|
// Return object and usage
|
||||||
|
return {
|
||||||
|
object: result.object,
|
||||||
|
usage: {
|
||||||
|
inputTokens: result.usage.promptTokens,
|
||||||
|
outputTokens: result.usage.completionTokens
|
||||||
|
}
|
||||||
|
};
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
log(
|
log(
|
||||||
'error',
|
'error',
|
||||||
|
|||||||
@@ -89,6 +89,155 @@ For each command category, we'll need to:
|
|||||||
### Description: Create a secure mechanism to transmit telemetry data to the external analytics endpoint
|
### Description: Create a secure mechanism to transmit telemetry data to the external analytics endpoint
|
||||||
### Details:
|
### Details:
|
||||||
Implement HTTPS POST request functionality to securely send the telemetry payload to the closed-source analytics API. Include proper encryption in transit using TLS. Implement retry logic and graceful fallback mechanisms for handling transmission failures due to connectivity issues.
|
Implement HTTPS POST request functionality to securely send the telemetry payload to the closed-source analytics API. Include proper encryption in transit using TLS. Implement retry logic and graceful fallback mechanisms for handling transmission failures due to connectivity issues.
|
||||||
|
<info added on 2025-05-14T17:52:40.647Z>
|
||||||
|
To securely send structured JSON telemetry payloads from a Node.js CLI tool to an external analytics backend, follow these steps:
|
||||||
|
|
||||||
|
1. Use the Axios library for HTTPS POST requests. Install it with: npm install axios.
|
||||||
|
2. Store sensitive configuration such as the analytics endpoint URL and any secret keys in environment variables (e.g., process.env.ANALYTICS_URL, process.env.ANALYTICS_KEY). Use dotenv or a similar library to load these securely.
|
||||||
|
3. Construct the telemetry payload as a JSON object with the required fields: userId, commandName, modelUsed, inputTokens, outputTokens, totalTokens, totalCost, and timestamp (ISO 8601).
|
||||||
|
4. Implement robust retry logic using the axios-retry package (npm install axios-retry). Configure exponential backoff with a recommended maximum of 3 retries and a base delay (e.g., 500ms).
|
||||||
|
5. Ensure all requests use HTTPS to guarantee TLS encryption in transit. Axios automatically uses HTTPS when the endpoint URL starts with https://.
|
||||||
|
6. Handle errors gracefully: catch all transmission errors, log them for diagnostics, and ensure failures do not interrupt or degrade the CLI user experience. Optionally, queue failed payloads for later retry if persistent connectivity issues occur.
|
||||||
|
7. Example code snippet:
|
||||||
|
|
||||||
|
require('dotenv').config();
|
||||||
|
const axios = require('axios');
|
||||||
|
const axiosRetry = require('axios-retry');
|
||||||
|
|
||||||
|
axiosRetry(axios, {
|
||||||
|
retries: 3,
|
||||||
|
retryDelay: axiosRetry.exponentialDelay,
|
||||||
|
retryCondition: (error) => axiosRetry.isNetworkOrIdempotentRequestError(error),
|
||||||
|
});
|
||||||
|
|
||||||
|
async function sendTelemetry(payload) {
|
||||||
|
try {
|
||||||
|
await axios.post(process.env.ANALYTICS_URL, payload, {
|
||||||
|
headers: {
|
||||||
|
'Content-Type': 'application/json',
|
||||||
|
'Authorization': `Bearer ${process.env.ANALYTICS_KEY}`,
|
||||||
|
},
|
||||||
|
timeout: 5000,
|
||||||
|
});
|
||||||
|
} catch (error) {
|
||||||
|
// Log error, do not throw to avoid impacting CLI UX
|
||||||
|
console.error('Telemetry transmission failed:', error.message);
|
||||||
|
// Optionally, queue payload for later retry
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
const telemetryPayload = {
|
||||||
|
userId: 'user-123',
|
||||||
|
commandName: 'expand',
|
||||||
|
modelUsed: 'gpt-4',
|
||||||
|
inputTokens: 100,
|
||||||
|
outputTokens: 200,
|
||||||
|
totalTokens: 300,
|
||||||
|
totalCost: 0.0123,
|
||||||
|
timestamp: new Date().toISOString(),
|
||||||
|
};
|
||||||
|
|
||||||
|
sendTelemetry(telemetryPayload);
|
||||||
|
|
||||||
|
8. Best practices:
|
||||||
|
- Never hardcode secrets or endpoint URLs in source code.
|
||||||
|
- Use environment variables and restrict access permissions.
|
||||||
|
- Validate all payload fields before transmission.
|
||||||
|
- Ensure the CLI continues to function even if telemetry transmission fails.
|
||||||
|
|
||||||
|
References: [1][2][3][5]
|
||||||
|
</info added on 2025-05-14T17:52:40.647Z>
|
||||||
|
<info added on 2025-05-14T17:57:18.218Z>
|
||||||
|
User ID Retrieval and Generation:
|
||||||
|
|
||||||
|
The telemetry system must securely retrieve the user ID from the .taskmasterconfig globals, where it should have been generated during the initialization phase. Implementation should:
|
||||||
|
|
||||||
|
1. Check for an existing user ID in the .taskmasterconfig file before sending any telemetry data.
|
||||||
|
2. If no user ID exists (for users who run AI commands without prior initialization or during upgrades), automatically generate a new UUID v4 and persist it to the .taskmasterconfig file.
|
||||||
|
3. Implement a getOrCreateUserId() function that:
|
||||||
|
- Reads from the global configuration file
|
||||||
|
- Returns the existing ID if present
|
||||||
|
- Generates a cryptographically secure UUID v4 if not present
|
||||||
|
- Saves the newly generated ID to the configuration file
|
||||||
|
- Handles file access errors gracefully
|
||||||
|
|
||||||
|
4. Example implementation:
|
||||||
|
```javascript
|
||||||
|
const fs = require('fs');
|
||||||
|
const path = require('path');
|
||||||
|
const { v4: uuidv4 } = require('uuid');
|
||||||
|
|
||||||
|
function getOrCreateUserId() {
|
||||||
|
const configPath = path.join(os.homedir(), '.taskmasterconfig');
|
||||||
|
|
||||||
|
try {
|
||||||
|
// Try to read existing config
|
||||||
|
const config = JSON.parse(fs.readFileSync(configPath, 'utf8'));
|
||||||
|
|
||||||
|
if (config.userId) {
|
||||||
|
return config.userId;
|
||||||
|
}
|
||||||
|
|
||||||
|
// No user ID found, generate and save
|
||||||
|
config.userId = uuidv4();
|
||||||
|
fs.writeFileSync(configPath, JSON.stringify(config, null, 2));
|
||||||
|
return config.userId;
|
||||||
|
} catch (error) {
|
||||||
|
// Handle case where config doesn't exist or is invalid
|
||||||
|
const userId = uuidv4();
|
||||||
|
const newConfig = { userId };
|
||||||
|
|
||||||
|
try {
|
||||||
|
fs.writeFileSync(configPath, JSON.stringify(newConfig, null, 2));
|
||||||
|
} catch (writeError) {
|
||||||
|
console.error('Failed to save user ID to config:', writeError.message);
|
||||||
|
}
|
||||||
|
|
||||||
|
return userId;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
5. Ensure this function is called before constructing any telemetry payload to guarantee a consistent user ID across all telemetry events.
|
||||||
|
</info added on 2025-05-14T17:57:18.218Z>
|
||||||
|
<info added on 2025-05-15T18:45:32.123Z>
|
||||||
|
**Invocation Point for Sending Telemetry:**
|
||||||
|
* The primary invocation for sending the telemetry payload should occur in `scripts/modules/ai-services-unified.js`.
|
||||||
|
* This should happen *after* the `telemetryData` object is fully constructed and *after* user consent (from subtask 77.3) has been confirmed.
|
||||||
|
|
||||||
|
**Dedicated Module for Transmission Logic:**
|
||||||
|
* The actual HTTPS POST request mechanism, including TLS encryption, retry logic, and graceful fallbacks, should be implemented in a new, separate module (e.g., `scripts/modules/telemetry-sender.js` or `scripts/utils/telemetry-client.js`).
|
||||||
|
* This module will be imported and utilized by `scripts/modules/ai-services-unified.js`.
|
||||||
|
|
||||||
|
**Key Considerations:**
|
||||||
|
* Robust error handling must be in place for the telemetry transmission process; failures should be logged locally and must not disrupt core application functionality.
|
||||||
|
* The entire telemetry sending process is contingent upon explicit user consent as outlined in subtask 77.3.
|
||||||
|
|
||||||
|
**Implementation Plan:**
|
||||||
|
1. Create a new module `scripts/utils/telemetry-client.js` with the following functions:
|
||||||
|
- `sendTelemetryData(telemetryPayload)`: Main function that handles the HTTPS POST request
|
||||||
|
- `isUserConsentGiven()`: Helper function to check if user has consented to telemetry
|
||||||
|
- `logTelemetryError(error)`: Helper function for consistent error logging
|
||||||
|
|
||||||
|
2. In `ai-services-unified.js`, after constructing the telemetryData object:
|
||||||
|
```javascript
|
||||||
|
const telemetryClient = require('../utils/telemetry-client');
|
||||||
|
|
||||||
|
// After telemetryData is constructed
|
||||||
|
if (telemetryClient.isUserConsentGiven()) {
|
||||||
|
// Non-blocking telemetry submission
|
||||||
|
telemetryClient.sendTelemetryData(telemetryData)
|
||||||
|
.catch(error => telemetryClient.logTelemetryError(error));
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
3. Ensure the telemetry-client module implements:
|
||||||
|
- Axios with retry logic for robust HTTP requests
|
||||||
|
- Proper TLS encryption via HTTPS
|
||||||
|
- Comprehensive error handling
|
||||||
|
- Configuration loading from environment variables
|
||||||
|
- Validation of payload data before transmission
|
||||||
|
</info added on 2025-05-15T18:45:32.123Z>
|
||||||
|
|
||||||
## 3. Develop user consent and privacy notice system [deferred]
|
## 3. Develop user consent and privacy notice system [deferred]
|
||||||
### Dependencies: None
|
### Dependencies: None
|
||||||
@@ -411,3 +560,35 @@ Update the provider functions in `src/ai-providers/perplexity.js` to ensure they
|
|||||||
### Details:
|
### Details:
|
||||||
Update the provider functions in `src/ai-providers/xai.js` to ensure they return telemetry-compatible results:\n\n1. **`generateXaiText`**: Return `{ text: ..., usage: { inputTokens: ..., outputTokens: ... } }`. Extract token counts from the Vercel AI SDK result.\n2. **`generateXaiObject`**: Return `{ object: ..., usage: { inputTokens: ..., outputTokens: ... } }`. Extract token counts.\n3. **`streamXaiText`**: Return the *full stream result object* returned by the Vercel AI SDK's `streamText`, not just the `textStream` property. The full object contains usage information.\n\nReference `anthropic.js` for the pattern.
|
Update the provider functions in `src/ai-providers/xai.js` to ensure they return telemetry-compatible results:\n\n1. **`generateXaiText`**: Return `{ text: ..., usage: { inputTokens: ..., outputTokens: ... } }`. Extract token counts from the Vercel AI SDK result.\n2. **`generateXaiObject`**: Return `{ object: ..., usage: { inputTokens: ..., outputTokens: ... } }`. Extract token counts.\n3. **`streamXaiText`**: Return the *full stream result object* returned by the Vercel AI SDK's `streamText`, not just the `textStream` property. The full object contains usage information.\n\nReference `anthropic.js` for the pattern.
|
||||||
|
|
||||||
|
## 18. Create dedicated telemetry transmission module [pending]
|
||||||
|
### Dependencies: 77.1, 77.3
|
||||||
|
### Description: Implement a separate module for handling telemetry transmission logic
|
||||||
|
### Details:
|
||||||
|
Create a new module (e.g., `scripts/utils/telemetry-client.js`) that encapsulates all telemetry transmission functionality:
|
||||||
|
|
||||||
|
1. Implement core functions:
|
||||||
|
- `sendTelemetryData(telemetryPayload)`: Main function to handle HTTPS POST requests
|
||||||
|
- `isUserConsentGiven()`: Helper to check if user has consented to telemetry
|
||||||
|
- `logTelemetryError(error)`: Helper for consistent error logging
|
||||||
|
|
||||||
|
2. Use Axios with retry logic:
|
||||||
|
- Configure with exponential backoff (max 3 retries, 500ms base delay)
|
||||||
|
- Implement proper TLS encryption via HTTPS
|
||||||
|
- Set appropriate timeouts (5000ms recommended)
|
||||||
|
|
||||||
|
3. Implement robust error handling:
|
||||||
|
- Catch all transmission errors
|
||||||
|
- Log failures locally without disrupting application flow
|
||||||
|
- Ensure failures are transparent to users
|
||||||
|
|
||||||
|
4. Configure securely:
|
||||||
|
- Load endpoint URL and authentication from environment variables
|
||||||
|
- Never hardcode secrets in source code
|
||||||
|
- Validate payload data before transmission
|
||||||
|
|
||||||
|
5. Integration with ai-services-unified.js:
|
||||||
|
- Import the telemetry-client module
|
||||||
|
- Call after telemetryData object is constructed
|
||||||
|
- Only send if user consent is confirmed
|
||||||
|
- Use non-blocking approach to avoid performance impact
|
||||||
|
|
||||||
|
|||||||
File diff suppressed because one or more lines are too long
Reference in New Issue
Block a user