fix(ai,tasks): Enhance AI provider robustness and task processing
This commit introduces several improvements to AI interactions and
task management functionalities:
- AI Provider Enhancements (for Telemetry & Robustness):
  - `openrouter.js`:
    - Added a check in `generateOpenRouterText` to ensure `result.text`
      is a string, throwing an error if not. This prevents downstream
      errors.
  - `openai.js`, `openrouter.js`, `xai.js`:
    - Standardized return structures for their respective `generateText`
      and `generateObject` functions to consistently include `text`/`object`
      and `usage` fields. This aligns them with other providers (like
      Anthropic, Google, Perplexity) for consistent telemetry data
      collection, as part of implementing subtask 77.14 and similar work.
      A short sketch of the standardized shape follows this list.
- Task Expansion:
  - Updated the expansion prompt to be more explicit about using an
    empty array for empty list fields, to better guide AI output.
  - Implemented a pre-emptive cleanup step to replace malformed values
    before JSON parsing (an illustrative sketch follows this list). This
    improves resilience to AI output quirks, particularly observed with
    Perplexity.
- Fixes an issue in commands.js where `successfulRemovals` would be
  undefined; it is now read correctly from the result variable.
- Updates supported models for Gemini.
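
For illustration, a minimal sketch of the standardized return shape
described above. The `normalizeUsage` helper is hypothetical (not part
of this commit); the `promptTokens`/`completionTokens` fields come from
the Vercel AI SDK result, as the diff below shows:

    // Hypothetical helper showing the shape every provider now returns.
    // Maps Vercel AI SDK usage fields to the telemetry fields used here.
    function normalizeUsage(usage = {}) {
        return {
            inputTokens: usage.promptTokens,
            outputTokens: usage.completionTokens
        };
    }

    // generateText-style functions resolve to:   { text, usage: normalizeUsage(u) }
    // generateObject-style functions resolve to: { object, usage: normalizeUsage(u) }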
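And a hedged sketch of the kind of pre-emptive JSON cleanup meant above.
The exact malformed pattern the commit handles is not visible in this
diff; trailing commas are assumed here as a representative quirk:

    // Illustrative only: assumes trailing commas as the malformed input.
    function cleanupJsonString(raw) {
        // Drop trailing commas before a closing brace/bracket: '{"a": 1,}' -> '{"a": 1}'
        return raw.replace(/,\s*([}\]])/g, '$1');
    }

    function parseAiJson(raw) {
        try {
            return JSON.parse(raw);
        } catch {
            // Retry once after cleaning common AI formatting quirks
            return JSON.parse(cleanupJsonString(raw));
        }
    }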
These changes address issues observed during E2E tests, enhance the
reliability of AI-driven task analysis and expansion, and promote
consistent telemetry data across multiple AI providers.
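
As a usage illustration (not code from this commit), a unified service
layer can now consume every provider the same way; the helper below and
its `recordTelemetry` callback are hypothetical:

    // Hypothetical consumer relying on the standardized { text|object, usage } shape.
    async function callWithTelemetry(providerFn, params, recordTelemetry) {
        const result = await providerFn(params);
        // usage is now always { inputTokens, outputTokens }, whichever provider ran
        recordTelemetry({
            inputTokens: result.usage?.inputTokens ?? 0,
            outputTokens: result.usage?.outputTokens ?? 0
        });
        return result.text ?? result.object;
    }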
@@ -6,7 +6,7 @@ import { log } from '../../scripts/modules/utils.js';
  * Generates text using OpenAI models via Vercel AI SDK.
  *
  * @param {object} params - Parameters including apiKey, modelId, messages, maxTokens, temperature.
- * @returns {Promise<string>} The generated text content.
+ * @returns {Promise<object>} The generated text content and usage.
  * @throws {Error} If API call fails.
  */
 export async function generateOpenAIText(params) {
@@ -26,18 +26,14 @@ export async function generateOpenAIText(params) {
 	const openaiClient = createOpenAI({ apiKey });

 	try {
-		const result = await openaiClient.chat(messages, {
-			// Updated: Use openaiClient.chat directly
-			model: modelId,
-			max_tokens: maxTokens,
+		const result = await generateText({
+			model: openaiClient(modelId),
+			messages,
+			maxTokens,
 			temperature
 		});

-		// Adjust based on actual Vercel SDK response structure for openaiClient.chat
-		// This might need refinement based on testing the SDK's output.
-		const textContent = result?.choices?.[0]?.message?.content?.trim();
-
-		if (!textContent) {
+		if (!result || !result.text) {
 			log(
 				'warn',
 				'OpenAI generateText response did not contain expected content.',
@@ -49,7 +45,13 @@ export async function generateOpenAIText(params) {
 			'debug',
 			`OpenAI generateText completed successfully for model: ${modelId}`
 		);
-		return textContent;
+		return {
+			text: result.text.trim(),
+			usage: {
+				inputTokens: result.usage.promptTokens,
+				outputTokens: result.usage.completionTokens
+			}
+		};
 	} catch (error) {
 		log(
 			'error',
@@ -88,9 +90,7 @@ export async function streamOpenAIText(params) {
 	const openaiClient = createOpenAI({ apiKey });

 	try {
-		// Use the streamText function from Vercel AI SDK core
 		const stream = await openaiClient.chat.stream(messages, {
-			// Updated: Use openaiClient.chat.stream
 			model: modelId,
 			max_tokens: maxTokens,
 			temperature
@@ -100,7 +100,6 @@ export async function streamOpenAIText(params) {
 			'debug',
 			`OpenAI streamText initiated successfully for model: ${modelId}`
 		);
-		// The Vercel SDK's streamText should directly return the stream object
 		return stream;
 	} catch (error) {
 		log(
@@ -118,7 +117,7 @@ export async function streamOpenAIText(params) {
  * Generates structured objects using OpenAI models via Vercel AI SDK.
  *
  * @param {object} params - Parameters including apiKey, modelId, messages, schema, objectName, maxTokens, temperature.
- * @returns {Promise<object>} The generated object matching the schema.
+ * @returns {Promise<object>} The generated object matching the schema and usage.
  * @throws {Error} If API call fails or object generation fails.
  */
 export async function generateOpenAIObject(params) {
@@ -148,7 +147,6 @@ export async function generateOpenAIObject(params) {
 	const openaiClient = createOpenAI({ apiKey });

 	try {
-		// Use the imported generateObject function from 'ai' package
 		const result = await generateObject({
 			model: openaiClient(modelId),
 			schema: schema,
@@ -162,7 +160,21 @@ export async function generateOpenAIObject(params) {
 			'debug',
 			`OpenAI generateObject completed successfully for model: ${modelId}`
 		);
-		return result.object;
+		if (!result || typeof result.object === 'undefined') {
+			log(
+				'warn',
+				'OpenAI generateObject response did not contain expected object.',
+				{ result }
+			);
+			throw new Error('Failed to extract object from OpenAI response.');
+		}
+		return {
+			object: result.object,
+			usage: {
+				inputTokens: result.usage.promptTokens,
+				outputTokens: result.usage.completionTokens
+			}
+		};
 	} catch (error) {
 		log(
 			'error',

@@ -31,20 +31,47 @@ async function generateOpenRouterText({
 		const openrouter = createOpenRouter({ apiKey });
 		const model = openrouter.chat(modelId); // Assuming chat model

-		const { text } = await generateText({
+		// Capture the full result from generateText
+		const result = await generateText({
 			model,
 			messages,
 			maxTokens,
 			temperature,
 			...rest // Pass any additional parameters
 		});
-		return text;
+
+		// Check if text and usage are present
+		if (!result || typeof result.text !== 'string') {
+			log(
+				'warn',
+				`OpenRouter generateText for model ${modelId} did not return expected text.`,
+				{ result }
+			);
+			throw new Error('Failed to extract text from OpenRouter response.');
+		}
+		if (!result.usage) {
+			log(
+				'warn',
+				`OpenRouter generateText for model ${modelId} did not return usage data.`,
+				{ result }
+			);
+			// Decide if this is critical. For now, let it pass but telemetry will be incomplete.
+		}
+
+		log('debug', `OpenRouter generateText completed for model ${modelId}`);
+		// Return text and usage
+		return {
+			text: result.text,
+			usage: {
+				inputTokens: result.usage.promptTokens,
+				outputTokens: result.usage.completionTokens
+			}
+		};
 	} catch (error) {
 		log(
 			'error',
 			`OpenRouter generateText failed for model ${modelId}: ${error.message}`
 		);
 		// Re-throw the error for the unified layer to handle retries/fallbacks
 		throw error;
 	}
 }
@@ -132,12 +159,12 @@ async function generateOpenRouterObject({
 		const openrouter = createOpenRouter({ apiKey });
 		const model = openrouter.chat(modelId);

-		const { object } = await generateObject({
+		// Capture the full result from generateObject
+		const result = await generateObject({
 			model,
 			schema,
-			mode: 'tool', // Standard mode for most object generation
+			mode: 'tool',
 			tool: {
-				// Define the tool based on the schema
 				name: objectName,
 				description: `Generate an object conforming to the ${objectName} schema.`,
 				parameters: schema
@@ -145,10 +172,36 @@ async function generateOpenRouterObject({
 			messages,
 			maxTokens,
 			temperature,
-			maxRetries, // Pass maxRetries if supported by generateObject
+			maxRetries,
 			...rest
 		});
-		return object;
+
+		// Check if object and usage are present
+		if (!result || typeof result.object === 'undefined') {
+			log(
+				'warn',
+				`OpenRouter generateObject for model ${modelId} did not return expected object.`,
+				{ result }
+			);
+			throw new Error('Failed to extract object from OpenRouter response.');
+		}
+		if (!result.usage) {
+			log(
+				'warn',
+				`OpenRouter generateObject for model ${modelId} did not return usage data.`,
+				{ result }
+			);
+		}
+
+		log('debug', `OpenRouter generateObject completed for model ${modelId}`);
+		// Return object and usage
+		return {
+			object: result.object,
+			usage: {
+				inputTokens: result.usage.promptTokens,
+				outputTokens: result.usage.completionTokens
+			}
+		};
 	} catch (error) {
 		log(
 			'error',

@@ -54,7 +54,14 @@ export async function generatePerplexityText({
 			'debug',
 			`Perplexity generateText result received. Tokens: ${result.usage.completionTokens}/${result.usage.promptTokens}`
 		);
-		return { text: result.text, usage: result.usage };
+
+		return {
+			text: result.text,
+			usage: {
+				inputTokens: result.usage.promptTokens,
+				outputTokens: result.usage.completionTokens
+			}
+		};
 	} catch (error) {
 		log('error', `Perplexity generateText failed: ${error.message}`);
 		throw error;
@@ -148,7 +155,13 @@ export async function generatePerplexityObject({
 			'debug',
 			`Perplexity generateObject result received. Tokens: ${result.usage.completionTokens}/${result.usage.promptTokens}`
 		);
-		return { object: result.object, usage: result.usage };
+		return {
+			object: result.object,
+			usage: {
+				inputTokens: result.usage.promptTokens,
+				outputTokens: result.usage.completionTokens
+			}
+		};
 	} catch (error) {
 		log(
 			'error',

@@ -31,7 +31,7 @@ function getClient(apiKey) {
  * @param {Array<object>} params.messages - The messages array (e.g., [{ role: 'user', content: '...' }]).
  * @param {number} [params.maxTokens] - Maximum tokens for the response.
  * @param {number} [params.temperature] - Temperature for generation.
- * @returns {Promise<string>} The generated text content.
+ * @returns {Promise<object>} The generated text content and usage.
  * @throws {Error} If the API call fails.
  */
 export async function generateXaiText({
@@ -54,7 +54,14 @@ export async function generateXaiText({
 			'debug',
 			`xAI generateText result received. Tokens: ${result.usage.completionTokens}/${result.usage.promptTokens}`
 		);
-		return result.text;
+		// Return text and usage
+		return {
+			text: result.text,
+			usage: {
+				inputTokens: result.usage.promptTokens,
+				outputTokens: result.usage.completionTokens
+			}
+		};
 	} catch (error) {
 		log('error', `xAI generateText failed: ${error.message}`);
 		throw error;
@@ -110,7 +117,7 @@ export async function streamXaiText({
  * @param {number} [params.maxTokens] - Maximum tokens for the response.
  * @param {number} [params.temperature] - Temperature for generation.
  * @param {number} [params.maxRetries] - Max retries for validation/generation.
- * @returns {Promise<object>} The generated object matching the schema.
+ * @returns {Promise<object>} The generated object matching the schema and its usage.
  * @throws {Error} If generation or validation fails.
  */
 export async function generateXaiObject({
@@ -137,7 +144,8 @@ export async function generateXaiObject({
 			messages: messages,
 			tool: {
 				name: objectName,
-				description: `Generate a ${objectName} based on the prompt.`
+				description: `Generate a ${objectName} based on the prompt.`,
+				parameters: schema
 			},
 			maxTokens: maxTokens,
 			temperature: temperature,
@@ -147,7 +155,14 @@ export async function generateXaiObject({
 			'debug',
 			`xAI generateObject result received. Tokens: ${result.usage.completionTokens}/${result.usage.promptTokens}`
 		);
-		return result.object;
+		// Return object and usage
+		return {
+			object: result.object,
+			usage: {
+				inputTokens: result.usage.promptTokens,
+				outputTokens: result.usage.completionTokens
+			}
+		};
 	} catch (error) {
 		log(
 			'error',