fix(ai,tasks): Enhance AI provider robustness and task processing

This commit introduces several improvements to AI interactions and
task management functionalities:

- AI Provider Enhancements (for Telemetry & Robustness):
    - :
        - Added a check in  to ensure  is a string, throwing an error
          if not. This prevents downstream errors (e.g., in ). A hedged
          sketch of such a guard follows this list.
    - , , :
        - Standardized the return structures of their respective text and
          object generation functions to consistently include text/object
          and usage fields. This aligns them with other providers (like
          Anthropic, Google, Perplexity) for consistent telemetry data
          collection, as part of implementing subtask 77.14 and similar work.
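
      As a rough illustration, the guard likely resembles this minimal
      sketch (the function and variable names here are assumptions, since
      the identifiers were stripped from this message):

          // Hypothetical guard: fail fast instead of passing a non-string
          // value (e.g. undefined or an object) to downstream string handling.
          if (typeof responseText !== 'string') {
              throw new Error(
                  `Expected a string response, got ${typeof responseText}`
              );
          }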

- Task Expansion ():
    - Updated  to be more explicit about using an empty array  for
      empty  to better guide AI output.
    - Implemented a pre-emptive cleanup step in  to replace malformed
      with  before JSON parsing. This improves resilience to AI output
      quirks, particularly those observed with Perplexity. A sketch of
      such a cleanup follows this list.
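
      The cleanup presumably looks something like this minimal sketch (the
      regex and the subtasks/dependencies field names are illustrative
      assumptions; the exact malformed token is not spelled out above):

          // Hypothetical pre-parse cleanup: normalize malformed empty-array
          // values (e.g. null or "") to [] so that JSON.parse succeeds.
          function cleanupExpansionResponse(raw) {
              return raw.replace(
                  /"(subtasks|dependencies)"\s*:\s*(null|"")/g,
                  '"$1": []'
              );
          }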

- Fixes an issue in commands.js where successfulRemovals would be
  undefined; it is now properly derived from the result variable (a
  sketch follows below).
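
  A plausible shape of that fix (every name except successfulRemovals is
  an assumption):

      const result = await removeTasks(taskIdsToRemove);
      // Before: successfulRemovals was read without ever being assigned.
      // After: derive it from the result returned by the removal call.
      const successfulRemovals = result.successfulRemovals;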

- Updates the supported models for Gemini.

These changes address issues observed during E2E tests, enhance the
reliability of AI-driven task analysis and expansion, and promote
consistent telemetry data across multiple AI providers.
Author: Eyal Toledano
Date: 2025-05-14 19:04:03 -04:00
Commit: ca5ec03cd8 (parent: 79a41543d5)
10 changed files with 490 additions and 131 deletions

@@ -6,7 +6,7 @@ import { log } from '../../scripts/modules/utils.js';
  * Generates text using OpenAI models via Vercel AI SDK.
  *
  * @param {object} params - Parameters including apiKey, modelId, messages, maxTokens, temperature.
- * @returns {Promise<string>} The generated text content.
+ * @returns {Promise<object>} The generated text content and usage.
  * @throws {Error} If API call fails.
  */
 export async function generateOpenAIText(params) {
@@ -26,18 +26,14 @@ export async function generateOpenAIText(params) {
 	const openaiClient = createOpenAI({ apiKey });
 	try {
-		const result = await openaiClient.chat(messages, {
-			// Updated: Use openaiClient.chat directly
-			model: modelId,
-			max_tokens: maxTokens,
+		const result = await generateText({
+			model: openaiClient(modelId),
+			messages,
+			maxTokens,
 			temperature
 		});
-		// Adjust based on actual Vercel SDK response structure for openaiClient.chat
-		// This might need refinement based on testing the SDK's output.
-		const textContent = result?.choices?.[0]?.message?.content?.trim();
-		if (!textContent) {
+		if (!result || !result.text) {
 			log(
 				'warn',
 				'OpenAI generateText response did not contain expected content.',
@@ -49,7 +45,13 @@ export async function generateOpenAIText(params) {
 			'debug',
 			`OpenAI generateText completed successfully for model: ${modelId}`
 		);
-		return textContent;
+		return {
+			text: result.text.trim(),
+			usage: {
+				inputTokens: result.usage.promptTokens,
+				outputTokens: result.usage.completionTokens
+			}
+		};
 	} catch (error) {
 		log(
 			'error',
@@ -88,9 +90,7 @@ export async function streamOpenAIText(params) {
 	const openaiClient = createOpenAI({ apiKey });
 	try {
-		// Use the streamText function from Vercel AI SDK core
 		const stream = await openaiClient.chat.stream(messages, {
-			// Updated: Use openaiClient.chat.stream
 			model: modelId,
 			max_tokens: maxTokens,
 			temperature
@@ -100,7 +100,6 @@ export async function streamOpenAIText(params) {
 			'debug',
 			`OpenAI streamText initiated successfully for model: ${modelId}`
 		);
-		// The Vercel SDK's streamText should directly return the stream object
 		return stream;
 	} catch (error) {
 		log(
@@ -118,7 +117,7 @@ export async function streamOpenAIText(params) {
  * Generates structured objects using OpenAI models via Vercel AI SDK.
  *
  * @param {object} params - Parameters including apiKey, modelId, messages, schema, objectName, maxTokens, temperature.
- * @returns {Promise<object>} The generated object matching the schema.
+ * @returns {Promise<object>} The generated object matching the schema and usage.
  * @throws {Error} If API call fails or object generation fails.
  */
 export async function generateOpenAIObject(params) {
@@ -148,7 +147,6 @@ export async function generateOpenAIObject(params) {
 	const openaiClient = createOpenAI({ apiKey });
 	try {
-		// Use the imported generateObject function from 'ai' package
 		const result = await generateObject({
 			model: openaiClient(modelId),
 			schema: schema,
@@ -162,7 +160,21 @@ export async function generateOpenAIObject(params) {
 			'debug',
 			`OpenAI generateObject completed successfully for model: ${modelId}`
 		);
-		return result.object;
+		if (!result || typeof result.object === 'undefined') {
+			log(
+				'warn',
+				'OpenAI generateObject response did not contain expected object.',
+				{ result }
+			);
+			throw new Error('Failed to extract object from OpenAI response.');
+		}
+		return {
+			object: result.object,
+			usage: {
+				inputTokens: result.usage.promptTokens,
+				outputTokens: result.usage.completionTokens
+			}
+		};
 	} catch (error) {
 		log(
 			'error',
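
For context, a caller consuming the new return shape of generateOpenAIText
might look like the following sketch (the logTelemetry helper is a
hypothetical stand-in for the telemetry collection this commit supports):

	const { text, usage } = await generateOpenAIText({
		apiKey,
		modelId,
		messages,
		maxTokens,
		temperature
	});
	// usage.inputTokens / usage.outputTokens map to the Vercel AI SDK's
	// promptTokens / completionTokens, as seen in the hunks above.
	logTelemetry({ modelId, ...usage });
	console.log(text);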