fix(ai,tasks): Enhance AI provider robustness and task processing

This commit introduces several improvements to AI interactions and
task management functionalities:

- AI Provider Enhancements (for Telemetry & Robustness):
    - Added a check that a provider's generation result is a string
      before further processing, throwing an error if not. This
      prevents harder-to-trace errors further downstream.
    - xAI (shown in the diff below) and two sibling providers:
        - Standardized the return structures of their text- and
          object-generation functions (generateXaiText and
          generateXaiObject in the xAI case) to consistently include
          text/object and usage fields. This aligns them with other
          providers (like Anthropic, Google, Perplexity) for consistent
          telemetry data collection, as part of implementing subtask
          77.14 and similar work. A sketch of the standardized shape
          follows this list.
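
      A minimal sketch of the standardized return shape, assuming the
      same promptTokens/completionTokens mapping the xAI diff below
      uses (normalizeTextResult is a hypothetical helper name, not one
      from this commit):

          // Map the SDK's usage fields onto the shared telemetry shape:
          // { text, usage: { inputTokens, outputTokens } }.
          function normalizeTextResult(result) {
              return {
                  text: result.text,
                  usage: {
                      inputTokens: result.usage.promptTokens,
                      outputTokens: result.usage.completionTokens
                  }
              };
          }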

- Task Expansion:
    - Updated the expansion prompt to state explicitly that an empty
      array ([]) should be used for empty list fields, to better guide
      AI output.
    - Implemented a pre-emptive cleanup step that rewrites malformed
      fragments in the raw response before JSON parsing. This improves
      resilience to AI output quirks, particularly those observed with
      Perplexity. A sketch of the general approach follows this list.
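
      A sketch of the pre-parse cleanup idea; the exact pattern
      replaced in this commit was elided from the message, so the
      fence-stripping below is only an illustrative assumption
      (cleanupAiJson is a hypothetical name):

          // Strip Markdown code fences some models wrap around their
          // JSON, then hand the remainder to JSON.parse.
          function cleanupAiJson(raw) {
              const cleaned = raw
                  .trim()
                  .replace(/^```(?:json)?\s*/i, '')
                  .replace(/\s*```$/, '');
              return JSON.parse(cleaned);
          }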

- Fixes an issue in commands.js where successfulRemovals would be
  undefined; it is now read from the result variable (see the sketch
  below).
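
  A hypothetical sketch of that fix (removeTasks and the shape of its
  result are illustrative assumptions, not the repo's actual API):

      // Read the count from the returned result object instead of an
      // undefined local variable.
      const result = await removeTasks(taskIds);
      const successfulRemovals = result.successfulRemovals;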

- Updates supported models for Gemini.

These changes address issues observed during E2E tests, enhance the
reliability of AI-driven task analysis and expansion, and promote
consistent telemetry data across multiple AI providers.
Author: Eyal Toledano
Date: 2025-05-14 19:04:03 -04:00
Parent: 79a41543d5
Commit: ca5ec03cd8
10 changed files with 490 additions and 131 deletions


@@ -31,7 +31,7 @@ function getClient(apiKey) {
* @param {Array<object>} params.messages - The messages array (e.g., [{ role: 'user', content: '...' }]).
* @param {number} [params.maxTokens] - Maximum tokens for the response.
* @param {number} [params.temperature] - Temperature for generation.
- * @returns {Promise<string>} The generated text content.
+ * @returns {Promise<object>} The generated text content and usage.
* @throws {Error} If the API call fails.
*/
export async function generateXaiText({
@@ -54,7 +54,14 @@ export async function generateXaiText({
'debug',
`xAI generateText result received. Tokens: ${result.usage.completionTokens}/${result.usage.promptTokens}`
);
- return result.text;
+ // Return text and usage
+ return {
+   text: result.text,
+   usage: {
+     inputTokens: result.usage.promptTokens,
+     outputTokens: result.usage.completionTokens
+   }
+ };
} catch (error) {
log('error', `xAI generateText failed: ${error.message}`);
throw error;
@@ -110,7 +117,7 @@ export async function streamXaiText({
* @param {number} [params.maxTokens] - Maximum tokens for the response.
* @param {number} [params.temperature] - Temperature for generation.
* @param {number} [params.maxRetries] - Max retries for validation/generation.
- * @returns {Promise<object>} The generated object matching the schema.
+ * @returns {Promise<object>} The generated object matching the schema and its usage.
* @throws {Error} If generation or validation fails.
*/
export async function generateXaiObject({
@@ -137,7 +144,8 @@ export async function generateXaiObject({
messages: messages,
tool: {
name: objectName,
- description: `Generate a ${objectName} based on the prompt.`
+ description: `Generate a ${objectName} based on the prompt.`,
+ parameters: schema
},
maxTokens: maxTokens,
temperature: temperature,
@@ -147,7 +155,14 @@ export async function generateXaiObject({
'debug',
`xAI generateObject result received. Tokens: ${result.usage.completionTokens}/${result.usage.promptTokens}`
);
- return result.object;
+ // Return object and usage
+ return {
+   object: result.object,
+   usage: {
+     inputTokens: result.usage.promptTokens,
+     outputTokens: result.usage.completionTokens
+   }
+ };
} catch (error) {
log(
'error',