fix(ai): Align Perplexity provider with standard telemetry response structure

This commit updates the Perplexity AI provider so that its functions return data in a structure consistent with the other providers and with the expectations of the unified AI service layer.

Specifically:
- `generatePerplexityText` now returns `{ text: result.text, usage: result.usage }` instead of only the text string.
- `generatePerplexityObject` now returns `{ object: result.object, usage: result.usage }` instead of only the result object.

These changes ensure that the unified AI service layer can correctly extract both the primary AI-generated content and the token usage data for telemetry purposes when Perplexity models are used. This resolves issues encountered during E2E testing, where complexity analysis (which can use Perplexity for its research role) failed due to unexpected response formats.

The  function was already compliant.
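For context, here is a minimal sketch of how a caller in the unified service layer could consume the new return shape; the function names, import path, and telemetry helper below are illustrative assumptions, not the actual service-layer code:

```js
// Hypothetical consumer sketch (names and path assumed): the provider call now
// yields both the generated text and the usage data in one object.
import { generatePerplexityText } from './perplexity.js'; // path assumed

async function runTextService(params) {
	const { text, usage } = await generatePerplexityText(params);

	// Both the content and the token counts are available from a single call,
	// so telemetry can be recorded without a second lookup.
	recordTelemetry({
		provider: 'perplexity',
		promptTokens: usage.promptTokens,
		completionTokens: usage.completionTokens
	});

	return text;
}

// Placeholder for whatever telemetry sink the service layer actually uses.
function recordTelemetry(event) {
	console.log('telemetry', event);
}
```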
Author: Eyal Toledano
Date: 2025-05-14 11:46:35 -04:00
Parent: 9f4bac8d6a
Commit: 79a41543d5
4 changed files with 17 additions and 5 deletions


@@ -54,7 +54,7 @@ export async function generatePerplexityText({
'debug',
`Perplexity generateText result received. Tokens: ${result.usage.completionTokens}/${result.usage.promptTokens}`
);
-return result.text;
+return { text: result.text, usage: result.usage };
} catch (error) {
log('error', `Perplexity generateText failed: ${error.message}`);
throw error;
@@ -148,7 +148,7 @@ export async function generatePerplexityObject({
'debug',
`Perplexity generateObject result received. Tokens: ${result.usage.completionTokens}/${result.usage.promptTokens}`
);
-return result.object;
+return { object: result.object, usage: result.usage };
} catch (error) {
log(
'error',