Feat/add.azure.and.other.providers (#607)

* fix: claude-4 not having the right max_tokens

* feat: add bedrock support

* chore: fix package-lock.json

* fix: rename baseUrl to baseURL

* feat: add azure support

* fix: final touches of azure integration

* feat: add google vertex provider

* chore: fix tests and refactor task-manager.test.js

* chore: move task 92 to 94
Ralph Khreish authored on 2025-05-28 00:42:31 +02:00, committed by GitHub
parent 80735f9e60
commit 6a8a68e1a3
49 changed files with 12785 additions and 5015 deletions

src/ai-providers/anthropic.js

@@ -4,9 +4,9 @@
* Implementation for interacting with Anthropic models (e.g., Claude)
* using the Vercel AI SDK.
*/
import { createAnthropic } from '@ai-sdk/anthropic';
import { BaseAIProvider } from './base-provider.js';
@@ -17,207 +17,38 @@ import { log } from '../../scripts/modules/utils.js'; // Assuming utils is accessible
export class AnthropicAIProvider extends BaseAIProvider {
	constructor() {
		super();
		this.name = 'Anthropic';
	}

	/**
	 * Creates and returns an Anthropic client instance.
	 * @param {object} params - Parameters for client initialization
	 * @param {string} params.apiKey - Anthropic API key
	 * @param {string} [params.baseURL] - Optional custom API endpoint
	 * @returns {Function} Anthropic client function
	 * @throws {Error} If API key is missing or initialization fails
	 */
	getClient(params) {
		try {
			const { apiKey, baseURL } = params;

			if (!apiKey) {
				throw new Error('Anthropic API key is required.');
			}

			return createAnthropic({
				apiKey,
				...(baseURL && { baseURL }),
				headers: {
					'anthropic-beta': 'output-128k-2025-02-19'
				}
			});
		} catch (error) {
			this.handleError('client initialization', error);
		}
	}
}
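Since this diff deletes the provider-specific generateText/streamText/generateObject wrappers, callers now go through the methods inherited from BaseAIProvider. A minimal usage sketch under that assumption (the model ID and key source are illustrative, not part of this diff):

import { AnthropicAIProvider } from './src/ai-providers/anthropic.js';

const provider = new AnthropicAIProvider();

// Inherited from BaseAIProvider: validates params, builds the client via
// getClient(), calls the Vercel AI SDK, and returns { text, usage }.
const { text, usage } = await provider.generateText({
	apiKey: process.env.ANTHROPIC_API_KEY, // illustrative key source
	modelId: 'claude-3-7-sonnet-20250219', // illustrative model ID
	messages: [{ role: 'user', content: 'Summarize this task list.' }],
	maxTokens: 1024,
	temperature: 0.2
});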

src/ai-providers/azure.js

@@ -0,0 +1,52 @@
/**
 * azure.js
 * AI provider implementation for Azure OpenAI models using Vercel AI SDK.
 */
import { createAzure } from '@ai-sdk/azure';
import { BaseAIProvider } from './base-provider.js';

export class AzureProvider extends BaseAIProvider {
	constructor() {
		super();
		this.name = 'Azure OpenAI';
	}

	/**
	 * Validates Azure-specific authentication parameters
	 * @param {object} params - Parameters to validate
	 * @throws {Error} If required parameters are missing
	 */
	validateAuth(params) {
		if (!params.apiKey) {
			throw new Error('Azure API key is required');
		}
		if (!params.baseURL) {
			throw new Error(
				'Azure endpoint URL is required. Set it in .taskmasterconfig global.azureBaseURL or models.[role].baseURL'
			);
		}
	}

	/**
	 * Creates and returns an Azure OpenAI client instance.
	 * @param {object} params - Parameters for client initialization
	 * @param {string} params.apiKey - Azure OpenAI API key
	 * @param {string} params.baseURL - Azure OpenAI endpoint URL (from .taskmasterconfig global.azureBaseURL or models.[role].baseURL)
	 * @returns {Function} Azure OpenAI client function
	 * @throws {Error} If required parameters are missing or initialization fails
	 */
	getClient(params) {
		try {
			const { apiKey, baseURL } = params;

			return createAzure({
				apiKey,
				baseURL
			});
		} catch (error) {
			this.handleError('client initialization', error);
		}
	}
}
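The validateAuth error message names two config locations for the endpoint. A hypothetical .taskmasterconfig fragment showing both (the key names follow the error message; the role name "main" and the endpoint value are placeholders):

{
	"global": {
		"azureBaseURL": "https://my-resource.openai.azure.com/openai/deployments"
	},
	"models": {
		"main": {
			"provider": "azure",
			"modelId": "gpt-4o",
			"baseURL": "https://my-resource.openai.azure.com/openai/deployments"
		}
	}
}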

src/ai-providers/base-provider.js

@@ -0,0 +1,214 @@
import { generateText, streamText, generateObject } from 'ai';
import { log } from '../../scripts/modules/index.js';

/**
 * Base class for all AI providers
 */
export class BaseAIProvider {
	constructor() {
		if (this.constructor === BaseAIProvider) {
			throw new Error('BaseAIProvider cannot be instantiated directly');
		}

		// Each provider must set their name
		this.name = this.constructor.name;
	}

	/**
	 * Validates authentication parameters - can be overridden by providers
	 * @param {object} params - Parameters to validate
	 */
	validateAuth(params) {
		// Default: require API key (most providers need this)
		if (!params.apiKey) {
			throw new Error(`${this.name} API key is required`);
		}
	}

	/**
	 * Validates common parameters across all methods
	 * @param {object} params - Parameters to validate
	 */
	validateParams(params) {
		// Validate authentication (can be overridden by providers)
		this.validateAuth(params);

		// Validate required model ID
		if (!params.modelId) {
			throw new Error(`${this.name} Model ID is required`);
		}

		// Validate optional parameters
		this.validateOptionalParams(params);
	}

	/**
	 * Validates optional parameters like temperature and maxTokens
	 * @param {object} params - Parameters to validate
	 */
	validateOptionalParams(params) {
		if (
			params.temperature !== undefined &&
			(params.temperature < 0 || params.temperature > 1)
		) {
			throw new Error('Temperature must be between 0 and 1');
		}
		if (params.maxTokens !== undefined && params.maxTokens <= 0) {
			throw new Error('maxTokens must be greater than 0');
		}
	}

	/**
	 * Validates message array structure
	 */
	validateMessages(messages) {
		if (!messages || !Array.isArray(messages) || messages.length === 0) {
			throw new Error('Invalid or empty messages array provided');
		}

		for (const msg of messages) {
			if (!msg.role || !msg.content) {
				throw new Error(
					'Invalid message format. Each message must have role and content'
				);
			}
		}
	}

	/**
	 * Common error handler
	 */
	handleError(operation, error) {
		const errorMessage = error.message || 'Unknown error occurred';
		log('error', `${this.name} ${operation} failed: ${errorMessage}`, {
			error
		});
		throw new Error(
			`${this.name} API error during ${operation}: ${errorMessage}`
		);
	}

	/**
	 * Creates and returns a client instance for the provider
	 * @abstract
	 */
	getClient(params) {
		throw new Error('getClient must be implemented by provider');
	}

	/**
	 * Generates text using the provider's model
	 */
	async generateText(params) {
		try {
			this.validateParams(params);
			this.validateMessages(params.messages);

			log(
				'debug',
				`Generating ${this.name} text with model: ${params.modelId}`
			);

			const client = this.getClient(params);
			const result = await generateText({
				model: client(params.modelId),
				messages: params.messages,
				maxTokens: params.maxTokens,
				temperature: params.temperature
			});

			log(
				'debug',
				`${this.name} generateText completed successfully for model: ${params.modelId}`
			);

			return {
				text: result.text,
				usage: {
					inputTokens: result.usage?.promptTokens,
					outputTokens: result.usage?.completionTokens,
					totalTokens: result.usage?.totalTokens
				}
			};
		} catch (error) {
			this.handleError('text generation', error);
		}
	}

	/**
	 * Streams text using the provider's model
	 */
	async streamText(params) {
		try {
			this.validateParams(params);
			this.validateMessages(params.messages);

			log('debug', `Streaming ${this.name} text with model: ${params.modelId}`);

			const client = this.getClient(params);
			const stream = await streamText({
				model: client(params.modelId),
				messages: params.messages,
				maxTokens: params.maxTokens,
				temperature: params.temperature
			});

			log(
				'debug',
				`${this.name} streamText initiated successfully for model: ${params.modelId}`
			);

			return stream;
		} catch (error) {
			this.handleError('text streaming', error);
		}
	}

	/**
	 * Generates a structured object using the provider's model
	 */
	async generateObject(params) {
		try {
			this.validateParams(params);
			this.validateMessages(params.messages);

			if (!params.schema) {
				throw new Error('Schema is required for object generation');
			}
			if (!params.objectName) {
				throw new Error('Object name is required for object generation');
			}

			log(
				'debug',
				`Generating ${this.name} object ('${params.objectName}') with model: ${params.modelId}`
			);

			const client = this.getClient(params);
			const result = await generateObject({
				model: client(params.modelId),
				messages: params.messages,
				schema: params.schema,
				mode: 'tool',
				maxTokens: params.maxTokens,
				temperature: params.temperature
			});

			log(
				'debug',
				`${this.name} generateObject completed successfully for model: ${params.modelId}`
			);

			return {
				object: result.object,
				usage: {
					inputTokens: result.usage?.promptTokens,
					outputTokens: result.usage?.completionTokens,
					totalTokens: result.usage?.totalTokens
				}
			};
		} catch (error) {
			this.handleError('object generation', error);
		}
	}
}
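base-provider.js is effectively a template-method class: subclasses only supply getClient() (and optionally validateAuth()), while parameter checks, SDK calls, logging, and error wrapping live in one place. A sketch of the minimal surface a new provider would implement — the provider name and SDK factory below are placeholders, not anything in this diff:

import { BaseAIProvider } from './base-provider.js';
// import { createSomeSDK } from 'some-ai-sdk'; // placeholder SDK factory

class ExampleProvider extends BaseAIProvider {
	constructor() {
		super();
		this.name = 'Example'; // used in every log line and error message
	}

	getClient({ apiKey, baseURL }) {
		try {
			if (!apiKey) throw new Error('Example API key is required.');
			return createSomeSDK({ apiKey, ...(baseURL && { baseURL }) });
		} catch (error) {
			this.handleError('client initialization', error);
		}
	}
}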

src/ai-providers/bedrock.js

@@ -0,0 +1,41 @@
import { createAmazonBedrock } from '@ai-sdk/amazon-bedrock';
import { fromNodeProviderChain } from '@aws-sdk/credential-providers';
import { BaseAIProvider } from './base-provider.js';

export class BedrockAIProvider extends BaseAIProvider {
	constructor() {
		super();
		this.name = 'Bedrock';
	}

	/**
	 * Override auth validation - Bedrock uses AWS credentials instead of API keys
	 * @param {object} params - Parameters to validate
	 */
	validateAuth(params) {}

	/**
	 * Creates and returns a Bedrock client instance.
	 * See https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-envvars.html
	 * for AWS SDK environment variables and configuration options.
	 */
	getClient(params) {
		try {
			const {
				profile = process.env.AWS_PROFILE || 'default',
				region = process.env.AWS_DEFAULT_REGION || 'us-east-1',
				baseURL
			} = params;

			const credentialProvider = fromNodeProviderChain({ profile });

			return createAmazonBedrock({
				region,
				credentialProvider,
				...(baseURL && { baseURL })
			});
		} catch (error) {
			this.handleError('client initialization', error);
		}
	}
}
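Because validateAuth() is a no-op here, credentials come entirely from the AWS default provider chain (environment variables, shared config/credentials files, SSO, or instance roles) resolved by fromNodeProviderChain. A call sketch under that assumption — the Bedrock model ID is illustrative:

const bedrock = new BedrockAIProvider();

// No apiKey needed; fromNodeProviderChain({ profile }) picks up
// AWS_ACCESS_KEY_ID / AWS_SECRET_ACCESS_KEY, ~/.aws/credentials, etc.
const result = await bedrock.generateText({
	modelId: 'anthropic.claude-3-5-sonnet-20240620-v1:0', // illustrative model ID
	messages: [{ role: 'user', content: 'Hello from Bedrock.' }]
});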

src/ai-providers/google-vertex.js

@@ -0,0 +1,150 @@
/**
 * google-vertex.js
 * AI provider implementation for Google Vertex AI models using Vercel AI SDK.
 */
import { createVertex } from '@ai-sdk/google-vertex';
import { BaseAIProvider } from './base-provider.js';
import { resolveEnvVariable } from '../../scripts/modules/utils.js';
import { log } from '../../scripts/modules/utils.js';

// Vertex-specific error classes
class VertexAuthError extends Error {
	constructor(message) {
		super(message);
		this.name = 'VertexAuthError';
		this.code = 'vertex_auth_error';
	}
}

class VertexConfigError extends Error {
	constructor(message) {
		super(message);
		this.name = 'VertexConfigError';
		this.code = 'vertex_config_error';
	}
}

class VertexApiError extends Error {
	constructor(message, statusCode) {
		super(message);
		this.name = 'VertexApiError';
		this.code = 'vertex_api_error';
		this.statusCode = statusCode;
	}
}

export class VertexAIProvider extends BaseAIProvider {
	constructor() {
		super();
		this.name = 'Google Vertex AI';
	}

	/**
	 * Validates Vertex AI-specific authentication parameters
	 * @param {object} params - Parameters to validate
	 * @throws {Error} If required parameters are missing
	 */
	validateAuth(params) {
		const { apiKey, projectId, location, credentials } = params;

		// Check for API key OR service account credentials
		if (!apiKey && !credentials) {
			throw new VertexAuthError(
				'Either Google API key (GOOGLE_API_KEY) or service account credentials (GOOGLE_APPLICATION_CREDENTIALS) is required for Vertex AI'
			);
		}

		// Project ID is required for Vertex AI
		if (!projectId) {
			throw new VertexConfigError(
				'Google Cloud project ID is required for Vertex AI. Set VERTEX_PROJECT_ID environment variable.'
			);
		}

		// Location is required for Vertex AI
		if (!location) {
			throw new VertexConfigError(
				'Google Cloud location is required for Vertex AI. Set VERTEX_LOCATION environment variable (e.g., "us-central1").'
			);
		}
	}

	/**
	 * Creates and returns a Google Vertex AI client instance.
	 * @param {object} params - Parameters for client initialization
	 * @param {string} [params.apiKey] - Google API key
	 * @param {string} params.projectId - Google Cloud project ID
	 * @param {string} params.location - Google Cloud location (e.g., "us-central1")
	 * @param {object} [params.credentials] - Service account credentials object
	 * @param {string} [params.baseURL] - Optional custom API endpoint
	 * @returns {Function} Google Vertex AI client function
	 * @throws {Error} If required parameters are missing or initialization fails
	 */
	getClient(params) {
		try {
			// Validate required parameters
			this.validateAuth(params);

			const { apiKey, projectId, location, credentials, baseURL } = params;

			// Configure auth options - either API key or service account
			const authOptions = {};
			if (apiKey) {
				authOptions.apiKey = apiKey;
			} else if (credentials) {
				authOptions.googleAuthOptions = credentials;
			}

			// Return Vertex AI client
			return createVertex({
				...authOptions,
				projectId,
				location,
				...(baseURL && { baseURL })
			});
		} catch (error) {
			this.handleError('client initialization', error);
		}
	}

	/**
	 * Handle errors from Vertex AI
	 * @param {string} operation - Description of the operation that failed
	 * @param {Error} error - The error object
	 * @throws {Error} Rethrows the error with additional context
	 */
	handleError(operation, error) {
		log('error', `Vertex AI ${operation} error:`, error);

		// Handle known error types
		if (
			error.name === 'VertexAuthError' ||
			error.name === 'VertexConfigError' ||
			error.name === 'VertexApiError'
		) {
			throw error;
		}

		// Handle network/API errors
		if (error.response) {
			const statusCode = error.response.status;
			const errorMessage = error.response.data?.error?.message || error.message;

			// Categorize by status code
			if (statusCode === 401 || statusCode === 403) {
				throw new VertexAuthError(`Authentication failed: ${errorMessage}`);
			} else if (statusCode === 400) {
				throw new VertexConfigError(`Invalid request: ${errorMessage}`);
			} else {
				throw new VertexApiError(
					`API error (${statusCode}): ${errorMessage}`,
					statusCode
				);
			}
		}

		// Generic error handling
		throw new Error(`Vertex AI ${operation} failed: ${error.message}`);
	}
}
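The typed error classes let callers distinguish failures. Since the classes are module-private, a caller would branch on the name/code fields the constructors set; a short illustrative sketch (the argument values are deliberately incomplete to trigger the config check):

const vertex = new VertexAIProvider();
try {
	// Missing projectId: validateAuth throws VertexConfigError, and
	// handleError rethrows known Vertex errors unchanged.
	vertex.getClient({ apiKey: 'k', location: 'us-central1' });
} catch (error) {
	if (error.code === 'vertex_config_error') {
		console.error('Set VERTEX_PROJECT_ID / VERTEX_LOCATION:', error.message);
	} else {
		throw error;
	}
}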

src/ai-providers/google.js

@@ -1,181 +1,39 @@
/**
 * google.js
 * AI provider implementation for Google AI models using Vercel AI SDK.
 */
import { createGoogleGenerativeAI } from '@ai-sdk/google';
import { BaseAIProvider } from './base-provider.js';

export class GoogleAIProvider extends BaseAIProvider {
	constructor() {
		super();
		this.name = 'Google';
	}

	/**
	 * Creates and returns a Google AI client instance.
	 * @param {object} params - Parameters for client initialization
	 * @param {string} params.apiKey - Google API key
	 * @param {string} [params.baseURL] - Optional custom API endpoint
	 * @returns {Function} Google AI client function
	 * @throws {Error} If API key is missing or initialization fails
	 */
	getClient(params) {
		try {
			const { apiKey, baseURL } = params;

			if (!apiKey) {
				throw new Error('Google API key is required.');
			}

			return createGoogleGenerativeAI({
				apiKey,
				...(baseURL && { baseURL })
			});
		} catch (error) {
			this.handleError('client initialization', error);
		}
	}
}

src/ai-providers/index.js

@@ -0,0 +1,15 @@
/**
* src/ai-providers/index.js
* Central export point for all AI provider classes
*/
export { AnthropicAIProvider } from './anthropic.js';
export { PerplexityAIProvider } from './perplexity.js';
export { GoogleAIProvider } from './google.js';
export { OpenAIProvider } from './openai.js';
export { XAIProvider } from './xai.js';
export { OpenRouterAIProvider } from './openrouter.js';
export { OllamaAIProvider } from './ollama.js';
export { BedrockAIProvider } from './bedrock.js';
export { AzureProvider } from './azure.js';
export { VertexAIProvider } from './google-vertex.js';
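With a single export point, the rest of the codebase can build a provider registry instead of importing each module directly. A hypothetical dispatch map (the key names are illustrative; the real provider lookup lives elsewhere in the repo):

import * as providers from './src/ai-providers/index.js';

const PROVIDERS = {
	anthropic: new providers.AnthropicAIProvider(),
	azure: new providers.AzureProvider(),
	bedrock: new providers.BedrockAIProvider(),
	vertex: new providers.VertexAIProvider()
	// ...remaining providers follow the same pattern
};

const provider = PROVIDERS['azure']; // selected by role/config at runtime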

src/ai-providers/ollama.js

@@ -4,160 +4,39 @@
*/
import { createOllama } from 'ollama-ai-provider';
import { BaseAIProvider } from './base-provider.js';

export class OllamaAIProvider extends BaseAIProvider {
	constructor() {
		super();
		this.name = 'Ollama';
	}

	/**
	 * Override auth validation - Ollama doesn't require API keys
	 * @param {object} params - Parameters to validate
	 */
	validateAuth(_params) {
		// Ollama runs locally and doesn't require API keys
		// No authentication validation needed
	}

	/**
	 * Creates and returns an Ollama client instance.
	 * @param {object} params - Parameters for client initialization
	 * @param {string} [params.baseURL] - Optional Ollama base URL (defaults to http://localhost:11434)
	 * @returns {Function} Ollama client function
	 * @throws {Error} If initialization fails
	 */
	getClient(params) {
		try {
			const { baseURL } = params;

			return createOllama({
				...(baseURL && { baseURL })
			});
		} catch (error) {
			this.handleError('client initialization', error);
		}
	}
}
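Ollama is one of the two providers here with a no-op validateAuth(), so a call needs only a model ID (plus an optional baseURL for a non-default host). A minimal local sketch, assuming the model has been pulled locally:

const ollama = new OllamaAIProvider();

const { text } = await ollama.generateText({
	modelId: 'llama3', // illustrative; any locally pulled model name
	messages: [{ role: 'user', content: 'ping' }]
	// baseURL defaults to http://localhost:11434 inside ollama-ai-provider
});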

src/ai-providers/openai.js

@@ -1,199 +1,39 @@
/**
 * openai.js
 * AI provider implementation for OpenAI models using Vercel AI SDK.
 */
import { createOpenAI } from '@ai-sdk/openai';
import { BaseAIProvider } from './base-provider.js';

export class OpenAIProvider extends BaseAIProvider {
	constructor() {
		super();
		this.name = 'OpenAI';
	}

	/**
	 * Creates and returns an OpenAI client instance.
	 * @param {object} params - Parameters for client initialization
	 * @param {string} params.apiKey - OpenAI API key
	 * @param {string} [params.baseURL] - Optional custom API endpoint
	 * @returns {Function} OpenAI client function
	 * @throws {Error} If API key is missing or initialization fails
	 */
	getClient(params) {
		try {
			const { apiKey, baseURL } = params;

			if (!apiKey) {
				throw new Error('OpenAI API key is required.');
			}

			return createOpenAI({
				apiKey,
				...(baseURL && { baseURL })
			});
		} catch (error) {
			this.handleError('client initialization', error);
		}
	}
}
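Streaming now goes through BaseAIProvider.streamText, which returns the full Vercel AI SDK stream result (not just a text stream). A consumption sketch, with an illustrative model ID:

const openai = new OpenAIProvider();

const stream = await openai.streamText({
	apiKey: process.env.OPENAI_API_KEY,
	modelId: 'gpt-4o', // illustrative model ID
	messages: [{ role: 'user', content: 'Stream a haiku.' }]
});

// textStream is the async-iterable of text deltas on the SDK result object
for await (const delta of stream.textStream) {
	process.stdout.write(delta);
}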

src/ai-providers/openrouter.js

@@ -1,246 +1,39 @@
/**
 * openrouter.js
 * AI provider implementation for OpenRouter models using Vercel AI SDK.
 */
import { createOpenRouter } from '@openrouter/ai-sdk-provider';
import { BaseAIProvider } from './base-provider.js';

export class OpenRouterAIProvider extends BaseAIProvider {
	constructor() {
		super();
		this.name = 'OpenRouter';
	}

	/**
	 * Creates and returns an OpenRouter client instance.
	 * @param {object} params - Parameters for client initialization
	 * @param {string} params.apiKey - OpenRouter API key
	 * @param {string} [params.baseURL] - Optional custom API endpoint
	 * @returns {Function} OpenRouter client function
	 * @throws {Error} If API key is missing or initialization fails
	 */
	getClient(params) {
		try {
			const { apiKey, baseURL } = params;

			if (!apiKey) {
				throw new Error('OpenRouter API key is required.');
			}

			return createOpenRouter({
				apiKey,
				...(baseURL && { baseURL })
			});
		} catch (error) {
			this.handleError('client initialization', error);
		}
	}
}
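Object generation for OpenRouter also collapses into the shared BaseAIProvider.generateObject path, which requires both schema and objectName. A sketch with a Zod schema (the model ID and schema shape are illustrative):

import { z } from 'zod';

const openrouter = new OpenRouterAIProvider();

const { object } = await openrouter.generateObject({
	apiKey: process.env.OPENROUTER_API_KEY,
	modelId: 'anthropic/claude-3.5-sonnet', // illustrative OpenRouter model ID
	objectName: 'subtasks', // required by the base class; used in logs
	schema: z.object({ subtasks: z.array(z.string()) }),
	messages: [{ role: 'user', content: 'Break this task into subtasks.' }]
});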

src/ai-providers/perplexity.js

@@ -1,181 +1,39 @@
/**
 * perplexity.js
 * AI provider implementation for Perplexity models using Vercel AI SDK.
 */
import { createPerplexity } from '@ai-sdk/perplexity';
import { BaseAIProvider } from './base-provider.js';

export class PerplexityAIProvider extends BaseAIProvider {
	constructor() {
		super();
		this.name = 'Perplexity';
	}

	/**
	 * Creates and returns a Perplexity client instance.
	 * @param {object} params - Parameters for client initialization
	 * @param {string} params.apiKey - Perplexity API key
	 * @param {string} [params.baseURL] - Optional custom API endpoint
	 * @returns {Function} Perplexity client function
	 * @throws {Error} If API key is missing or initialization fails
	 */
	getClient(params) {
		try {
			const { apiKey, baseURL } = params;

			if (!apiKey) {
				throw new Error('Perplexity API key is required.');
			}

			return createPerplexity({
				apiKey,
				baseURL: baseURL || 'https://api.perplexity.ai'
			});
		} catch (error) {
			this.handleError('client initialization', error);
		}
	}
}
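Note the default baseURL fallback: Perplexity (like xAI below) hard-codes its public endpoint when none is configured, so the client needs only a key. The deleted implementation warned that Perplexity's structured-output (generateObject) support may be limited; that caveat still applies behaviorally, surfacing through handleError on the shared path. A minimal client sketch (the model name is illustrative):

const perplexity = new PerplexityAIProvider();
const client = perplexity.getClient({ apiKey: process.env.PERPLEXITY_API_KEY });
// client('sonar-pro') yields a model handle for the Vercel AI SDK calls.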

src/ai-providers/xai.js

@@ -1,178 +1,39 @@
/**
 * xai.js
 * AI provider implementation for xAI models using Vercel AI SDK.
 */
import { createXai } from '@ai-sdk/xai';
import { BaseAIProvider } from './base-provider.js';

export class XAIProvider extends BaseAIProvider {
	constructor() {
		super();
		this.name = 'xAI';
	}

	/**
	 * Creates and returns an xAI client instance.
	 * @param {object} params - Parameters for client initialization
	 * @param {string} params.apiKey - xAI API key
	 * @param {string} [params.baseURL] - Optional custom API endpoint
	 * @returns {Function} xAI client function
	 * @throws {Error} If API key is missing or initialization fails
	 */
	getClient(params) {
		try {
			const { apiKey, baseURL } = params;

			if (!apiKey) {
				throw new Error('xAI API key is required.');
			}

			return createXai({
				apiKey,
				baseURL: baseURL || 'https://api.x.ai/v1'
			});
		} catch (error) {
			this.handleError('client initialization', error);
		}
	}
}
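The deleted implementation carried a warning that xAI models may not support object generation; that caveat survives in behavior, since BaseAIProvider.generateObject will route any provider rejection through handleError. A text-generation sketch (the model ID follows the deleted JSDoc's 'grok-3' example):

const xai = new XAIProvider();

const { text } = await xai.generateText({
	apiKey: process.env.XAI_API_KEY,
	modelId: 'grok-3', // illustrative, from the deleted JSDoc
	messages: [{ role: 'user', content: 'Hello, Grok.' }]
});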