feat: add gpt-5 support (#1105)

* feat: add gpt-5 support
Author: Ralph Khreish
Date: 2025-08-08 14:38:44 +02:00
Committed by: GitHub
Parent: 41d1e671b1
Commit: 75c514cf5b
7 changed files with 329 additions and 8 deletions

@@ -61,8 +61,11 @@ export class BaseAIProvider {
     ) {
       throw new Error('Temperature must be between 0 and 1');
     }
-    if (params.maxTokens !== undefined && params.maxTokens <= 0) {
-      throw new Error('maxTokens must be greater than 0');
+    if (params.maxTokens !== undefined) {
+      const maxTokens = Number(params.maxTokens);
+      if (!Number.isFinite(maxTokens) || maxTokens <= 0) {
+        throw new Error('maxTokens must be a finite number greater than 0');
+      }
     }
   }
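The tightened guard coerces maxTokens with Number() and rejects NaN and Infinity along with zero and negative values, where the old check only caught non-positive numbers. A minimal sketch of the resulting behavior, assuming this hunk sits inside the provider's parameter validation method (the method and variable names below are illustrative, not from the diff):

    // Illustrative calls against the validation above (names assumed)
    provider.validateParams({ modelId: 'gpt-5', maxTokens: 4096 }); // passes
    provider.validateParams({ modelId: 'gpt-5', maxTokens: 0 }); // throws
    provider.validateParams({ modelId: 'gpt-5', maxTokens: NaN }); // throws
    provider.validateParams({ modelId: 'gpt-5', maxTokens: Infinity }); // throws
    provider.validateParams({ modelId: 'gpt-5', maxTokens: '2048' }); // passes: Number('2048') is finite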
@@ -122,6 +125,37 @@ export class BaseAIProvider {
     throw new Error('getRequiredApiKeyName must be implemented by provider');
   }
 
+  /**
+   * Determines if a model requires max_completion_tokens instead of maxTokens
+   * Can be overridden by providers to specify their model requirements
+   * @param {string} modelId - The model ID to check
+   * @returns {boolean} True if the model requires max_completion_tokens
+   */
+  requiresMaxCompletionTokens(modelId) {
+    return false; // Default behavior - most models use maxTokens
+  }
+
+  /**
+   * Prepares token limit parameter based on model requirements
+   * @param {string} modelId - The model ID
+   * @param {number} maxTokens - The maximum tokens value
+   * @returns {object} Object with either maxTokens or max_completion_tokens
+   */
+  prepareTokenParam(modelId, maxTokens) {
+    if (maxTokens === undefined) {
+      return {};
+    }
+    // Ensure maxTokens is an integer
+    const tokenValue = Math.floor(Number(maxTokens));
+    if (this.requiresMaxCompletionTokens(modelId)) {
+      return { max_completion_tokens: tokenValue };
+    } else {
+      return { maxTokens: tokenValue };
+    }
+  }
+
   /**
    * Generates text using the provider's model
    */
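This pair of methods is the extension point: a provider that serves gpt-5 class models overrides requiresMaxCompletionTokens, and prepareTokenParam then emits the right parameter key. A minimal sketch of how a subclass might use it (the class name and the startsWith check are assumptions; the actual gpt-5 override lives in one of the other changed files, not shown here):

    // Hypothetical subclass: gpt-5 models take max_completion_tokens, not maxTokens
    class ExampleOpenAIProvider extends BaseAIProvider {
      requiresMaxCompletionTokens(modelId) {
        // Assumed model-id convention for illustration only
        return typeof modelId === 'string' && modelId.startsWith('gpt-5');
      }
    }

    const provider = new ExampleOpenAIProvider();
    provider.prepareTokenParam('gpt-5', 32000); // -> { max_completion_tokens: 32000 }
    provider.prepareTokenParam('gpt-4o', 32000); // -> { maxTokens: 32000 }
    provider.prepareTokenParam('gpt-5', undefined); // -> {}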
@@ -139,7 +173,7 @@ export class BaseAIProvider {
       const result = await generateText({
         model: client(params.modelId),
         messages: params.messages,
-        maxTokens: params.maxTokens,
+        ...this.prepareTokenParam(params.modelId, params.maxTokens),
         temperature: params.temperature
       });
@@ -175,7 +209,7 @@ export class BaseAIProvider {
       const stream = await streamText({
         model: client(params.modelId),
         messages: params.messages,
-        maxTokens: params.maxTokens,
+        ...this.prepareTokenParam(params.modelId, params.maxTokens),
         temperature: params.temperature
       });
@@ -216,7 +250,7 @@ export class BaseAIProvider {
         messages: params.messages,
         schema: zodSchema(params.schema),
         mode: params.mode || 'auto',
-        maxTokens: params.maxTokens,
+        ...this.prepareTokenParam(params.modelId, params.maxTokens),
         temperature: params.temperature
       });
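Because prepareTokenParam returns an empty object when maxTokens is undefined, the spread at these three call sites adds no token key at all in that case, instead of passing maxTokens: undefined through to the SDK. A quick illustration, reusing the hypothetical provider sketched above:

    // Spreading {} leaves the options object untouched
    const options = {
      temperature: 0.7,
      ...provider.prepareTokenParam('gpt-4o', undefined)
    };
    // -> { temperature: 0.7 } (no maxTokens or max_completion_tokens key)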