feat(ai-client-factory): Add xAI and OpenRouter provider support, enhance tests
- Integrate `@ai-sdk/xai` for Grok models and `@openrouter/ai-sdk-provider` for OpenRouter into the AI client factory (`ai-client-factory.js`). - Install necessary provider dependencies (`@ai-sdk/xai`, `@openrouter/ai-sdk-provider`, and other related packages; updated core `ai` to `^4.3.9`). - Update environment variable checks (`validateEnvironment`) and client creation logic (`createClientInstance`) for the new providers. - Add and correct unit tests for `ai-client-factory.js` to cover xAI and OpenRouter instantiation, error handling, and environment variable resolution. - Corrected mock paths and names in tests to align with official package names. - Verify all tests (28 total) pass for `ai-client-factory.js`. - Confirm test coverage remains high (~90%) after additions.
This commit is contained in:
226
package-lock.json
generated
226
package-lock.json
generated
@@ -9,8 +9,16 @@
|
|||||||
"version": "0.11.0",
|
"version": "0.11.0",
|
||||||
"license": "MIT WITH Commons-Clause",
|
"license": "MIT WITH Commons-Clause",
|
||||||
"dependencies": {
|
"dependencies": {
|
||||||
|
"@ai-sdk/anthropic": "^1.2.10",
|
||||||
|
"@ai-sdk/azure": "^1.3.17",
|
||||||
|
"@ai-sdk/google": "^1.2.12",
|
||||||
|
"@ai-sdk/mistral": "^1.2.7",
|
||||||
|
"@ai-sdk/openai": "^1.3.16",
|
||||||
|
"@ai-sdk/perplexity": "^1.1.7",
|
||||||
|
"@ai-sdk/xai": "^1.2.13",
|
||||||
"@anthropic-ai/sdk": "^0.39.0",
|
"@anthropic-ai/sdk": "^0.39.0",
|
||||||
"ai": "^4.3.6",
|
"@openrouter/ai-sdk-provider": "^0.4.5",
|
||||||
|
"ai": "^4.3.9",
|
||||||
"boxen": "^8.0.1",
|
"boxen": "^8.0.1",
|
||||||
"chalk": "^4.1.2",
|
"chalk": "^4.1.2",
|
||||||
"cli-table3": "^0.6.5",
|
"cli-table3": "^0.6.5",
|
||||||
@@ -26,6 +34,7 @@
|
|||||||
"inquirer": "^12.5.0",
|
"inquirer": "^12.5.0",
|
||||||
"jsonwebtoken": "^9.0.2",
|
"jsonwebtoken": "^9.0.2",
|
||||||
"lru-cache": "^10.2.0",
|
"lru-cache": "^10.2.0",
|
||||||
|
"ollama-ai-provider": "^1.2.0",
|
||||||
"openai": "^4.89.0",
|
"openai": "^4.89.0",
|
||||||
"ora": "^8.2.0",
|
"ora": "^8.2.0",
|
||||||
"uuid": "^11.1.0"
|
"uuid": "^11.1.0"
|
||||||
@@ -48,6 +57,119 @@
|
|||||||
"node": ">=14.0.0"
|
"node": ">=14.0.0"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
"node_modules/@ai-sdk/anthropic": {
|
||||||
|
"version": "1.2.10",
|
||||||
|
"resolved": "https://registry.npmjs.org/@ai-sdk/anthropic/-/anthropic-1.2.10.tgz",
|
||||||
|
"integrity": "sha512-PyE7EC2fPjs9DnzRAHDrPQmcnI2m2Eojr8pfhckOejOlDEh2w7NnSJr1W3qe5hUWzKr+6d7NG1ZKR9fhmpDdEQ==",
|
||||||
|
"license": "Apache-2.0",
|
||||||
|
"dependencies": {
|
||||||
|
"@ai-sdk/provider": "1.1.3",
|
||||||
|
"@ai-sdk/provider-utils": "2.2.7"
|
||||||
|
},
|
||||||
|
"engines": {
|
||||||
|
"node": ">=18"
|
||||||
|
},
|
||||||
|
"peerDependencies": {
|
||||||
|
"zod": "^3.0.0"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"node_modules/@ai-sdk/azure": {
|
||||||
|
"version": "1.3.17",
|
||||||
|
"resolved": "https://registry.npmjs.org/@ai-sdk/azure/-/azure-1.3.17.tgz",
|
||||||
|
"integrity": "sha512-uGCQ7q81S3mY1EmH2mrsysc/Qw9czMiNTJDr5fc5ocDnHS89rbiaNUdBbdYpjS471EEa2Rcrx2FTCGiQ0gTPDQ==",
|
||||||
|
"license": "Apache-2.0",
|
||||||
|
"dependencies": {
|
||||||
|
"@ai-sdk/openai": "1.3.16",
|
||||||
|
"@ai-sdk/provider": "1.1.3",
|
||||||
|
"@ai-sdk/provider-utils": "2.2.7"
|
||||||
|
},
|
||||||
|
"engines": {
|
||||||
|
"node": ">=18"
|
||||||
|
},
|
||||||
|
"peerDependencies": {
|
||||||
|
"zod": "^3.0.0"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"node_modules/@ai-sdk/google": {
|
||||||
|
"version": "1.2.12",
|
||||||
|
"resolved": "https://registry.npmjs.org/@ai-sdk/google/-/google-1.2.12.tgz",
|
||||||
|
"integrity": "sha512-A8AYqCmBs9SJFiAOP6AX0YEDHWTDrCaUDiRY2cdMSKjJiEknvwnPrAAKf3idgVqYaM2kS0qWz5v9v4pBzXDx+w==",
|
||||||
|
"license": "Apache-2.0",
|
||||||
|
"dependencies": {
|
||||||
|
"@ai-sdk/provider": "1.1.3",
|
||||||
|
"@ai-sdk/provider-utils": "2.2.7"
|
||||||
|
},
|
||||||
|
"engines": {
|
||||||
|
"node": ">=18"
|
||||||
|
},
|
||||||
|
"peerDependencies": {
|
||||||
|
"zod": "^3.0.0"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"node_modules/@ai-sdk/mistral": {
|
||||||
|
"version": "1.2.7",
|
||||||
|
"resolved": "https://registry.npmjs.org/@ai-sdk/mistral/-/mistral-1.2.7.tgz",
|
||||||
|
"integrity": "sha512-MbOMGfnHKcsvjbv4d6OT7Oaz+Wp4jD8yityqC4hASoKoW1s7L52woz25ES8RgAgTRlfbEZ3MOxEzLu58I228bQ==",
|
||||||
|
"license": "Apache-2.0",
|
||||||
|
"dependencies": {
|
||||||
|
"@ai-sdk/provider": "1.1.3",
|
||||||
|
"@ai-sdk/provider-utils": "2.2.7"
|
||||||
|
},
|
||||||
|
"engines": {
|
||||||
|
"node": ">=18"
|
||||||
|
},
|
||||||
|
"peerDependencies": {
|
||||||
|
"zod": "^3.0.0"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"node_modules/@ai-sdk/openai": {
|
||||||
|
"version": "1.3.16",
|
||||||
|
"resolved": "https://registry.npmjs.org/@ai-sdk/openai/-/openai-1.3.16.tgz",
|
||||||
|
"integrity": "sha512-pjtiBKt1GgaSKZryTbM3tqgoegJwgAUlp1+X5uN6T+VPnI4FLSymV65tyloWzDlyqZmi9HXnnSRPu76VoL5D5g==",
|
||||||
|
"license": "Apache-2.0",
|
||||||
|
"dependencies": {
|
||||||
|
"@ai-sdk/provider": "1.1.3",
|
||||||
|
"@ai-sdk/provider-utils": "2.2.7"
|
||||||
|
},
|
||||||
|
"engines": {
|
||||||
|
"node": ">=18"
|
||||||
|
},
|
||||||
|
"peerDependencies": {
|
||||||
|
"zod": "^3.0.0"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"node_modules/@ai-sdk/openai-compatible": {
|
||||||
|
"version": "0.2.11",
|
||||||
|
"resolved": "https://registry.npmjs.org/@ai-sdk/openai-compatible/-/openai-compatible-0.2.11.tgz",
|
||||||
|
"integrity": "sha512-56U0uNCcFTygA4h6R/uREv8r5sKA3/pGkpIAnMOpRzs5wiARlTYakWW3LZgxg6D4Gpeswo4gwNJczB7nM0K1Qg==",
|
||||||
|
"license": "Apache-2.0",
|
||||||
|
"dependencies": {
|
||||||
|
"@ai-sdk/provider": "1.1.3",
|
||||||
|
"@ai-sdk/provider-utils": "2.2.7"
|
||||||
|
},
|
||||||
|
"engines": {
|
||||||
|
"node": ">=18"
|
||||||
|
},
|
||||||
|
"peerDependencies": {
|
||||||
|
"zod": "^3.0.0"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"node_modules/@ai-sdk/perplexity": {
|
||||||
|
"version": "1.1.7",
|
||||||
|
"resolved": "https://registry.npmjs.org/@ai-sdk/perplexity/-/perplexity-1.1.7.tgz",
|
||||||
|
"integrity": "sha512-FH2zEADLU/NTuRkQXMbZkUZ0qSsJ5qhufQ+7IsFMuhhKShGt0M8gOZlnkxuolnIjDrOdD3r1r59nZKMsFHuwqw==",
|
||||||
|
"license": "Apache-2.0",
|
||||||
|
"dependencies": {
|
||||||
|
"@ai-sdk/provider": "1.1.3",
|
||||||
|
"@ai-sdk/provider-utils": "2.2.7"
|
||||||
|
},
|
||||||
|
"engines": {
|
||||||
|
"node": ">=18"
|
||||||
|
},
|
||||||
|
"peerDependencies": {
|
||||||
|
"zod": "^3.0.0"
|
||||||
|
}
|
||||||
|
},
|
||||||
"node_modules/@ai-sdk/provider": {
|
"node_modules/@ai-sdk/provider": {
|
||||||
"version": "1.1.3",
|
"version": "1.1.3",
|
||||||
"resolved": "https://registry.npmjs.org/@ai-sdk/provider/-/provider-1.1.3.tgz",
|
"resolved": "https://registry.npmjs.org/@ai-sdk/provider/-/provider-1.1.3.tgz",
|
||||||
@@ -118,6 +240,23 @@
|
|||||||
"zod": "^3.23.8"
|
"zod": "^3.23.8"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
"node_modules/@ai-sdk/xai": {
|
||||||
|
"version": "1.2.13",
|
||||||
|
"resolved": "https://registry.npmjs.org/@ai-sdk/xai/-/xai-1.2.13.tgz",
|
||||||
|
"integrity": "sha512-vJnzpnRVIVuGgDHrHgfIc3ImjVp6YN+salVX99r+HWd2itiGQy+vAmQKen0Ml8BK/avnLyQneeYRfdlgDBkhgQ==",
|
||||||
|
"license": "Apache-2.0",
|
||||||
|
"dependencies": {
|
||||||
|
"@ai-sdk/openai-compatible": "0.2.11",
|
||||||
|
"@ai-sdk/provider": "1.1.3",
|
||||||
|
"@ai-sdk/provider-utils": "2.2.7"
|
||||||
|
},
|
||||||
|
"engines": {
|
||||||
|
"node": ">=18"
|
||||||
|
},
|
||||||
|
"peerDependencies": {
|
||||||
|
"zod": "^3.0.0"
|
||||||
|
}
|
||||||
|
},
|
||||||
"node_modules/@ampproject/remapping": {
|
"node_modules/@ampproject/remapping": {
|
||||||
"version": "2.3.0",
|
"version": "2.3.0",
|
||||||
"resolved": "https://registry.npmjs.org/@ampproject/remapping/-/remapping-2.3.0.tgz",
|
"resolved": "https://registry.npmjs.org/@ampproject/remapping/-/remapping-2.3.0.tgz",
|
||||||
@@ -2250,6 +2389,57 @@
|
|||||||
"node": ">= 8"
|
"node": ">= 8"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
"node_modules/@openrouter/ai-sdk-provider": {
|
||||||
|
"version": "0.4.5",
|
||||||
|
"resolved": "https://registry.npmjs.org/@openrouter/ai-sdk-provider/-/ai-sdk-provider-0.4.5.tgz",
|
||||||
|
"integrity": "sha512-gbCOcSjNhyWlLHyYZX2rIFnpJi3C2RXNyyzJj+d6pMRfTS/mdvEEOsU66KxK9H8Qju2i9YRLOn/FdQT26K7bIQ==",
|
||||||
|
"license": "Apache-2.0",
|
||||||
|
"dependencies": {
|
||||||
|
"@ai-sdk/provider": "1.0.9",
|
||||||
|
"@ai-sdk/provider-utils": "2.1.10"
|
||||||
|
},
|
||||||
|
"engines": {
|
||||||
|
"node": ">=18"
|
||||||
|
},
|
||||||
|
"peerDependencies": {
|
||||||
|
"zod": "^3.0.0"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"node_modules/@openrouter/ai-sdk-provider/node_modules/@ai-sdk/provider": {
|
||||||
|
"version": "1.0.9",
|
||||||
|
"resolved": "https://registry.npmjs.org/@ai-sdk/provider/-/provider-1.0.9.tgz",
|
||||||
|
"integrity": "sha512-jie6ZJT2ZR0uVOVCDc9R2xCX5I/Dum/wEK28lx21PJx6ZnFAN9EzD2WsPhcDWfCgGx3OAZZ0GyM3CEobXpa9LA==",
|
||||||
|
"license": "Apache-2.0",
|
||||||
|
"dependencies": {
|
||||||
|
"json-schema": "^0.4.0"
|
||||||
|
},
|
||||||
|
"engines": {
|
||||||
|
"node": ">=18"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"node_modules/@openrouter/ai-sdk-provider/node_modules/@ai-sdk/provider-utils": {
|
||||||
|
"version": "2.1.10",
|
||||||
|
"resolved": "https://registry.npmjs.org/@ai-sdk/provider-utils/-/provider-utils-2.1.10.tgz",
|
||||||
|
"integrity": "sha512-4GZ8GHjOFxePFzkl3q42AU0DQOtTQ5w09vmaWUf/pKFXJPizlnzKSUkF0f+VkapIUfDugyMqPMT1ge8XQzVI7Q==",
|
||||||
|
"license": "Apache-2.0",
|
||||||
|
"dependencies": {
|
||||||
|
"@ai-sdk/provider": "1.0.9",
|
||||||
|
"eventsource-parser": "^3.0.0",
|
||||||
|
"nanoid": "^3.3.8",
|
||||||
|
"secure-json-parse": "^2.7.0"
|
||||||
|
},
|
||||||
|
"engines": {
|
||||||
|
"node": ">=18"
|
||||||
|
},
|
||||||
|
"peerDependencies": {
|
||||||
|
"zod": "^3.0.0"
|
||||||
|
},
|
||||||
|
"peerDependenciesMeta": {
|
||||||
|
"zod": {
|
||||||
|
"optional": true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
"node_modules/@opentelemetry/api": {
|
"node_modules/@opentelemetry/api": {
|
||||||
"version": "1.9.0",
|
"version": "1.9.0",
|
||||||
"resolved": "https://registry.npmjs.org/@opentelemetry/api/-/api-1.9.0.tgz",
|
"resolved": "https://registry.npmjs.org/@opentelemetry/api/-/api-1.9.0.tgz",
|
||||||
@@ -2514,9 +2704,9 @@
|
|||||||
}
|
}
|
||||||
},
|
},
|
||||||
"node_modules/ai": {
|
"node_modules/ai": {
|
||||||
"version": "4.3.6",
|
"version": "4.3.9",
|
||||||
"resolved": "https://registry.npmjs.org/ai/-/ai-4.3.6.tgz",
|
"resolved": "https://registry.npmjs.org/ai/-/ai-4.3.9.tgz",
|
||||||
"integrity": "sha512-cRL/9zFfPRRfVUOk+ll5FHy08FVc692voFzXWJ2YPD9KS+mkjDPp72QT9Etr0ZD/mdlJZHYq4ZHIts7nRpdD6A==",
|
"integrity": "sha512-P2RpV65sWIPdUlA4f1pcJ11pB0N1YmqPVLEmC4j8WuBwKY0L3q9vGhYPh0Iv+spKHKyn0wUbMfas+7Z6nTfS0g==",
|
||||||
"license": "Apache-2.0",
|
"license": "Apache-2.0",
|
||||||
"dependencies": {
|
"dependencies": {
|
||||||
"@ai-sdk/provider": "1.1.3",
|
"@ai-sdk/provider": "1.1.3",
|
||||||
@@ -6460,6 +6650,28 @@
|
|||||||
"url": "https://github.com/sponsors/ljharb"
|
"url": "https://github.com/sponsors/ljharb"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
"node_modules/ollama-ai-provider": {
|
||||||
|
"version": "1.2.0",
|
||||||
|
"resolved": "https://registry.npmjs.org/ollama-ai-provider/-/ollama-ai-provider-1.2.0.tgz",
|
||||||
|
"integrity": "sha512-jTNFruwe3O/ruJeppI/quoOUxG7NA6blG3ZyQj3lei4+NnJo7bi3eIRWqlVpRlu/mbzbFXeJSBuYQWF6pzGKww==",
|
||||||
|
"license": "Apache-2.0",
|
||||||
|
"dependencies": {
|
||||||
|
"@ai-sdk/provider": "^1.0.0",
|
||||||
|
"@ai-sdk/provider-utils": "^2.0.0",
|
||||||
|
"partial-json": "0.1.7"
|
||||||
|
},
|
||||||
|
"engines": {
|
||||||
|
"node": ">=18"
|
||||||
|
},
|
||||||
|
"peerDependencies": {
|
||||||
|
"zod": "^3.0.0"
|
||||||
|
},
|
||||||
|
"peerDependenciesMeta": {
|
||||||
|
"zod": {
|
||||||
|
"optional": true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
"node_modules/on-finished": {
|
"node_modules/on-finished": {
|
||||||
"version": "2.4.1",
|
"version": "2.4.1",
|
||||||
"resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.4.1.tgz",
|
"resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.4.1.tgz",
|
||||||
@@ -6705,6 +6917,12 @@
|
|||||||
"node": ">= 0.8"
|
"node": ">= 0.8"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
"node_modules/partial-json": {
|
||||||
|
"version": "0.1.7",
|
||||||
|
"resolved": "https://registry.npmjs.org/partial-json/-/partial-json-0.1.7.tgz",
|
||||||
|
"integrity": "sha512-Njv/59hHaokb/hRUjce3Hdv12wd60MtM9Z5Olmn+nehe0QDAsRtRbJPvJ0Z91TusF0SuZRIvnM+S4l6EIP8leA==",
|
||||||
|
"license": "MIT"
|
||||||
|
},
|
||||||
"node_modules/path-exists": {
|
"node_modules/path-exists": {
|
||||||
"version": "4.0.0",
|
"version": "4.0.0",
|
||||||
"resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz",
|
"resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz",
|
||||||
|
|||||||
11
package.json
11
package.json
@@ -38,8 +38,16 @@
|
|||||||
"author": "Eyal Toledano",
|
"author": "Eyal Toledano",
|
||||||
"license": "MIT WITH Commons-Clause",
|
"license": "MIT WITH Commons-Clause",
|
||||||
"dependencies": {
|
"dependencies": {
|
||||||
|
"@ai-sdk/anthropic": "^1.2.10",
|
||||||
|
"@ai-sdk/azure": "^1.3.17",
|
||||||
|
"@ai-sdk/google": "^1.2.12",
|
||||||
|
"@ai-sdk/mistral": "^1.2.7",
|
||||||
|
"@ai-sdk/openai": "^1.3.16",
|
||||||
|
"@ai-sdk/perplexity": "^1.1.7",
|
||||||
|
"@ai-sdk/xai": "^1.2.13",
|
||||||
"@anthropic-ai/sdk": "^0.39.0",
|
"@anthropic-ai/sdk": "^0.39.0",
|
||||||
"ai": "^4.3.6",
|
"@openrouter/ai-sdk-provider": "^0.4.5",
|
||||||
|
"ai": "^4.3.9",
|
||||||
"boxen": "^8.0.1",
|
"boxen": "^8.0.1",
|
||||||
"chalk": "^4.1.2",
|
"chalk": "^4.1.2",
|
||||||
"cli-table3": "^0.6.5",
|
"cli-table3": "^0.6.5",
|
||||||
@@ -55,6 +63,7 @@
|
|||||||
"inquirer": "^12.5.0",
|
"inquirer": "^12.5.0",
|
||||||
"jsonwebtoken": "^9.0.2",
|
"jsonwebtoken": "^9.0.2",
|
||||||
"lru-cache": "^10.2.0",
|
"lru-cache": "^10.2.0",
|
||||||
|
"ollama-ai-provider": "^1.2.0",
|
||||||
"openai": "^4.89.0",
|
"openai": "^4.89.0",
|
||||||
"ora": "^8.2.0",
|
"ora": "^8.2.0",
|
||||||
"uuid": "^11.1.0"
|
"uuid": "^11.1.0"
|
||||||
|
|||||||
348
scripts/modules/ai-client-factory.js
Normal file
348
scripts/modules/ai-client-factory.js
Normal file
@@ -0,0 +1,348 @@
|
|||||||
|
import fs from 'fs';
|
||||||
|
import path from 'path';
|
||||||
|
import { createOpenAI } from '@ai-sdk/openai';
|
||||||
|
import { createAnthropic } from '@ai-sdk/anthropic';
|
||||||
|
import { createGoogle } from '@ai-sdk/google';
|
||||||
|
import { createPerplexity } from '@ai-sdk/perplexity';
|
||||||
|
import { createOllama } from 'ollama-ai-provider';
|
||||||
|
import { createMistral } from '@ai-sdk/mistral';
|
||||||
|
import { createAzure } from '@ai-sdk/azure';
|
||||||
|
import { createXai } from '@ai-sdk/xai';
|
||||||
|
import { createOpenRouter } from '@openrouter/ai-sdk-provider';
|
||||||
|
// TODO: Add imports for other supported providers like OpenRouter, Grok
|
||||||
|
|
||||||
|
import {
|
||||||
|
getProviderAndModelForRole,
|
||||||
|
findProjectRoot // Assuming config-manager exports this
|
||||||
|
} from './config-manager.js';
|
||||||
|
|
||||||
|
// Cache of created AI client instances, keyed by `${provider}:${modelId}`,
// so repeated getClient() calls for the same configuration reuse one client.
const clientCache = new Map();

// Using a Symbol for a unique, unmistakable value
// (distinguishes "validation was skipped" from a boolean true/false result).
const VALIDATION_SKIPPED = Symbol('validation_skipped');

// --- Load Supported Models Data (Lazily) ---
// Parsed contents of data/supported-models.json; null until the first load attempt.
let supportedModelsData = null;
// Guard flag so the file is read and parsed at most once per process
// (reset via _resetSupportedModelsCache for tests).
let modelsDataLoaded = false;
|
||||||
|
|
||||||
|
/**
 * Lazily loads and caches the supported-models data from
 * `data/supported-models.json` under the project root.
 * A missing file or a read/parse error is treated as an empty object so that
 * model validation is skipped rather than blocking execution.
 * @returns {object} Parsed supported-models data, or {} when unavailable.
 */
function loadSupportedModelsData() {
	if (modelsDataLoaded) {
		return supportedModelsData;
	}
	try {
		const projectRoot = findProjectRoot(process.cwd());
		const supportedModelsPath = path.join(
			projectRoot,
			'data',
			'supported-models.json'
		);

		if (fs.existsSync(supportedModelsPath)) {
			const fileContent = fs.readFileSync(supportedModelsPath, 'utf-8');
			supportedModelsData = JSON.parse(fileContent);
		} else {
			console.warn(
				`Warning: Could not find supported models file at ${supportedModelsPath}. Skipping model validation.`
			);
			supportedModelsData = {}; // Treat as empty if not found, allowing skip
		}
	} catch (error) {
		console.error(
			`Error loading or parsing supported models file: ${error.message}`
		);
		console.error('Stack Trace:', error.stack);
		supportedModelsData = {}; // Treat as empty on error, allowing skip
	}
	modelsDataLoaded = true;
	return supportedModelsData;
}
|
||||||
|
|
||||||
|
/**
 * Validates if a model is supported for a given provider and role.
 * @param {string} providerName - The name of the provider.
 * @param {string} modelId - The ID of the model.
 * @param {string} role - The role ('main', 'research', 'fallback').
 * @returns {boolean|Symbol} True if valid, false if invalid, VALIDATION_SKIPPED if data was missing.
 */
function isModelSupportedAndAllowed(providerName, modelId, role) {
	const modelsData = loadSupportedModelsData();

	if (
		!modelsData ||
		typeof modelsData !== 'object' ||
		Object.keys(modelsData).length === 0
	) {
		console.warn(
			'Skipping model validation as supported models data is unavailable or invalid.'
		);
		// Return the specific symbol instead of true
		return VALIDATION_SKIPPED;
	}

	// Ensure consistent casing for provider lookup
	const providerKey = providerName?.toLowerCase();
	// Object.hasOwn is safe even if the parsed JSON shadows hasOwnProperty
	if (!providerKey || !Object.hasOwn(modelsData, providerKey)) {
		console.warn(
			`Provider '${providerName}' not found in supported-models.json.`
		);
		return false;
	}

	const providerModels = modelsData[providerKey];
	if (!Array.isArray(providerModels)) {
		console.warn(
			`Invalid format for provider '${providerName}' models in supported-models.json. Expected an array.`
		);
		return false;
	}

	const modelInfo = providerModels.find((m) => m && m.id === modelId);
	if (!modelInfo) {
		console.warn(
			`Model '${modelId}' not found for provider '${providerName}' in supported-models.json.`
		);
		return false;
	}

	// Check if the role is allowed for this model
	if (!Array.isArray(modelInfo.allowed_roles)) {
		console.warn(
			`Model '${modelId}' (Provider: '${providerName}') has invalid or missing 'allowed_roles' array in supported-models.json.`
		);
		return false;
	}

	const isAllowed = modelInfo.allowed_roles.includes(role);
	if (!isAllowed) {
		console.warn(
			`Role '${role}' is not allowed for model '${modelId}' (Provider: '${providerName}'). Allowed roles: ${modelInfo.allowed_roles.join(', ')}`
		);
	}
	return isAllowed;
}
|
||||||
|
|
||||||
|
/**
 * Resolves an environment variable, preferring the process environment and
 * falling back to the MCP session's environment when the variable is unset.
 * @param {string} varName - The name of the environment variable.
 * @param {object|null} session - The MCP session object (optional).
 * @returns {string|undefined} The resolved value, or undefined if not found.
 */
function resolveEnvVariable(varName, session) {
	const fromProcess = process.env[varName];
	if (fromProcess != null) {
		return fromProcess;
	}
	return session?.env?.[varName];
}
|
||||||
|
|
||||||
|
// Required environment variables per provider. Module-level and frozen so the
// table is built once and documented in a single place.
const ENV_REQUIREMENTS = Object.freeze({
	openai: ['OPENAI_API_KEY'],
	anthropic: ['ANTHROPIC_API_KEY'],
	google: ['GOOGLE_API_KEY'],
	perplexity: ['PERPLEXITY_API_KEY'],
	ollama: ['OLLAMA_BASE_URL'], // Ollama only needs Base URL typically
	mistral: ['MISTRAL_API_KEY'],
	azure: ['AZURE_OPENAI_API_KEY', 'AZURE_OPENAI_ENDPOINT'],
	openrouter: ['OPENROUTER_API_KEY'],
	xai: ['XAI_API_KEY']
	// Add requirements for other providers
});

/**
 * Validates if the required environment variables are set for a given provider,
 * checking process.env and falling back to session.env.
 * Unknown providers are skipped with a warning (provider support is validated
 * earlier in the flow).
 * @param {string} providerName - The name of the provider (e.g., 'openai', 'anthropic').
 * @param {object|null} session - The MCP session object (optional).
 * @throws {Error} If any required variable is missing.
 */
function validateEnvironment(providerName, session) {
	const providerKey = providerName?.toLowerCase();
	if (!providerKey || !ENV_REQUIREMENTS[providerKey]) {
		// If the provider itself isn't in our requirements list, we can't validate.
		// This might happen if config has an unsupported provider; proceed without
		// validation rather than failing here.
		console.warn(
			`Cannot validate environment for unknown or unsupported provider: ${providerName}`
		);
		return; // Proceed without validation for unknown providers
	}

	// The guard above guarantees an array, so no optional chaining is needed.
	const missing = ENV_REQUIREMENTS[providerKey].filter(
		(envVar) => !resolveEnvVariable(envVar, session)
	);

	if (missing.length > 0) {
		throw new Error(
			`Missing environment variables for provider '${providerName}': ${missing.join(', ')}. Please check your .env file or session configuration.`
		);
	}
}
|
||||||
|
|
||||||
|
/**
 * Creates an AI client instance for the specified provider.
 * Assumes environment validation has already passed.
 * @param {string} providerName - The name of the provider.
 * @param {object|null} session - The MCP session object (optional).
 * @param {object} [options={}] - Additional options for the client creation (e.g., model).
 * @returns {object} The created AI client instance.
 * @throws {Error} If the provider is unsupported.
 */
function createClientInstance(providerName, session, options = {}) {
	// Validation is now done before calling this function
	const getEnv = (varName) => resolveEnvVariable(varName, session);

	switch (providerName?.toLowerCase()) {
		case 'openai':
			return createOpenAI({ apiKey: getEnv('OPENAI_API_KEY'), ...options });
		case 'anthropic':
			return createAnthropic({
				apiKey: getEnv('ANTHROPIC_API_KEY'),
				...options
			});
		case 'google':
			return createGoogle({ apiKey: getEnv('GOOGLE_API_KEY'), ...options });
		case 'perplexity':
			return createPerplexity({
				apiKey: getEnv('PERPLEXITY_API_KEY'),
				...options
			});
		case 'ollama': {
			// Braces give the const its own block scope (avoids the
			// no-case-declarations pitfall of leaking into sibling cases).
			const ollamaBaseUrl =
				getEnv('OLLAMA_BASE_URL') || 'http://localhost:11434/api'; // Default from ollama-ai-provider docs
			// ollama-ai-provider uses baseURL directly
			return createOllama({ baseURL: ollamaBaseUrl, ...options });
		}
		case 'mistral':
			return createMistral({ apiKey: getEnv('MISTRAL_API_KEY'), ...options });
		case 'azure':
			return createAzure({
				apiKey: getEnv('AZURE_OPENAI_API_KEY'),
				endpoint: getEnv('AZURE_OPENAI_ENDPOINT'),
				...(options.model && { deploymentName: options.model }), // Azure often uses deployment name
				...options
			});
		case 'openrouter':
			return createOpenRouter({
				apiKey: getEnv('OPENROUTER_API_KEY'),
				...options
			});
		case 'xai':
			return createXai({ apiKey: getEnv('XAI_API_KEY'), ...options });
		default:
			throw new Error(`Unsupported AI provider specified: ${providerName}`);
	}
}
|
||||||
|
|
||||||
|
/**
 * Gets or creates an AI client instance based on the configured model for a specific role.
 * Validates the configured model against supported models and role allowances.
 * @param {string} role - The role ('main', 'research', or 'fallback').
 * @param {object|null} [session=null] - The MCP session object (optional).
 * @param {object} [overrideOptions={}] - Optional overrides for { provider, modelId }.
 * @returns {object} The cached or newly created AI client instance.
 * @throws {Error} If configuration is missing, invalid, or environment validation fails.
 */
export function getClient(role, session = null, overrideOptions = {}) {
	if (!role) {
		throw new Error(
			`Client role ('main', 'research', 'fallback') must be specified.`
		);
	}

	// 1. Determine Provider and Model ID
	let providerName = overrideOptions.provider;
	let modelId = overrideOptions.modelId;

	if (!providerName || !modelId) {
		// If not fully overridden, get from config
		try {
			const config = getProviderAndModelForRole(role); // Fetch from config manager
			providerName = providerName || config.provider;
			modelId = modelId || config.modelId;
		} catch (configError) {
			throw new Error(
				`Failed to get configuration for role '${role}': ${configError.message}`
			);
		}
	}

	if (!providerName || !modelId) {
		throw new Error(
			`Could not determine provider or modelId for role '${role}' from configuration or overrides.`
		);
	}

	// 2. Validate Provider/Model Combination and Role Allowance
	const validationResult = isModelSupportedAndAllowed(
		providerName,
		modelId,
		role
	);

	// Only throw if validation explicitly returned false (meaning invalid/disallowed).
	// If it returned VALIDATION_SKIPPED, we proceed but skip strict validation.
	if (validationResult === false) {
		throw new Error(
			`Model '${modelId}' from provider '${providerName}' is either not supported or not allowed for the '${role}' role. Check supported-models.json and your .taskmasterconfig.`
		);
	}

	// 3. Validate Environment Variables for the chosen provider.
	// Let any error propagate unchanged for clearer test messages — a
	// try/catch that only rethrows would be redundant.
	validateEnvironment(providerName, session);

	// 4. Check Cache
	const cacheKey = `${providerName.toLowerCase()}:${modelId}`;
	if (clientCache.has(cacheKey)) {
		return clientCache.get(cacheKey);
	}

	// 5. Create New Client Instance
	console.log(
		`Creating new client for role '${role}': Provider=${providerName}, Model=${modelId}`
	);
	try {
		const clientInstance = createClientInstance(providerName, session, {
			model: modelId
		});

		clientCache.set(cacheKey, clientInstance);
		return clientInstance;
	} catch (creationError) {
		throw new Error(
			`Failed to create client instance for provider '${providerName}' (role: '${role}'): ${creationError.message}`
		);
	}
}
|
||||||
|
|
||||||
|
/**
 * Empties the AI client cache so subsequent getClient() calls construct
 * fresh client instances (useful after configuration changes or in tests).
 */
export function clearClientCache() {
	clientCache.clear();
	console.log('AI client cache cleared.');
}
|
||||||
|
|
||||||
|
// Exported for testing purposes only
/**
 * Resets the lazily-loaded supported-models cache so the next
 * loadSupportedModelsData() call re-reads supported-models.json.
 */
export function _resetSupportedModelsCache() {
	supportedModelsData = null;
	modelsDataLoaded = false;
}
|
||||||
@@ -271,7 +271,7 @@ The configuration management module should be updated to:
|
|||||||
7. Implement command output formatting for model listings
|
7. Implement command output formatting for model listings
|
||||||
8. Testing approach: Create integration tests that verify CLI commands correctly interact with the configuration manager
|
8. Testing approach: Create integration tests that verify CLI commands correctly interact with the configuration manager
|
||||||
|
|
||||||
## 3. Integrate Vercel AI SDK and Create Client Factory [pending]
|
## 3. Integrate Vercel AI SDK and Create Client Factory [done]
|
||||||
### Dependencies: 61.1
|
### Dependencies: 61.1
|
||||||
### Description: Set up Vercel AI SDK integration and implement a client factory pattern to create and manage AI model clients.
|
### Description: Set up Vercel AI SDK integration and implement a client factory pattern to create and manage AI model clients.
|
||||||
### Details:
|
### Details:
|
||||||
|
|||||||
@@ -2775,7 +2775,7 @@
|
|||||||
1
|
1
|
||||||
],
|
],
|
||||||
"details": "1. Install Vercel AI SDK: `npm install @vercel/ai`\n2. Create an `ai-client-factory.js` module that implements the Factory pattern\n3. Define client creation functions for each supported model (Claude, OpenAI, Ollama, Gemini, OpenRouter, Perplexity, Grok)\n4. Implement error handling for missing API keys or configuration issues\n5. Add caching mechanism to reuse existing clients\n6. Create a unified interface for all clients regardless of the underlying model\n7. Implement client validation to ensure proper initialization\n8. Testing approach: Mock API responses to test client creation and error handling\n\n<info added on 2025-04-14T23:02:30.519Z>\nHere's additional information for the client factory implementation:\n\nFor the client factory implementation:\n\n1. Structure the factory with a modular approach:\n```javascript\n// ai-client-factory.js\nimport { createOpenAI } from '@ai-sdk/openai';\nimport { createAnthropic } from '@ai-sdk/anthropic';\nimport { createGoogle } from '@ai-sdk/google';\nimport { createPerplexity } from '@ai-sdk/perplexity';\n\nconst clientCache = new Map();\n\nexport function createClientInstance(providerName, options = {}) {\n // Implementation details below\n}\n```\n\n2. For OpenAI-compatible providers (Ollama), implement specific configuration:\n```javascript\ncase 'ollama':\n const ollamaBaseUrl = process.env.OLLAMA_BASE_URL || 'http://localhost:11434';\n return createOpenAI({\n baseURL: ollamaBaseUrl,\n apiKey: 'ollama', // Ollama doesn't require a real API key\n ...options\n });\n```\n\n3. 
Add provider-specific model mapping:\n```javascript\n// Model mapping helper\nconst getModelForProvider = (provider, requestedModel) => {\n const modelMappings = {\n openai: {\n default: 'gpt-3.5-turbo',\n // Add other mappings\n },\n anthropic: {\n default: 'claude-3-opus-20240229',\n // Add other mappings\n },\n // Add mappings for other providers\n };\n \n return (modelMappings[provider] && modelMappings[provider][requestedModel]) \n || modelMappings[provider]?.default \n || requestedModel;\n};\n```\n\n4. Implement caching with provider+model as key:\n```javascript\nexport function getClient(providerName, model) {\n const cacheKey = `${providerName}:${model || 'default'}`;\n \n if (clientCache.has(cacheKey)) {\n return clientCache.get(cacheKey);\n }\n \n const modelName = getModelForProvider(providerName, model);\n const client = createClientInstance(providerName, { model: modelName });\n clientCache.set(cacheKey, client);\n \n return client;\n}\n```\n\n5. Add detailed environment variable validation:\n```javascript\nfunction validateEnvironment(provider) {\n const requirements = {\n openai: ['OPENAI_API_KEY'],\n anthropic: ['ANTHROPIC_API_KEY'],\n google: ['GOOGLE_API_KEY'],\n perplexity: ['PERPLEXITY_API_KEY'],\n openrouter: ['OPENROUTER_API_KEY'],\n ollama: ['OLLAMA_BASE_URL'],\n grok: ['GROK_API_KEY', 'GROK_BASE_URL']\n };\n \n const missing = requirements[provider]?.filter(env => !process.env[env]) || [];\n \n if (missing.length > 0) {\n throw new Error(`Missing environment variables for ${provider}: ${missing.join(', ')}`);\n }\n}\n```\n\n6. 
Add Jest test examples:\n```javascript\n// ai-client-factory.test.js\ndescribe('AI Client Factory', () => {\n beforeEach(() => {\n // Mock environment variables\n process.env.OPENAI_API_KEY = 'test-openai-key';\n process.env.ANTHROPIC_API_KEY = 'test-anthropic-key';\n // Add other mocks\n });\n \n test('creates OpenAI client with correct configuration', () => {\n const client = getClient('openai');\n expect(client).toBeDefined();\n // Add assertions for client configuration\n });\n \n test('throws error when environment variables are missing', () => {\n delete process.env.OPENAI_API_KEY;\n expect(() => getClient('openai')).toThrow(/Missing environment variables/);\n });\n \n // Add tests for other providers\n});\n```\n</info added on 2025-04-14T23:02:30.519Z>",
|
"details": "1. Install Vercel AI SDK: `npm install @vercel/ai`\n2. Create an `ai-client-factory.js` module that implements the Factory pattern\n3. Define client creation functions for each supported model (Claude, OpenAI, Ollama, Gemini, OpenRouter, Perplexity, Grok)\n4. Implement error handling for missing API keys or configuration issues\n5. Add caching mechanism to reuse existing clients\n6. Create a unified interface for all clients regardless of the underlying model\n7. Implement client validation to ensure proper initialization\n8. Testing approach: Mock API responses to test client creation and error handling\n\n<info added on 2025-04-14T23:02:30.519Z>\nHere's additional information for the client factory implementation:\n\nFor the client factory implementation:\n\n1. Structure the factory with a modular approach:\n```javascript\n// ai-client-factory.js\nimport { createOpenAI } from '@ai-sdk/openai';\nimport { createAnthropic } from '@ai-sdk/anthropic';\nimport { createGoogle } from '@ai-sdk/google';\nimport { createPerplexity } from '@ai-sdk/perplexity';\n\nconst clientCache = new Map();\n\nexport function createClientInstance(providerName, options = {}) {\n // Implementation details below\n}\n```\n\n2. For OpenAI-compatible providers (Ollama), implement specific configuration:\n```javascript\ncase 'ollama':\n const ollamaBaseUrl = process.env.OLLAMA_BASE_URL || 'http://localhost:11434';\n return createOpenAI({\n baseURL: ollamaBaseUrl,\n apiKey: 'ollama', // Ollama doesn't require a real API key\n ...options\n });\n```\n\n3. 
Add provider-specific model mapping:\n```javascript\n// Model mapping helper\nconst getModelForProvider = (provider, requestedModel) => {\n const modelMappings = {\n openai: {\n default: 'gpt-3.5-turbo',\n // Add other mappings\n },\n anthropic: {\n default: 'claude-3-opus-20240229',\n // Add other mappings\n },\n // Add mappings for other providers\n };\n \n return (modelMappings[provider] && modelMappings[provider][requestedModel]) \n || modelMappings[provider]?.default \n || requestedModel;\n};\n```\n\n4. Implement caching with provider+model as key:\n```javascript\nexport function getClient(providerName, model) {\n const cacheKey = `${providerName}:${model || 'default'}`;\n \n if (clientCache.has(cacheKey)) {\n return clientCache.get(cacheKey);\n }\n \n const modelName = getModelForProvider(providerName, model);\n const client = createClientInstance(providerName, { model: modelName });\n clientCache.set(cacheKey, client);\n \n return client;\n}\n```\n\n5. Add detailed environment variable validation:\n```javascript\nfunction validateEnvironment(provider) {\n const requirements = {\n openai: ['OPENAI_API_KEY'],\n anthropic: ['ANTHROPIC_API_KEY'],\n google: ['GOOGLE_API_KEY'],\n perplexity: ['PERPLEXITY_API_KEY'],\n openrouter: ['OPENROUTER_API_KEY'],\n ollama: ['OLLAMA_BASE_URL'],\n grok: ['GROK_API_KEY', 'GROK_BASE_URL']\n };\n \n const missing = requirements[provider]?.filter(env => !process.env[env]) || [];\n \n if (missing.length > 0) {\n throw new Error(`Missing environment variables for ${provider}: ${missing.join(', ')}`);\n }\n}\n```\n\n6. 
Add Jest test examples:\n```javascript\n// ai-client-factory.test.js\ndescribe('AI Client Factory', () => {\n beforeEach(() => {\n // Mock environment variables\n process.env.OPENAI_API_KEY = 'test-openai-key';\n process.env.ANTHROPIC_API_KEY = 'test-anthropic-key';\n // Add other mocks\n });\n \n test('creates OpenAI client with correct configuration', () => {\n const client = getClient('openai');\n expect(client).toBeDefined();\n // Add assertions for client configuration\n });\n \n test('throws error when environment variables are missing', () => {\n delete process.env.OPENAI_API_KEY;\n expect(() => getClient('openai')).toThrow(/Missing environment variables/);\n });\n \n // Add tests for other providers\n});\n```\n</info added on 2025-04-14T23:02:30.519Z>",
|
||||||
"status": "pending",
|
"status": "done",
|
||||||
"parentTaskId": 61
|
"parentTaskId": 61
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
|||||||
550
tests/unit/ai-client-factory.test.js
Normal file
550
tests/unit/ai-client-factory.test.js
Normal file
@@ -0,0 +1,550 @@
|
|||||||
|
import { jest } from '@jest/globals';
|
||||||
|
import path from 'path'; // Needed for mocking fs
|
||||||
|
|
||||||
|
// --- Mock Vercel AI SDK Modules ---
// Each provider package's `create*` factory is replaced with a jest.fn that
// returns a small sentinel object. Tests only assert that the correct factory
// was invoked with the expected options; no real client is ever constructed.
// Mock implementations - they just need to be callable and return a basic object
const mockCreateOpenAI = jest.fn(() => ({ provider: 'openai', type: 'mock' }));
const mockCreateAnthropic = jest.fn(() => ({
	provider: 'anthropic',
	type: 'mock'
}));
const mockCreateGoogle = jest.fn(() => ({ provider: 'google', type: 'mock' }));
const mockCreatePerplexity = jest.fn(() => ({
	provider: 'perplexity',
	type: 'mock'
}));
const mockCreateOllama = jest.fn(() => ({ provider: 'ollama', type: 'mock' }));
const mockCreateMistral = jest.fn(() => ({
	provider: 'mistral',
	type: 'mock'
}));
const mockCreateAzure = jest.fn(() => ({ provider: 'azure', type: 'mock' }));
const mockCreateXai = jest.fn(() => ({ provider: 'xai', type: 'mock' }));
// Grok is served via the official @ai-sdk/xai package (mocked below); the
// standalone '@ai-sdk/grok' registration is kept only for reference.
// jest.unstable_mockModule('@ai-sdk/grok', () => ({
// createGrok: mockCreateGrok
// }));
const mockCreateOpenRouter = jest.fn(() => ({
	provider: 'openrouter',
	type: 'mock'
}));

// Register the module mocks. NOTE(review): with ESM, jest.unstable_mockModule
// must run before the dynamic `await import(...)` of the module under test
// further down in this file — keep these registrations above that import.
jest.unstable_mockModule('@ai-sdk/openai', () => ({
	createOpenAI: mockCreateOpenAI
}));
jest.unstable_mockModule('@ai-sdk/anthropic', () => ({
	createAnthropic: mockCreateAnthropic
}));
jest.unstable_mockModule('@ai-sdk/google', () => ({
	createGoogle: mockCreateGoogle
}));
jest.unstable_mockModule('@ai-sdk/perplexity', () => ({
	createPerplexity: mockCreatePerplexity
}));
jest.unstable_mockModule('ollama-ai-provider', () => ({
	createOllama: mockCreateOllama
}));
jest.unstable_mockModule('@ai-sdk/mistral', () => ({
	createMistral: mockCreateMistral
}));
jest.unstable_mockModule('@ai-sdk/azure', () => ({
	createAzure: mockCreateAzure
}));
jest.unstable_mockModule('@ai-sdk/xai', () => ({
	createXai: mockCreateXai
}));
// Superseded: OpenRouter's provider ships as '@openrouter/ai-sdk-provider',
// not '@ai-sdk/openrouter' — the correct registration follows.
// jest.unstable_mockModule('@ai-sdk/openrouter', () => ({
// createOpenRouter: mockCreateOpenRouter
// }));
jest.unstable_mockModule('@openrouter/ai-sdk-provider', () => ({
	createOpenRouter: mockCreateOpenRouter
}));
// TODO: Register mocks for additional providers here if the factory grows new ones.
|
||||||
|
|
||||||
|
// --- Mock Config Manager ---
// Stubs for the project's config-manager so each test controls which
// provider/modelId a role ('main' | 'research' | 'fallback') resolves to,
// and what the project root path is, without touching real config files.
const mockGetProviderAndModelForRole = jest.fn();
const mockFindProjectRoot = jest.fn();
jest.unstable_mockModule('../../scripts/modules/config-manager.js', () => ({
	getProviderAndModelForRole: mockGetProviderAndModelForRole,
	findProjectRoot: mockFindProjectRoot
}));
|
||||||
|
|
||||||
|
// --- Mock File System (for supported-models.json loading) ---
// The factory reads supported-models.json synchronously; expose the two sync
// functions on both the default export and as named exports so either import
// style resolves to the same jest.fn instances.
const mockFsExistsSync = jest.fn();
const mockFsReadFileSync = jest.fn();
jest.unstable_mockModule('fs', () => ({
	__esModule: true, // Important for ES modules with default exports
	default: {
		// Provide the default export expected by `import fs from 'fs'`
		existsSync: mockFsExistsSync,
		readFileSync: mockFsReadFileSync
	},
	// Also provide named exports if they were directly imported elsewhere, though not needed here
	existsSync: mockFsExistsSync,
	readFileSync: mockFsReadFileSync
}));
|
||||||
|
|
||||||
|
// --- Mock path (specifically path.join used for supported-models.json) ---
// NOTE(review): the static `import path from 'path'` at the top of this file
// is evaluated before this mock is registered, so mockPathJoin's default
// implementation below uses the *real* path.sep — confirm this is intended.
const mockPathJoin = jest.fn((...args) => args.join(path.sep)); // Simple mock
const actualPath = jest.requireActual('path'); // Get the actual path module
jest.unstable_mockModule('path', () => ({
	__esModule: true, // Indicate ES module mock
	default: {
		// Provide the default export
		...actualPath, // Spread actual functions
		join: mockPathJoin // Override join
	},
	// Also provide named exports for consistency
	...actualPath,
	join: mockPathJoin
}));
|
||||||
|
|
||||||
|
// --- Define Mock Data ---
// Minimal stand-in for supported-models.json: maps provider name -> list of
// models, each tagged with the roles it may serve. The factory's role/model
// validation in these tests is driven entirely by `allowed_roles` here.
const mockSupportedModels = {
	openai: [
		{ id: 'gpt-4o', allowed_roles: ['main', 'fallback'] },
		{ id: 'gpt-3.5-turbo', allowed_roles: ['main', 'fallback'] }
	],
	anthropic: [
		{ id: 'claude-3.5-sonnet-20240620', allowed_roles: ['main'] },
		{ id: 'claude-3-haiku-20240307', allowed_roles: ['fallback'] }
	],
	perplexity: [{ id: 'sonar-pro', allowed_roles: ['research'] }],
	ollama: [{ id: 'llama3', allowed_roles: ['main', 'fallback'] }],
	google: [{ id: 'gemini-pro', allowed_roles: ['main'] }],
	mistral: [{ id: 'mistral-large-latest', allowed_roles: ['main'] }],
	azure: [{ id: 'azure-gpt4o', allowed_roles: ['main'] }],
	xai: [{ id: 'grok-basic', allowed_roles: ['main'] }],
	openrouter: [{ id: 'openrouter-model', allowed_roles: ['main'] }]
	// Add other providers as needed for tests
};
|
||||||
|
|
||||||
|
// --- Import the module AFTER mocks ---
// Top-level await + dynamic import ensures the jest.unstable_mockModule
// registrations above are in effect when the factory module is evaluated.
const { getClient, clearClientCache, _resetSupportedModelsCache } =
	await import('../../scripts/modules/ai-client-factory.js');
|
||||||
|
|
||||||
|
// Test suite for the role-based AI client factory. Each test drives the
// factory through the mocked config-manager / fs / provider SDKs declared
// above; beforeEach re-arms all defaults so tests stay order-independent.
describe('AI Client Factory (Role-Based)', () => {
	// Snapshot of the real environment, restored after the suite.
	const OLD_ENV = process.env;

	beforeEach(() => {
		// Reset state before each test
		clearClientCache(); // Use the correct function name
		_resetSupportedModelsCache(); // Reset the models cache
		mockFsExistsSync.mockClear();
		mockFsReadFileSync.mockClear();
		mockGetProviderAndModelForRole.mockClear(); // Reset this mock too

		// Reset environment to avoid test pollution
		process.env = { ...OLD_ENV };

		// Default mock implementations (can be overridden)
		mockFindProjectRoot.mockReturnValue('/fake/project/root');
		mockPathJoin.mockImplementation((...args) => args.join(actualPath.sep)); // Use actualPath.sep

		// Default FS mocks for model/config loading
		mockFsExistsSync.mockImplementation((filePath) => {
			// Default to true for the files we expect to load
			if (filePath.endsWith('supported-models.json')) return true;
			// Add other expected files if necessary
			return false; // Default to false for others
		});
		mockFsReadFileSync.mockImplementation((filePath) => {
			if (filePath.endsWith('supported-models.json')) {
				return JSON.stringify(mockSupportedModels);
			}
			// Throw if an unexpected file is read
			throw new Error(`Unexpected readFileSync call in test: ${filePath}`);
		});

		// Default config mock
		mockGetProviderAndModelForRole.mockImplementation((role) => {
			if (role === 'main') return { provider: 'openai', modelId: 'gpt-4o' };
			if (role === 'research')
				return { provider: 'perplexity', modelId: 'sonar-pro' };
			if (role === 'fallback')
				return { provider: 'anthropic', modelId: 'claude-3-haiku-20240307' };
			return {}; // Default empty for unconfigured roles
		});

		// Set default required env vars (can be overridden in tests)
		process.env.OPENAI_API_KEY = 'test-openai-key';
		process.env.ANTHROPIC_API_KEY = 'test-anthropic-key';
		process.env.PERPLEXITY_API_KEY = 'test-perplexity-key';
		process.env.GOOGLE_API_KEY = 'test-google-key';
		process.env.MISTRAL_API_KEY = 'test-mistral-key';
		process.env.AZURE_OPENAI_API_KEY = 'test-azure-key';
		process.env.AZURE_OPENAI_ENDPOINT = 'test-azure-endpoint';
		process.env.XAI_API_KEY = 'test-xai-key';
		process.env.OPENROUTER_API_KEY = 'test-openrouter-key';
	});

	afterAll(() => {
		// Restore the real environment once the whole suite is done.
		process.env = OLD_ENV;
	});

	// --- Argument / configuration error paths ---

	test('should throw error if role is missing', () => {
		expect(() => getClient()).toThrow(
			"Client role ('main', 'research', 'fallback') must be specified."
		);
	});

	test('should throw error if config manager fails to get role config', () => {
		mockGetProviderAndModelForRole.mockImplementation((role) => {
			if (role === 'main') throw new Error('Config file not found');
		});
		expect(() => getClient('main')).toThrow(
			"Failed to get configuration for role 'main': Config file not found"
		);
	});

	test('should throw error if config manager returns undefined provider/model', () => {
		mockGetProviderAndModelForRole.mockReturnValue({}); // Empty object
		expect(() => getClient('main')).toThrow(
			"Could not determine provider or modelId for role 'main'"
		);
	});

	// --- Model/role validation against supported-models.json ---

	test('should throw error if configured model is not supported for the role', () => {
		mockGetProviderAndModelForRole.mockReturnValue({
			provider: 'anthropic',
			modelId: 'claude-3.5-sonnet-20240620' // Only allowed for 'main' in mock data
		});
		expect(() => getClient('research')).toThrow(
			/Model 'claude-3.5-sonnet-20240620' from provider 'anthropic' is either not supported or not allowed for the 'research' role/
		);
	});

	test('should throw error if configured model is not found in supported list', () => {
		mockGetProviderAndModelForRole.mockReturnValue({
			provider: 'openai',
			modelId: 'gpt-unknown'
		});
		expect(() => getClient('main')).toThrow(
			/Model 'gpt-unknown' from provider 'openai' is either not supported or not allowed for the 'main' role/
		);
	});

	test('should throw error if configured provider is not found in supported list', () => {
		mockGetProviderAndModelForRole.mockReturnValue({
			provider: 'unknown-provider',
			modelId: 'some-model'
		});
		expect(() => getClient('main')).toThrow(
			/Model 'some-model' from provider 'unknown-provider' is either not supported or not allowed for the 'main' role/
		);
	});

	test('should skip model validation if supported-models.json is not found', () => {
		mockFsExistsSync.mockReturnValue(false); // Simulate file not found
		const consoleWarnSpy = jest.spyOn(console, 'warn').mockImplementation(); // Suppress warning

		mockGetProviderAndModelForRole.mockReturnValue({
			provider: 'openai',
			modelId: 'gpt-any' // Doesn't matter, validation skipped
		});
		process.env.OPENAI_API_KEY = 'test-key';

		expect(() => getClient('main')).not.toThrow(); // Should not throw validation error
		expect(mockCreateOpenAI).toHaveBeenCalled();
		expect(consoleWarnSpy).toHaveBeenCalledWith(
			expect.stringContaining('Skipping model validation')
		);
		consoleWarnSpy.mockRestore();
	});

	// --- Environment validation and happy paths ---

	test('should throw environment validation error', () => {
		mockGetProviderAndModelForRole.mockReturnValue({
			provider: 'openai',
			modelId: 'gpt-4o'
		});
		delete process.env.OPENAI_API_KEY; // Trigger missing env var
		expect(() => getClient('main')).toThrow(
			// Expect the original error message from validateEnvironment
			/Missing environment variables for provider 'openai': OPENAI_API_KEY\. Please check your \.env file or session configuration\./
		);
	});

	test('should successfully create client using config and process.env', () => {
		mockGetProviderAndModelForRole.mockReturnValue({
			provider: 'openai',
			modelId: 'gpt-4o'
		});
		process.env.OPENAI_API_KEY = 'env-key';

		const client = getClient('main');

		expect(client).toBeDefined();
		expect(mockGetProviderAndModelForRole).toHaveBeenCalledWith('main');
		expect(mockCreateOpenAI).toHaveBeenCalledWith(
			expect.objectContaining({ apiKey: 'env-key', model: 'gpt-4o' })
		);
	});

	test('should successfully create client using config and session.env', () => {
		mockGetProviderAndModelForRole.mockReturnValue({
			provider: 'anthropic',
			modelId: 'claude-3.5-sonnet-20240620'
		});
		delete process.env.ANTHROPIC_API_KEY;
		const session = { env: { ANTHROPIC_API_KEY: 'session-key' } };

		const client = getClient('main', session);

		expect(client).toBeDefined();
		expect(mockGetProviderAndModelForRole).toHaveBeenCalledWith('main');
		expect(mockCreateAnthropic).toHaveBeenCalledWith(
			expect.objectContaining({
				apiKey: 'session-key',
				model: 'claude-3.5-sonnet-20240620'
			})
		);
	});

	test('should use overrideOptions when provided', () => {
		process.env.PERPLEXITY_API_KEY = 'env-key';
		const override = { provider: 'perplexity', modelId: 'sonar-pro' };

		const client = getClient('research', null, override);

		expect(client).toBeDefined();
		expect(mockGetProviderAndModelForRole).not.toHaveBeenCalled(); // Config shouldn't be called
		expect(mockCreatePerplexity).toHaveBeenCalledWith(
			expect.objectContaining({ apiKey: 'env-key', model: 'sonar-pro' })
		);
	});

	test('should throw validation error even with override if role is disallowed', () => {
		process.env.OPENAI_API_KEY = 'env-key';
		// gpt-4o is not allowed for 'research' in mock data
		const override = { provider: 'openai', modelId: 'gpt-4o' };

		expect(() => getClient('research', null, override)).toThrow(
			/Model 'gpt-4o' from provider 'openai' is either not supported or not allowed for the 'research' role/
		);
		expect(mockGetProviderAndModelForRole).not.toHaveBeenCalled();
		expect(mockCreateOpenAI).not.toHaveBeenCalled();
	});

	describe('Caching Behavior (Role-Based)', () => {
		test('should return cached client instance for the same provider/model derived from role', () => {
			mockGetProviderAndModelForRole.mockReturnValue({
				provider: 'openai',
				modelId: 'gpt-4o'
			});
			process.env.OPENAI_API_KEY = 'test-key';

			const client1 = getClient('main');
			const client2 = getClient('main'); // Same role, same config result

			expect(client1).toBe(client2); // Should be the exact same instance
			expect(mockGetProviderAndModelForRole).toHaveBeenCalledTimes(2); // Config lookup happens each time
			expect(mockCreateOpenAI).toHaveBeenCalledTimes(1); // Instance created only once
		});

		test('should return different client instances for different roles if config differs', () => {
			mockGetProviderAndModelForRole.mockImplementation((role) => {
				if (role === 'main') return { provider: 'openai', modelId: 'gpt-4o' };
				if (role === 'research')
					return { provider: 'perplexity', modelId: 'sonar-pro' };
				return {};
			});
			process.env.OPENAI_API_KEY = 'test-key-1';
			process.env.PERPLEXITY_API_KEY = 'test-key-2';

			const client1 = getClient('main');
			const client2 = getClient('research');

			expect(client1).not.toBe(client2);
			expect(mockCreateOpenAI).toHaveBeenCalledTimes(1);
			expect(mockCreatePerplexity).toHaveBeenCalledTimes(1);
		});

		test('should return same client instance if different roles resolve to same provider/model', () => {
			mockGetProviderAndModelForRole.mockImplementation((role) => {
				// Both roles point to the same model
				return { provider: 'openai', modelId: 'gpt-4o' };
			});
			process.env.OPENAI_API_KEY = 'test-key';

			const client1 = getClient('main');
			const client2 = getClient('fallback'); // Different role, same config result

			expect(client1).toBe(client2); // Should be the exact same instance
			expect(mockCreateOpenAI).toHaveBeenCalledTimes(1); // Instance created only once
		});
	});

	// Add tests for specific providers
	describe('Specific Provider Instantiation', () => {
		test('should successfully create Google client with GOOGLE_API_KEY', () => {
			mockGetProviderAndModelForRole.mockReturnValue({
				provider: 'google',
				modelId: 'gemini-pro'
			}); // Assume gemini-pro is supported
			process.env.GOOGLE_API_KEY = 'test-google-key';
			const client = getClient('main');
			expect(client).toBeDefined();
			expect(mockCreateGoogle).toHaveBeenCalledWith(
				expect.objectContaining({ apiKey: 'test-google-key' })
			);
		});

		test('should throw environment error if GOOGLE_API_KEY is missing', () => {
			mockGetProviderAndModelForRole.mockReturnValue({
				provider: 'google',
				modelId: 'gemini-pro'
			});
			delete process.env.GOOGLE_API_KEY;
			expect(() => getClient('main')).toThrow(
				/Missing environment variables for provider 'google': GOOGLE_API_KEY/
			);
		});

		test('should successfully create Ollama client with OLLAMA_BASE_URL', () => {
			mockGetProviderAndModelForRole.mockReturnValue({
				provider: 'ollama',
				modelId: 'llama3'
			}); // Use supported llama3
			process.env.OLLAMA_BASE_URL = 'http://test-ollama:11434';
			const client = getClient('main');
			expect(client).toBeDefined();
			expect(mockCreateOllama).toHaveBeenCalledWith(
				expect.objectContaining({ baseURL: 'http://test-ollama:11434' })
			);
		});

		test('should throw environment error if OLLAMA_BASE_URL is missing', () => {
			mockGetProviderAndModelForRole.mockReturnValue({
				provider: 'ollama',
				modelId: 'llama3'
			});
			delete process.env.OLLAMA_BASE_URL;
			expect(() => getClient('main')).toThrow(
				/Missing environment variables for provider 'ollama': OLLAMA_BASE_URL/
			);
		});

		test('should successfully create Mistral client with MISTRAL_API_KEY', () => {
			mockGetProviderAndModelForRole.mockReturnValue({
				provider: 'mistral',
				modelId: 'mistral-large-latest'
			}); // Assume supported
			process.env.MISTRAL_API_KEY = 'test-mistral-key';
			const client = getClient('main');
			expect(client).toBeDefined();
			expect(mockCreateMistral).toHaveBeenCalledWith(
				expect.objectContaining({ apiKey: 'test-mistral-key' })
			);
		});

		test('should throw environment error if MISTRAL_API_KEY is missing', () => {
			mockGetProviderAndModelForRole.mockReturnValue({
				provider: 'mistral',
				modelId: 'mistral-large-latest'
			});
			delete process.env.MISTRAL_API_KEY;
			expect(() => getClient('main')).toThrow(
				/Missing environment variables for provider 'mistral': MISTRAL_API_KEY/
			);
		});

		test('should successfully create Azure client with AZURE_OPENAI_API_KEY and AZURE_OPENAI_ENDPOINT', () => {
			mockGetProviderAndModelForRole.mockReturnValue({
				provider: 'azure',
				modelId: 'azure-gpt4o'
			}); // Assume supported
			process.env.AZURE_OPENAI_API_KEY = 'test-azure-key';
			process.env.AZURE_OPENAI_ENDPOINT = 'https://test-azure.openai.azure.com';
			const client = getClient('main');
			expect(client).toBeDefined();
			expect(mockCreateAzure).toHaveBeenCalledWith(
				expect.objectContaining({
					apiKey: 'test-azure-key',
					endpoint: 'https://test-azure.openai.azure.com'
				})
			);
		});

		test('should throw environment error if AZURE_OPENAI_API_KEY or AZURE_OPENAI_ENDPOINT is missing', () => {
			mockGetProviderAndModelForRole.mockReturnValue({
				provider: 'azure',
				modelId: 'azure-gpt4o'
			});
			// Azure needs both vars; exercise each missing one in turn.
			process.env.AZURE_OPENAI_API_KEY = 'test-azure-key';
			delete process.env.AZURE_OPENAI_ENDPOINT;
			expect(() => getClient('main')).toThrow(
				/Missing environment variables for provider 'azure': AZURE_OPENAI_ENDPOINT/
			);

			process.env.AZURE_OPENAI_ENDPOINT = 'https://test-azure.openai.azure.com';
			delete process.env.AZURE_OPENAI_API_KEY;
			expect(() => getClient('main')).toThrow(
				/Missing environment variables for provider 'azure': AZURE_OPENAI_API_KEY/
			);
		});

		test('should successfully create xAI (Grok) client with XAI_API_KEY', () => {
			mockGetProviderAndModelForRole.mockReturnValue({
				provider: 'xai',
				modelId: 'grok-basic'
			});
			process.env.XAI_API_KEY = 'test-xai-key-specific';
			const client = getClient('main');
			expect(client).toBeDefined();
			expect(mockCreateXai).toHaveBeenCalledWith(
				expect.objectContaining({ apiKey: 'test-xai-key-specific' })
			);
		});

		test('should throw environment error if XAI_API_KEY is missing', () => {
			mockGetProviderAndModelForRole.mockReturnValue({
				provider: 'xai',
				modelId: 'grok-basic'
			});
			delete process.env.XAI_API_KEY;
			expect(() => getClient('main')).toThrow(
				/Missing environment variables for provider 'xai': XAI_API_KEY/
			);
		});

		test('should successfully create OpenRouter client with OPENROUTER_API_KEY', () => {
			mockGetProviderAndModelForRole.mockReturnValue({
				provider: 'openrouter',
				modelId: 'openrouter-model'
			});
			process.env.OPENROUTER_API_KEY = 'test-openrouter-key-specific';
			const client = getClient('main');
			expect(client).toBeDefined();
			expect(mockCreateOpenRouter).toHaveBeenCalledWith(
				expect.objectContaining({ apiKey: 'test-openrouter-key-specific' })
			);
		});

		test('should throw environment error if OPENROUTER_API_KEY is missing', () => {
			mockGetProviderAndModelForRole.mockReturnValue({
				provider: 'openrouter',
				modelId: 'openrouter-model'
			});
			delete process.env.OPENROUTER_API_KEY;
			expect(() => getClient('main')).toThrow(
				/Missing environment variables for provider 'openrouter': OPENROUTER_API_KEY/
			);
		});
	});

	describe('Environment Variable Precedence', () => {
		test('should prioritize process.env over session.env for API keys', () => {
			mockGetProviderAndModelForRole.mockReturnValue({
				provider: 'openai',
				modelId: 'gpt-4o'
			});
			process.env.OPENAI_API_KEY = 'process-env-key'; // This should be used
			const session = { env: { OPENAI_API_KEY: 'session-env-key' } };

			const client = getClient('main', session);
			expect(client).toBeDefined();
			expect(mockCreateOpenAI).toHaveBeenCalledWith(
				expect.objectContaining({ apiKey: 'process-env-key', model: 'gpt-4o' })
			);
		});
	});
});
|
||||||
Reference in New Issue
Block a user