feat: fix context7 mcp tool calling mode

- TODO: upgrade to AI SDK v6 (OpenRouter has not been upgraded to AI SDK v6 yet; waiting on that)
This commit is contained in:
Ralph Khreish
2026-01-06 17:17:44 +01:00
parent af20b4250b
commit 8913342592
9 changed files with 3934 additions and 1486 deletions

5225
package-lock.json generated

File diff suppressed because it is too large Load Diff

View File

@@ -58,30 +58,31 @@
"author": "Eyal Toledano",
"license": "MIT WITH Commons-Clause",
"dependencies": {
"@ai-sdk/amazon-bedrock": "^3.0.23",
"@ai-sdk/anthropic": "^2.0.18",
"@ai-sdk/azure": "^2.0.89",
"@ai-sdk/google": "^2.0.16",
"@ai-sdk/google-vertex": "^3.0.86",
"@ai-sdk/groq": "^2.0.21",
"@ai-sdk/mistral": "^2.0.16",
"@ai-sdk/openai": "^2.0.34",
"@ai-sdk/openai-compatible": "^1.0.25",
"@ai-sdk/perplexity": "^2.0.10",
"@ai-sdk/provider": "^2.0.0",
"@ai-sdk/provider-utils": "^3.0.10",
"@ai-sdk/xai": "^2.0.22",
"@aws-sdk/credential-providers": "^3.895.0",
"@inquirer/search": "^3.0.15",
"@openrouter/ai-sdk-provider": "^1.2.0",
"@sentry/node": "^10.27.0",
"@ai-sdk/amazon-bedrock": "^4.0.9",
"@ai-sdk/anthropic": "^3.0.7",
"@ai-sdk/azure": "^3.0.5",
"@ai-sdk/google": "^3.0.4",
"@ai-sdk/google-vertex": "^4.0.7",
"@ai-sdk/groq": "^3.0.4",
"@ai-sdk/mcp": "^1.0.5",
"@ai-sdk/mistral": "^3.0.5",
"@ai-sdk/openai": "^3.0.5",
"@ai-sdk/openai-compatible": "^2.0.4",
"@ai-sdk/perplexity": "^3.0.4",
"@ai-sdk/provider": "^3.0.2",
"@ai-sdk/provider-utils": "^4.0.4",
"@ai-sdk/xai": "^3.0.10",
"@aws-sdk/credential-providers": "^3.962.0",
"@inquirer/search": "^3.2.2",
"@openrouter/ai-sdk-provider": "^1.5.4",
"@sentry/node": "^10.32.1",
"@streamparser/json": "^0.0.22",
"@supabase/supabase-js": "^2.57.4",
"@types/turndown": "^5.0.6",
"ai": "^5.0.51",
"ai-sdk-provider-claude-code": "^2.2.4",
"ai-sdk-provider-codex-cli": "^0.7.0",
"ai-sdk-provider-gemini-cli": "^1.4.0",
"ai": "^6.0.13",
"ai-sdk-provider-claude-code": "^3.1.0",
"ai-sdk-provider-codex-cli": "^1.0.4",
"ai-sdk-provider-gemini-cli": "^2.0.1",
"ajv": "^8.17.1",
"ajv-formats": "^3.0.1",
"boxen": "^8.0.1",

View File

@@ -15,14 +15,14 @@
"typecheck": "tsc --noEmit"
},
"dependencies": {
"@ai-sdk/provider": "^2.0.0",
"@ai-sdk/provider-utils": "^3.0.10",
"@ai-sdk/provider": "^3.0.2",
"@ai-sdk/provider-utils": "^4.0.4",
"jsonc-parser": "^3.3.1"
},
"devDependencies": {
"@types/node": "^22.18.6",
"typescript": "^5.9.2",
"vitest": "^4.0.10"
"typescript": "^5.9.3",
"vitest": "^4.0.16"
},
"engines": {
"node": ">=20"

View File

@@ -26,7 +26,7 @@ describe('createGrokCli', () => {
expect(typeof provider).toBe('function');
expect(typeof provider.languageModel).toBe('function');
expect(typeof provider.chat).toBe('function');
expect(typeof provider.textEmbeddingModel).toBe('function');
expect(typeof provider.embeddingModel).toBe('function');
expect(typeof provider.imageModel).toBe('function');
});
@@ -91,7 +91,7 @@ describe('createGrokCli', () => {
it('should throw NoSuchModelError for textEmbeddingModel', () => {
const provider = createGrokCli();
expect(() => {
provider.textEmbeddingModel('test-model');
provider.embeddingModel('test-model');
}).toThrow(NoSuchModelError);
});

View File

@@ -84,7 +84,7 @@ export function createGrokCli(
provider.chat = createModel; // Alias for languageModel
// Add textEmbeddingModel method that throws NoSuchModelError
provider.textEmbeddingModel = (modelId: string) => {
provider.embeddingModel = (modelId: string) => {
throw new NoSuchModelError({
modelId,
modelType: 'textEmbeddingModel'

View File

@@ -60,6 +60,8 @@ export async function createContext7Tools(
): Promise<Context7ToolsResult> {
const apiKey = getApiKey(options?.config);
console.log('[Context7] Initializing MCP client...');
const transport = new Experimental_StdioMCPTransport({
command: 'npx',
args: ['-y', '@upstash/context7-mcp', '--api-key', apiKey]
@@ -69,14 +71,23 @@ export async function createContext7Tools(
transport
});
console.log('[Context7] MCP client connected successfully');
options?.onReady?.();
const tools = await client.tools();
const toolNames = Object.keys(tools);
console.log(
`[Context7] Tools available: ${toolNames.length > 0 ? toolNames.join(', ') : 'none'}`
);
return {
tools,
close: async () => {
console.log('[Context7] Closing MCP client connection...');
await client.close();
console.log('[Context7] MCP client disconnected');
}
};
}
@@ -87,6 +98,7 @@ export async function createContext7Tools(
export function isContext7Available(config?: Context7Config): boolean {
try {
getApiKey(config);
console.log('[Context7] API key detected - Context7 tools will be enabled');
return true;
} catch {
return false;

View File

@@ -6,6 +6,9 @@
// Vercel AI SDK functions are NOT called directly anymore.
// import { generateText, streamText, generateObject } from 'ai';
// --- MCP Tools Integration ---
import { createMCPTools, isContext7Available } from '@tm/ai-tools';
// --- Core Dependencies ---
import {
MODEL_MAP,
@@ -59,6 +62,84 @@ import {
// Import the provider registry
import ProviderRegistry from '../../src/provider-registry/index.js';
// --- MCP Tools Cache ---
let _mcpToolsCache = null;
let _mcpToolsPromise = null;
/**
* Get or create MCP tools (singleton pattern for connection reuse)
* @returns {Promise<{tools: Record<string, unknown>, close: () => Promise<void>, enabledSources: string[]}>}
*/
async function _getMCPTools() {
// Return cached tools if available
if (_mcpToolsCache) {
return _mcpToolsCache;
}
// Return existing promise if tools are being created
if (_mcpToolsPromise) {
return _mcpToolsPromise;
}
// Check if any MCP tools are available
if (!isContext7Available()) {
return { tools: {}, close: async () => {}, enabledSources: [] };
}
// Create tools (this spawns MCP server subprocess)
_mcpToolsPromise = createMCPTools()
.then((result) => {
_mcpToolsCache = result;
_mcpToolsPromise = null;
if (result.enabledSources.length > 0) {
log('info', `MCP tools enabled: ${result.enabledSources.join(', ')}`);
}
return result;
})
.catch((error) => {
_mcpToolsPromise = null;
log('warn', `Failed to initialize MCP tools: ${error.message}`);
return { tools: {}, close: async () => {}, enabledSources: [] };
});
return _mcpToolsPromise;
}
/**
* Close MCP tools connections (call on process exit)
*/
async function closeMCPTools() {
if (_mcpToolsCache) {
try {
await _mcpToolsCache.close();
log('debug', 'MCP tools connections closed');
} catch (error) {
log('warn', `Error closing MCP tools: ${error.message}`);
}
_mcpToolsCache = null;
}
}
// Register cleanup on process exit
process.on('exit', () => {
if (_mcpToolsCache) {
// Synchronous cleanup attempt - close() is async so may not complete
_mcpToolsCache.close().catch(() => {});
}
});
process.on('SIGINT', async () => {
await closeMCPTools();
process.exit(0);
});
process.on('SIGTERM', async () => {
await closeMCPTools();
process.exit(0);
});
// Create provider instances
const PROVIDERS = {
anthropic: new AnthropicAIProvider(),
@@ -544,6 +625,17 @@ async function _unifiedServiceRunner(serviceType, params) {
let lastCleanErrorMessage =
'AI service call failed for all configured roles.';
// Get MCP tools (cached singleton - won't recreate on each call)
const mcpTools = await _getMCPTools();
const hasTools = Object.keys(mcpTools.tools).length > 0;
if (hasTools && getDebugFlag()) {
log(
'debug',
`MCP tools available for AI call: ${mcpTools.enabledSources.join(', ')}`
);
}
for (const currentRole of sequence) {
let providerName;
let modelId;
@@ -676,6 +768,7 @@ async function _unifiedServiceRunner(serviceType, params) {
...(baseURL && { baseURL }),
...((serviceType === 'generateObject' ||
serviceType === 'streamObject') && { schema, objectName }),
...(hasTools && { tools: mcpTools.tools }),
...(commandName && { commandName }), // Pass commandName for Sentry telemetry functionId
...(outputType && { outputType }), // Pass outputType for Sentry telemetry metadata
...(projectRoot && { projectRoot }), // Pass projectRoot for Sentry telemetry hashing
@@ -951,5 +1044,6 @@ export {
streamTextService,
streamObjectService,
generateObjectService,
logAiUsage
logAiUsage,
closeMCPTools
};

View File

@@ -1048,24 +1048,8 @@ function getAvailableModels() {
const sweScore = modelObj.swe_score;
const cost = modelObj.cost_per_1m_tokens;
const allowedRoles = modelObj.allowed_roles || ['main', 'fallback'];
// Use name from JSON if available, otherwise generate from ID
let name = modelObj.name;
if (!name) {
const nameParts = modelId
.split('-')
.map((p) => p.charAt(0).toUpperCase() + p.slice(1));
name = nameParts.join(' ');
// Handle specific known names better if needed
if (modelId === 'claude-3.5-sonnet-20240620')
name = 'Claude 3.5 Sonnet';
if (modelId === 'claude-3-7-sonnet-20250219')
name = 'Claude 3.7 Sonnet';
if (modelId === 'gpt-4o') name = 'GPT-4o';
if (modelId === 'gpt-4-turbo') name = 'GPT-4 Turbo';
if (modelId === 'sonar-pro') name = 'Perplexity Sonar Pro';
if (modelId === 'sonar-mini') name = 'Perplexity Sonar Mini';
}
// Use explicit name from model definition, otherwise use ID as-is
const name = modelObj.name || modelId;
available.push({
id: modelId,

View File

@@ -1,3 +1,4 @@
// todo: migrate to new ai sdk v6 correctly, use streamText and generateText instead of streamObject and generateObject with the right parameters
import {
JSONParseError,
NoObjectGeneratedError,
@@ -196,6 +197,7 @@ export class BaseAIProvider {
/**
* Generates text using the provider's model
* Supports tools parameter for MCP tool integration
*/
async generateText(params) {
try {
@@ -233,6 +235,7 @@ export class BaseAIProvider {
...(this.supportsTemperature && params.temperature !== undefined
? { temperature: params.temperature }
: {}),
...(params.tools ? { tools: params.tools } : {}),
...(telemetryConfig && { experimental_telemetry: telemetryConfig })
});
@@ -263,6 +266,7 @@ export class BaseAIProvider {
/**
* Streams text using the provider's model
* Supports tools parameter for MCP tool integration
*/
async streamText(params) {
try {
@@ -297,6 +301,7 @@ export class BaseAIProvider {
...(this.supportsTemperature && params.temperature !== undefined
? { temperature: params.temperature }
: {}),
...(params.tools ? { tools: params.tools } : {}),
...(telemetryConfig && { experimental_telemetry: telemetryConfig }),
...(params.experimental_transform && {
experimental_transform: params.experimental_transform
@@ -316,6 +321,7 @@ export class BaseAIProvider {
/**
* Streams a structured object using the provider's model
* Note: AI SDK v5 streamObject doesn't support tools - use streamText for tool support
*/
async streamObject(params) {
try {
@@ -350,6 +356,7 @@ export class BaseAIProvider {
const telemetryConfig = getAITelemetryConfig(functionId, metadata);
// AI SDK v5: Use streamObject directly (no tool support)
const result = await streamObject({
model: client(params.modelId),
messages: params.messages,
@@ -368,7 +375,6 @@ export class BaseAIProvider {
);
// Return the stream result directly
// The stream result contains partialObjectStream and other properties
return result;
} catch (error) {
this.handleError('object streaming', error);
@@ -377,6 +383,7 @@ export class BaseAIProvider {
/**
* Generates a structured object using the provider's model
* Note: AI SDK v5 generateObject doesn't support tools - use generateText for tool support
*/
async generateObject(params) {
try {
@@ -414,6 +421,7 @@ export class BaseAIProvider {
const telemetryConfig = getAITelemetryConfig(functionId, metadata);
// AI SDK v5: Use generateObject directly (no tool support)
const result = await generateObject({
model: client(params.modelId),
messages: params.messages,