fix: update MCP provider to support AI SDK v5 with specification version v2

- Create new TypeScript package @tm/ai-sdk-provider-mcp-sampling
- Implement LanguageModelV2 interface with v2 specification
- Update mcp-server to use new AI SDK v5 compatible provider
- Fix error: "AI SDK 5 only supports models that implement specification version 'v2'"

Closes #1449

Co-authored-by: Ralph Khreish <Crunchyman-ralph@users.noreply.github.com>
This commit is contained in:
claude[bot]
2025-11-27 19:35:28 +00:00
parent 5cd089dea6
commit a94b0a21a0
13 changed files with 974 additions and 3 deletions

View File

@@ -6,7 +6,7 @@
* Follows the Claude Code provider pattern for session-based providers.
*/
import { createMCP } from '../custom-sdk/index.js';
import { createMCPSampling } from '@tm/ai-sdk-provider-mcp-sampling';
import { BaseAIProvider } from '../../../src/ai-providers/base-provider.js';
export class MCPProvider extends BaseAIProvider {
@@ -47,8 +47,8 @@ export class MCPProvider extends BaseAIProvider {
*/
getClient(params) {
try {
// Pass MCP session to AI SDK implementation
return createMCP({
// Pass MCP session to AI SDK v5 implementation
return createMCPSampling({
session: this.session,
defaultSettings: {
temperature: params.temperature,

View File

@@ -0,0 +1,13 @@
# @tm/ai-sdk-provider-mcp-sampling
## 0.1.0
### Minor Changes
- Initial release of MCP Sampling AI SDK provider
- Support for AI SDK v5 with v2 specification
- Full MCP sampling integration
- TypeScript support
- Streaming support (simulated)
- Structured output support
- Comprehensive error handling

View File

@@ -0,0 +1,64 @@
# MCP Sampling AI SDK Provider
AI SDK v5 provider for MCP (Model Context Protocol) Sampling integration with Task Master.
## Overview
This package provides an AI SDK v5 compatible provider for using MCP sampling capabilities within Task Master. It implements the v2 specification required by AI SDK v5.
## Usage
```typescript
import { createMCPSampling } from '@tm/ai-sdk-provider-mcp-sampling';
// Create provider with MCP session
const mcpProvider = createMCPSampling({
session: mcpSession, // Your MCP session object
defaultSettings: {
temperature: 0.7,
maxTokens: 1000
}
});
// Use with AI SDK
const model = mcpProvider('claude-3-5-sonnet-20241022');
const result = await generateText({
model,
prompt: 'Hello, world!'
});
```
## Features
- AI SDK v5 compatible with v2 specification
- Full support for MCP sampling protocol
- TypeScript support with comprehensive types
- Streaming support (simulated)
- Structured output support via JSON extraction
- Comprehensive error handling
- Proper usage tracking
## Requirements
- Node.js >= 20
- AI SDK v5
- Active MCP session with sampling capabilities
## Architecture
This provider follows the same patterns as other Task Master AI SDK providers:
- `MCPSamplingLanguageModel` - Main language model implementation
- `createMCPSampling` - Provider factory function
- Message conversion between AI SDK and MCP formats
- Error handling and mapping to AI SDK error types
- JSON extraction for structured outputs
## Error Handling
The provider maps MCP errors to appropriate AI SDK error types:
- Session errors → `MCPSamplingError`
- Authentication errors → `LoadAPIKeyError`
- API errors → `APICallError`
- Model not found → `NoSuchModelError`

View File

@@ -0,0 +1,33 @@
{
"name": "@tm/ai-sdk-provider-mcp-sampling",
"private": true,
"description": "AI SDK provider for MCP Sampling integration",
"type": "module",
"types": "./src/index.ts",
"main": "./dist/index.js",
"exports": {
".": "./src/index.ts"
},
"scripts": {
"test": "vitest run",
"test:watch": "vitest",
"test:ui": "vitest --ui",
"typecheck": "tsc --noEmit"
},
"dependencies": {
"@ai-sdk/provider": "^2.0.0",
"@ai-sdk/provider-utils": "^3.0.10",
"jsonc-parser": "^3.3.1"
},
"devDependencies": {
"@types/node": "^22.18.6",
"typescript": "^5.9.2",
"vitest": "^4.0.10"
},
"engines": {
"node": ">=20"
},
"keywords": ["ai", "mcp", "sampling", "language-model", "provider"],
"files": ["dist/**/*", "README.md"],
"version": "0.1.0"
}

View File

@@ -0,0 +1,103 @@
/**
* Error classes and utilities for MCP Sampling provider
*/
import {
APICallError,
LoadAPIKeyError,
NoSuchModelError
} from '@ai-sdk/provider';
/**
 * Options accepted by {@link MCPSamplingError} and the error factory helpers
 * in this module.
 */
export interface MCPSamplingErrorOptions {
	/** Human-readable error message; factories supply a default when omitted. */
	message?: string;
	/** Underlying cause (the originally thrown value). */
	cause?: unknown;
	/** MCP session associated with the failure, if any. */
	session?: unknown;
	/** Raw response payload returned by the MCP server, if any. */
	responseData?: unknown;
	/** Whether the caller may safely retry the operation (defaults to false). */
	isRetryable?: boolean;
}
/**
 * Base error type for MCP sampling failures that do not map onto one of the
 * standard AI SDK error classes.
 */
export class MCPSamplingError extends Error {
	/** Structured details captured at construction time. */
	public readonly options: MCPSamplingErrorOptions;

	constructor(message: string, options: MCPSamplingErrorOptions = {}) {
		super(message);
		this.name = 'MCPSamplingError';
		this.options = options;
	}
}
/**
 * Build an AI SDK `APICallError` describing a failed MCP sampling call.
 *
 * @param options - Error details; `isRetryable` defaults to false.
 * @returns An `APICallError` suitable for AI SDK retry/error handling.
 */
export function createMCPAPICallError(
	options: MCPSamplingErrorOptions & {
		statusCode?: number;
		responseHeaders?: Record<string, string>;
	}
): APICallError {
	return new APICallError({
		message: options.message || 'MCP API call failed',
		// APICallError requires a url and the request body values. MCP
		// sampling has no HTTP endpoint, so use a descriptive placeholder
		// URL and an empty body.
		url: 'mcp://sampling',
		requestBodyValues: {},
		cause: options.cause,
		data: options.responseData,
		isRetryable: options.isRetryable ?? false,
		responseHeaders: options.responseHeaders,
		statusCode: options.statusCode
	});
}
/**
 * Build a `LoadAPIKeyError` for MCP session authentication failures.
 */
export function createMCPAuthenticationError(
	options: MCPSamplingErrorOptions = {}
): LoadAPIKeyError {
	const message = options.message || 'MCP session authentication failed';
	return new LoadAPIKeyError({ message });
}
/**
 * Wrap a session-level failure (missing session, missing capabilities) in an
 * `MCPSamplingError`.
 */
export function createMCPSessionError(
	options: MCPSamplingErrorOptions = {}
): MCPSamplingError {
	const message = options.message || 'MCP session error';
	return new MCPSamplingError(message, options);
}
/**
 * Map an arbitrary thrown value to an appropriate AI SDK error type.
 *
 * Classification is based on the error message, matched case-insensitively
 * (previously "Unauthorized" or "Timed Out" would fall through to the
 * generic branch): authentication failures map to LoadAPIKeyError, timeouts
 * to a retryable APICallError, missing models to NoSuchModelError, and
 * anything else to a non-retryable APICallError.
 *
 * @param error - Any thrown value (Error or otherwise).
 * @returns An Error instance from the AI SDK error hierarchy (or an
 *   MCPSamplingError passed through unchanged).
 */
export function mapMCPError(error: unknown): Error {
	if (error instanceof MCPSamplingError) {
		return error;
	}
	if (error instanceof Error) {
		// Lowercase once so matching is case-insensitive.
		const message = error.message.toLowerCase();
		if (message.includes('unauthorized') || message.includes('authentication')) {
			return createMCPAuthenticationError({
				message: `MCP authentication failed: ${error.message}`,
				cause: error
			});
		}
		if (message.includes('timeout') || message.includes('timed out')) {
			// Timeouts are transient, so mark them retryable.
			return createMCPAPICallError({
				message: `MCP request timed out: ${error.message}`,
				cause: error,
				isRetryable: true
			});
		}
		if (message.includes('model') && message.includes('not found')) {
			return new NoSuchModelError({
				modelId: 'unknown',
				modelType: 'languageModel'
			});
		}
		return createMCPAPICallError({
			message: `MCP API error: ${error.message}`,
			cause: error,
			isRetryable: false
		});
	}
	// Non-Error throw (string, plain object, etc.).
	return createMCPAPICallError({
		message: 'Unknown MCP error occurred',
		cause: error,
		isRetryable: false
	});
}

View File

@@ -0,0 +1,32 @@
/**
 * MCP Sampling Provider for AI SDK v5
 *
 * Public entry point of @tm/ai-sdk-provider-mcp-sampling: re-exports the
 * provider factory, the language model class, the public types, the error
 * utilities, and the conversion helpers.
 */
export { createMCPSampling } from './mcp-sampling-provider.js';
export { MCPSamplingLanguageModel } from './mcp-sampling-language-model.js';
// Public type surface (type-only exports; erased at runtime)
export type {
	MCPSamplingModelId,
	MCPSamplingSettings,
	MCPSamplingLanguageModelOptions,
	MCPSession,
	MCPSamplingResponse
} from './types.js';
// Error classes and the MCP → AI SDK error mapping helpers
export {
	MCPSamplingError,
	createMCPAPICallError,
	createMCPAuthenticationError,
	createMCPSessionError,
	mapMCPError
} from './errors.js';
// Utility functions: JSON extraction and prompt/message conversion
export { extractJson } from './json-extractor.js';
export {
	convertToMCPFormat,
	convertFromMCPFormat,
	createPromptFromMessages
} from './message-converter.js';

View File

@@ -0,0 +1,56 @@
/**
* JSON extraction utilities for MCP Sampling provider
*/
/**
 * Extract a JSON payload from free-form model output.
 *
 * Tries, in order:
 *  1. A fenced code block (``` or ```json) — returns its inner text.
 *     (A second, redundant ```json-only match was removed: it could never
 *     fire because the first pattern already matched every fenced block.)
 *  2. The first `{...}` or `[...]` span in the text, whichever starts first.
 *  3. The whole trimmed text, if it parses as JSON.
 * Falls back to returning the trimmed text unchanged so the caller surfaces
 * the JSON parse error with the original content.
 *
 * @param text - Raw response text that may contain JSON.
 * @returns The extracted JSON string (not parsed or validated here).
 * @throws Error if `text` is empty or not a string.
 */
export function extractJson(text: string): string {
	if (!text || typeof text !== 'string') {
		throw new Error('Input text is empty or not a string');
	}
	const trimmedText = text.trim();
	// Prefer JSON inside a fenced code block (with or without a "json" tag).
	const codeBlockMatch = trimmedText.match(/```(?:json)?\s*([\s\S]*?)\s*```/i);
	if (codeBlockMatch) {
		return codeBlockMatch[1].trim();
	}
	// Look for object/array spans; pick whichever appears earlier.
	const jsonObjectMatch = trimmedText.match(/\{[\s\S]*\}/);
	const jsonArrayMatch = trimmedText.match(/\[[\s\S]*\]/);
	if (jsonObjectMatch && jsonArrayMatch) {
		const objectIndex = trimmedText.indexOf(jsonObjectMatch[0]);
		const arrayIndex = trimmedText.indexOf(jsonArrayMatch[0]);
		return objectIndex < arrayIndex ? jsonObjectMatch[0] : jsonArrayMatch[0];
	}
	if (jsonObjectMatch) {
		return jsonObjectMatch[0];
	}
	if (jsonArrayMatch) {
		return jsonArrayMatch[0];
	}
	// Last resort: accept the whole text if it is itself valid JSON.
	try {
		JSON.parse(trimmedText);
		return trimmedText;
	} catch {
		// Return as-is; the caller handles JSON parsing errors.
		return trimmedText;
	}
}

View File

@@ -0,0 +1,341 @@
/**
* MCP Sampling Language Model implementation for AI SDK v5
*/
import type {
LanguageModelV2,
LanguageModelV2CallOptions,
LanguageModelV2CallWarning
} from '@ai-sdk/provider';
import { NoSuchModelError } from '@ai-sdk/provider';
import { generateId } from '@ai-sdk/provider-utils';
import {
createMCPAPICallError,
createMCPSessionError,
mapMCPError
} from './errors.js';
import { extractJson } from './json-extractor.js';
import {
convertFromMCPFormat,
convertToMCPFormat,
createPromptFromMessages
} from './message-converter.js';
import type {
MCPSamplingLanguageModelOptions,
MCPSamplingModelId,
MCPSamplingSettings,
MCPSession
} from './types.js';
/**
 * MCP Sampling Language Model implementation for AI SDK v5.
 *
 * Implements the LanguageModelV2 interface (specification version 'v2') on
 * top of an MCP session's sampling capability. Streaming is simulated: the
 * full response is generated first and then replayed in fixed-size chunks.
 */
export class MCPSamplingLanguageModel implements LanguageModelV2 {
	readonly specificationVersion = 'v2' as const;
	// V1-era capability hints retained for any caller that still inspects
	// them; the V2 interface itself does not require these properties.
	readonly defaultObjectGenerationMode = 'json' as const;
	readonly supportsImageUrls = false;
	readonly supportsStructuredOutputs = true;
	// No natively supported URLs — all content is passed through as text.
	readonly supportedUrls: Record<string, RegExp[]> = {};
	readonly modelId: MCPSamplingModelId;
	readonly settings: MCPSamplingSettings;
	readonly session: MCPSession;

	/**
	 * @param options - Model id, optional default settings, and the MCP
	 *   session used for sampling.
	 * @throws NoSuchModelError if the model id is blank.
	 * @throws MCPSamplingError if the session is missing or lacks client
	 *   sampling capabilities.
	 */
	constructor(options: MCPSamplingLanguageModelOptions & { session: MCPSession }) {
		this.modelId = options.id;
		this.settings = options.settings ?? {};
		this.session = options.session;
		// Validate model ID format
		if (
			!this.modelId ||
			typeof this.modelId !== 'string' ||
			this.modelId.trim() === ''
		) {
			throw new NoSuchModelError({
				modelId: this.modelId,
				modelType: 'languageModel'
			});
		}
		// Validate MCP session
		this.validateSession();
	}

	get provider(): string {
		return 'mcp-sampling';
	}

	/**
	 * Validate that the MCP session exists and advertises client-side
	 * sampling capabilities; throws an MCPSamplingError otherwise.
	 */
	private validateSession(): void {
		if (!this.session) {
			throw createMCPSessionError({
				message: 'MCP session is required'
			});
		}
		if (!this.session.clientCapabilities?.sampling) {
			throw createMCPSessionError({
				message: 'MCP session must have client sampling capabilities'
			});
		}
	}

	/**
	 * Generate warnings for call options that MCP sampling cannot honor.
	 * Unsupported parameters are ignored rather than rejected.
	 */
	private generateWarnings(
		options: LanguageModelV2CallOptions
	): LanguageModelV2CallWarning[] {
		const warnings: LanguageModelV2CallWarning[] = [];
		const unsupportedParams: string[] = [];
		if (options.topP !== undefined) unsupportedParams.push('topP');
		if (options.topK !== undefined) unsupportedParams.push('topK');
		if (options.presencePenalty !== undefined)
			unsupportedParams.push('presencePenalty');
		if (options.frequencyPenalty !== undefined)
			unsupportedParams.push('frequencyPenalty');
		if (options.stopSequences !== undefined && options.stopSequences.length > 0)
			unsupportedParams.push('stopSequences');
		if (options.seed !== undefined) unsupportedParams.push('seed');
		for (const param of unsupportedParams) {
			warnings.push({
				type: 'unsupported-setting',
				setting: param as
					| 'topP'
					| 'topK'
					| 'presencePenalty'
					| 'frequencyPenalty'
					| 'stopSequences'
					| 'seed',
				details: `MCP Sampling does not support the ${param} parameter. It will be ignored.`
			});
		}
		return warnings;
	}

	/**
	 * Generate text using MCP session sampling.
	 *
	 * @param options - AI SDK v2 call options; `maxOutputTokens` and
	 *   `temperature` fall back to the provider-level settings.
	 * @throws A mapped AI SDK error (via mapMCPError) on sampling failure.
	 */
	async doGenerate(options: LanguageModelV2CallOptions) {
		// Handle abort signal early
		if (options.abortSignal?.aborted) {
			throw options.abortSignal.reason || new Error('Request aborted');
		}
		const prompt = createPromptFromMessages(options.prompt);
		const warnings = this.generateWarnings(options);
		// FIX: AI SDK v5 renamed `maxTokens` to `maxOutputTokens` on the v2
		// call options; reading `options.maxTokens` always yielded undefined,
		// so the per-call token limit was silently dropped.
		const temperature = options.temperature ?? this.settings.temperature;
		const maxOutputTokens = options.maxOutputTokens ?? this.settings.maxTokens;
		try {
			// Convert AI SDK prompt to MCP format
			const { messages, systemPrompt } = convertToMCPFormat(options.prompt);
			const response = await this.session.requestSampling(
				{
					messages,
					systemPrompt,
					temperature,
					maxTokens: maxOutputTokens,
					includeContext: 'thisServer'
				},
				{
					timeout: this.settings.timeout ?? 240000 // 4 minutes default
				}
			);
			// Convert MCP response back to AI SDK format
			const result = convertFromMCPFormat(response);
			// Extract JSON when the caller requested structured output.
			// FIX: AI SDK v5 signals JSON output via `responseFormat`, not the
			// removed V1 `mode` option; the legacy shape is still accepted in
			// case an older caller passes it.
			let text = result.text || '';
			const isLegacyObjectJson = (
				o: unknown
			): o is { mode: { type: 'object-json' } } =>
				!!o &&
				typeof o === 'object' &&
				'mode' in o &&
				(o as { mode?: { type?: string } }).mode?.type === 'object-json';
			const wantsJson =
				options.responseFormat?.type === 'json' || isLegacyObjectJson(options);
			if (wantsJson && text) {
				text = extractJson(text);
			}
			return {
				content: [
					{
						type: 'text' as const,
						text: text || ''
					}
				],
				usage: result.usage
					? {
							inputTokens: result.usage.inputTokens,
							outputTokens: result.usage.outputTokens,
							totalTokens:
								result.usage.inputTokens + result.usage.outputTokens
						}
					: { inputTokens: 0, outputTokens: 0, totalTokens: 0 },
				finishReason: (result.finishReason as any) || 'stop',
				// rawCall is not part of the V2 result contract but is kept as
				// extra debugging metadata for existing consumers.
				rawCall: {
					rawPrompt: prompt,
					rawSettings: { temperature, maxTokens: maxOutputTokens }
				},
				warnings: warnings,
				response: {
					id: generateId(),
					timestamp: new Date(),
					modelId: this.modelId
				},
				request: {
					body: JSON.stringify({ messages, systemPrompt })
				},
				providerMetadata: {
					'mcp-sampling': {
						modelId: this.modelId,
						sessionCapabilities: this.session.clientCapabilities
					}
				}
			};
		} catch (error) {
			throw mapMCPError(error);
		}
	}

	/**
	 * Stream text using MCP sampling.
	 * MCP sampling has no native streaming, so the full response is
	 * generated first and then emitted as text-delta chunks with a small
	 * delay between them.
	 */
	async doStream(options: LanguageModelV2CallOptions) {
		const prompt = createPromptFromMessages(options.prompt);
		const warnings = this.generateWarnings(options);
		const stream = new ReadableStream({
			start: async (controller) => {
				let abortListener: (() => void) | undefined;
				try {
					// Handle abort signal that fired before we started
					if (options.abortSignal?.aborted) {
						throw options.abortSignal.reason || new Error('Request aborted');
					}
					// Surface aborts that happen mid-stream as an error part
					if (options.abortSignal) {
						abortListener = () => {
							controller.enqueue({
								type: 'error',
								error:
									options.abortSignal?.reason || new Error('Request aborted')
							});
							controller.close();
						};
						options.abortSignal.addEventListener('abort', abortListener, {
							once: true
						});
					}
					// Emit stream-start with warnings
					controller.enqueue({ type: 'stream-start', warnings });
					// Generate the full response first
					const result = await this.doGenerate(options);
					// Emit response metadata
					controller.enqueue({
						type: 'response-metadata',
						id: result.response.id,
						timestamp: result.response.timestamp,
						modelId: result.response.modelId
					});
					// Simulate streaming by chunking the text
					const content = result.content || [];
					const text =
						content.length > 0 && content[0].type === 'text'
							? content[0].text
							: '';
					const chunkSize = 50; // Characters per chunk
					let textPartId: string | undefined;
					// Open a text part only when there is content to emit
					if (text.length > 0) {
						textPartId = generateId();
						controller.enqueue({
							type: 'text-start',
							id: textPartId
						});
					}
					for (let i = 0; i < text.length; i += chunkSize) {
						// Check for abort during streaming
						if (options.abortSignal?.aborted) {
							throw options.abortSignal.reason || new Error('Request aborted');
						}
						controller.enqueue({
							type: 'text-delta',
							id: textPartId!,
							delta: text.slice(i, i + chunkSize)
						});
						// Small delay so consumers observe incremental chunks
						await new Promise((resolve) => setTimeout(resolve, 20));
					}
					// Close text part if opened
					if (textPartId) {
						controller.enqueue({
							type: 'text-end',
							id: textPartId
						});
					}
					// Emit finish event
					controller.enqueue({
						type: 'finish',
						finishReason: result.finishReason,
						usage: result.usage,
						providerMetadata: result.providerMetadata
					});
					controller.close();
				} catch (error) {
					controller.enqueue({
						type: 'error',
						error: mapMCPError(error)
					});
					controller.close();
				} finally {
					// Clean up abort listener
					if (options.abortSignal && abortListener) {
						options.abortSignal.removeEventListener('abort', abortListener);
					}
				}
			},
			cancel: () => {
				// Nothing to cancel upstream: generation is a single request.
			}
		});
		return {
			stream,
			request: {
				body: prompt
			}
		};
	}
}

View File

@@ -0,0 +1,64 @@
/**
* MCP Sampling Provider for AI SDK v5
*/
import type { LanguageModelV2 } from '@ai-sdk/provider';
import { NoSuchModelError } from '@ai-sdk/provider';
import { MCPSamplingLanguageModel } from './mcp-sampling-language-model.js';
import type {
MCPSamplingModelId,
MCPSamplingSettings,
MCPSession
} from './types.js';
/**
 * Create an MCP Sampling provider instance.
 *
 * The returned value is callable (`provider(modelId, settings?)`) and also
 * exposes `languageModel` / `chat` accessors, matching the AI SDK provider
 * shape.
 *
 * @param options.session - Active MCP session with sampling capabilities.
 * @param options.defaultSettings - Settings applied to every model unless
 *   overridden per call.
 * @throws Error if no session is supplied.
 */
export function createMCPSampling(options: {
	session: MCPSession;
	defaultSettings?: MCPSamplingSettings;
}) {
	if (!options.session) {
		throw new Error('MCP session is required');
	}

	/**
	 * Create an MCP Sampling language model for the given model id.
	 */
	function languageModel(
		modelId: MCPSamplingModelId,
		settings?: MCPSamplingSettings
	): LanguageModelV2 {
		// Guard against `new provider(...)`; the factory is a plain function.
		if (new.target) {
			throw new Error(
				'The MCP Sampling model function cannot be called with the new keyword.'
			);
		}
		// Validate model ID
		if (!modelId || typeof modelId !== 'string' || modelId.trim() === '') {
			throw new NoSuchModelError({
				modelId: modelId || 'undefined',
				modelType: 'languageModel'
			});
		}
		return new MCPSamplingLanguageModel({
			id: modelId,
			// Per-call settings override the provider-level defaults.
			settings: {
				...options.defaultSettings,
				...settings
			},
			session: options.session
		});
	}

	// FIX: assigning properties onto a plain typed function value is a
	// TypeScript error; Object.assign produces a correctly-typed callable
	// with the named accessors attached.
	const provider = Object.assign(languageModel, {
		languageModel,
		chat: languageModel // Alias for compatibility
	});
	return provider;
}

View File

@@ -0,0 +1,125 @@
/**
* Message conversion utilities for MCP Sampling provider
*/
import type { LanguageModelV2Prompt } from '@ai-sdk/provider';
import type { MCPSamplingResponse } from './types.js';
/**
 * Convert an AI SDK v2 prompt into the MCP sampling message format.
 *
 * System messages are collected into a single `systemPrompt`, joined with
 * blank lines when there is more than one (previously a later system message
 * silently overwrote earlier ones). Non-text content parts (images, tool
 * calls, …) are skipped.
 *
 * @param prompt - AI SDK prompt: an array of role/content messages.
 * @returns MCP-style `messages` plus an optional `systemPrompt`.
 */
export function convertToMCPFormat(prompt: LanguageModelV2Prompt): {
	messages: Array<{
		role: 'user' | 'assistant' | 'system';
		content: string;
	}>;
	systemPrompt?: string;
} {
	const messages: Array<{
		role: 'user' | 'assistant' | 'system';
		content: string;
	}> = [];
	const systemParts: string[] = [];
	for (const message of prompt) {
		if (message.role === 'system') {
			// MCP handles system text separately from the message list.
			systemParts.push(message.content);
		} else if (message.role === 'user' || message.role === 'assistant') {
			// Flatten content parts to plain text; skip non-text parts.
			let content = '';
			if (typeof message.content === 'string') {
				content = message.content;
			} else if (Array.isArray(message.content)) {
				content = message.content
					.map((part) => (part.type === 'text' ? part.text : ''))
					.join('');
			}
			messages.push({
				role: message.role,
				content
			});
		}
	}
	return {
		messages,
		systemPrompt: systemParts.length > 0 ? systemParts.join('\n\n') : undefined
	};
}
/**
 * Convert an MCP sampling response into the provider's normalized shape.
 *
 * Concatenates all text content parts, maps the MCP stop reason onto an AI
 * SDK finish reason ('maxTokens' → 'length'; everything else → 'stop'), and
 * defaults missing token counts to zero.
 */
export function convertFromMCPFormat(response: {
	content: Array<{
		type: 'text';
		text: string;
	}>;
	usage?: {
		inputTokens?: number;
		outputTokens?: number;
	};
	stopReason?: 'endTurn' | 'stopSequence' | 'maxTokens';
}): MCPSamplingResponse {
	// Gather the text of every text part, in order.
	const textParts: string[] = [];
	for (const item of response.content ?? []) {
		if (item.type === 'text') {
			textParts.push(item.text);
		}
	}
	// Only 'maxTokens' maps to 'length'; all other stop reasons (including
	// unknown/missing ones) are treated as a normal stop.
	const finishReason: string =
		response.stopReason === 'maxTokens' ? 'length' : 'stop';
	let usage: MCPSamplingResponse['usage'];
	if (response.usage) {
		usage = {
			inputTokens: response.usage.inputTokens || 0,
			outputTokens: response.usage.outputTokens || 0
		};
	}
	return {
		text: textParts.join(''),
		finishReason,
		usage
	};
}
/**
 * Render an AI SDK prompt as a flat "ROLE: content" string, with messages
 * separated by blank lines. Intended for debugging/logging; non-text parts
 * are rendered as the literal marker "[non-text content]".
 */
export function createPromptFromMessages(prompt: LanguageModelV2Prompt): string {
	const rendered: string[] = [];
	for (const message of prompt) {
		const role = message.role.toUpperCase();
		let content = '';
		if (typeof message.content === 'string') {
			content = message.content;
		} else if (Array.isArray(message.content)) {
			const pieces: string[] = [];
			for (const part of message.content) {
				pieces.push(part.type === 'text' ? part.text : '[non-text content]');
			}
			content = pieces.join('');
		}
		rendered.push(`${role}: ${content}`);
	}
	return rendered.join('\n\n');
}

View File

@@ -0,0 +1,62 @@
/**
* Type definitions for MCP Sampling provider
*/
/** Constructor options for MCPSamplingLanguageModel. */
export interface MCPSamplingLanguageModelOptions {
	/** MCP model identifier (opaque string chosen by the caller). */
	id: string;
	/** Provider-specific settings applied when a call omits them. */
	settings?: MCPSamplingSettings;
}

/** Default generation settings for the MCP sampling provider. */
export interface MCPSamplingSettings {
	/** Temperature setting (0-1) */
	temperature?: number;
	/** Maximum tokens to generate */
	maxTokens?: number;
	/** API timeout in milliseconds */
	timeout?: number;
}

/** Opaque model identifier accepted by the provider factory. */
export type MCPSamplingModelId = string;

/**
 * Structural contract for an MCP session with sampling support.
 * NOTE(review): this mirrors only the subset of the MCP SDK session that
 * this package actually uses — confirm against the real session type when
 * the SDK changes.
 */
export interface MCPSession {
	/** Capabilities advertised by the connected MCP client. */
	clientCapabilities?: {
		/** True when the client supports sampling requests. */
		sampling?: boolean;
	};
	/** Request a sampled completion from the connected client. */
	requestSampling(
		request: {
			messages: Array<{
				role: 'user' | 'assistant' | 'system';
				content: string;
			}>;
			systemPrompt?: string;
			temperature?: number;
			maxTokens?: number;
			includeContext?: 'none' | 'thisServer' | 'allServers';
		},
		options?: {
			/** Per-request timeout in milliseconds. */
			timeout?: number;
		}
	): Promise<{
		content: {
			type: 'text';
			text: string;
		}[];
		usage?: {
			inputTokens?: number;
			outputTokens?: number;
		};
		stopReason?: 'endTurn' | 'stopSequence' | 'maxTokens';
	}>;
}

/** Normalized response produced by convertFromMCPFormat. */
export interface MCPSamplingResponse {
	/** Concatenated text of all text content parts. */
	text: string;
	/** AI SDK finish reason (e.g. 'stop' or 'length'). */
	finishReason?: string;
	usage?: {
		inputTokens?: number;
		outputTokens?: number;
	};
	warnings?: string[];
}

View File

@@ -0,0 +1,9 @@
{
"extends": "../../tsconfig.json",
"compilerOptions": {
"outDir": "./dist",
"rootDir": "./src"
},
"include": ["src/**/*"],
"exclude": ["node_modules", "dist", "**/*.test.ts", "**/*.spec.ts"]
}

69
test-mcp-v2.js Normal file
View File

@@ -0,0 +1,69 @@
/**
* Quick test to verify the MCP provider uses v2 specification
*/
// Mock MCP session for testing — satisfies the MCPSession shape and returns
// a canned sampling response.
const mockSession = {
	clientCapabilities: {
		sampling: true
	},
	async requestSampling(_request, _options) {
		return {
			content: [
				{
					type: 'text',
					text: 'Test response'
				}
			],
			usage: {
				inputTokens: 10,
				outputTokens: 5
			},
			stopReason: 'endTurn'
		};
	}
};
// Test the import and basic functionality: create a provider + model and
// confirm the model reports specification version 'v2'. Returns true on
// success, false on any failure (never throws).
async function testMCPProvider() {
	try {
		// Import the new package
		const mod = await import(
			'./packages/ai-sdk-provider-mcp-sampling/src/index.js'
		);
		const { createMCPSampling } = mod;
		console.log('✅ Successfully imported createMCPSampling');
		// Create provider
		const provider = createMCPSampling({
			session: mockSession,
			defaultSettings: { temperature: 0.7, maxTokens: 1000 }
		});
		console.log('✅ Successfully created MCP provider');
		// Create model
		const model = provider('test-model');
		console.log('✅ Successfully created language model');
		console.log(`✅ Specification version: ${model.specificationVersion}`);
		// Verify it's v2
		if (model.specificationVersion === 'v2') {
			console.log('🎉 SUCCESS: MCP provider now uses v2 specification!');
			return true;
		}
		console.log(`❌ FAIL: Expected v2, got ${model.specificationVersion}`);
		return false;
	} catch (error) {
		console.log(`❌ Test failed: ${error.message}`);
		console.log(error.stack);
		return false;
	}
}
// Run the test and use the outcome as the process exit code.
testMCPProvider().then((ok) => process.exit(ok ? 0 : 1));