diff --git a/packages/cli/src/types/inquirer.d.ts b/packages/cli/src/types/inquirer.d.ts
new file mode 100644
index 0000000..a833a6e
--- /dev/null
+++ b/packages/cli/src/types/inquirer.d.ts
@@ -0,0 +1,48 @@
+// Type declarations for @inquirer packages
+declare module '@inquirer/input' {
+  import { DistinctChoice } from '@inquirer/core';
+  interface PromptConfig {
+    message: string;
+    default?: string;
+  }
+  export default function prompt(config: PromptConfig): Promise<string>;
+}
+
+declare module '@inquirer/confirm' {
+  interface PromptConfig {
+    message: string;
+    default?: boolean;
+  }
+  export default function prompt(config: PromptConfig): Promise<boolean>;
+}
+
+declare module '@inquirer/select' {
+  export default function prompt<T>(config: {
+    message: string;
+    choices: Array<{ name: string; value: T; description?: string }>;
+    default?: T;
+  }): Promise<T>;
+}
+
+declare module '@inquirer/password' {
+  interface PromptConfig {
+    message: string;
+    mask?: string;
+  }
+  export default function prompt(config: PromptConfig): Promise<string>;
+}
+
+declare module '@inquirer/checkbox' {
+  export default function prompt<T>(config: {
+    message: string;
+    choices: Array<{ name: string; value: T; checked?: boolean }>;
+  }): Promise<T[]>;
+}
+
+declare module '@inquirer/editor' {
+  interface PromptConfig {
+    message: string;
+    default?: string;
+  }
+  export default function prompt(config: PromptConfig): Promise<string>;
+}
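A usage sketch for these declarations (illustrative only, not part of the diff; prompt texts and values are invented):

    import input from '@inquirer/input';
    import confirm from '@inquirer/confirm';
    import select from '@inquirer/select';

    async function demo() {
      // Resolves to string, per the '@inquirer/input' declaration above
      const name = await input({ message: 'Provider name?', default: 'openrouter' });
      // Resolves to boolean, per '@inquirer/confirm'
      const ok = await confirm({ message: `Register ${name}?`, default: true });
      // The generic flows from choices[].value into the resolved type
      const format = await select<'cjs' | 'esm'>({
        message: 'Module format?',
        choices: [
          { name: 'CommonJS', value: 'cjs' },
          { name: 'ES Modules', value: 'esm' },
        ],
      });
      return { name, ok, format };
    }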
"dotenv", "@fastify/cors", "undici"], +}; + +const cjsConfig: esbuild.BuildOptions = { + ...baseConfig, + outdir: "dist/cjs", + format: "cjs", + outExtension: { ".js": ".cjs" }, +}; + +const esmConfig: esbuild.BuildOptions = { + ...baseConfig, + outdir: "dist/esm", + format: "esm", + outExtension: { ".js": ".mjs" }, +}; + +async function build() { + console.log("Building CJS and ESM versions..."); + + const cjsCtx = await esbuild.context(cjsConfig); + const esmCtx = await esbuild.context(esmConfig); + + if (watch) { + console.log("Watching for changes..."); + await Promise.all([ + cjsCtx.watch(), + esmCtx.watch(), + ]); + } else { + await Promise.all([ + cjsCtx.rebuild(), + esmCtx.rebuild(), + ]); + + await Promise.all([ + cjsCtx.dispose(), + esmCtx.dispose(), + ]); + + console.log("✅ Build completed successfully!"); + console.log(" - CJS: dist/cjs/server.cjs"); + console.log(" - ESM: dist/esm/server.mjs"); + } +} + +build().catch((err) => { + console.error(err); + process.exit(1); +}); diff --git a/packages/core/src/api/middleware.ts b/packages/core/src/api/middleware.ts new file mode 100644 index 0000000..b04e74a --- /dev/null +++ b/packages/core/src/api/middleware.ts @@ -0,0 +1,39 @@ +import { FastifyRequest, FastifyReply } from "fastify"; + +export interface ApiError extends Error { + statusCode?: number; + code?: string; + type?: string; +} + +export function createApiError( + message: string, + statusCode: number = 500, + code: string = "internal_error", + type: string = "api_error" +): ApiError { + const error = new Error(message) as ApiError; + error.statusCode = statusCode; + error.code = code; + error.type = type; + return error; +} + +export async function errorHandler( + error: ApiError, + request: FastifyRequest, + reply: FastifyReply +) { + request.log.error(error); + + const statusCode = error.statusCode || 500; + const response = { + error: { + message: error.message + error.stack || "Internal Server Error", + type: error.type || "api_error", + code: error.code || "internal_error", + }, + }; + + return reply.code(statusCode).send(response); +} diff --git a/packages/core/src/api/routes.ts b/packages/core/src/api/routes.ts new file mode 100644 index 0000000..5896a57 --- /dev/null +++ b/packages/core/src/api/routes.ts @@ -0,0 +1,571 @@ +import { + FastifyInstance, + FastifyPluginAsync, + FastifyRequest, + FastifyReply, +} from "fastify"; +import { RegisterProviderRequest, LLMProvider } from "@/types/llm"; +import { sendUnifiedRequest } from "@/utils/request"; +import { createApiError } from "./middleware"; +import { version } from "../../package.json"; + +/** + * 处理transformer端点的主函数 + * 协调整个请求处理流程:验证提供者、处理请求转换器、发送请求、处理响应转换器、格式化响应 + */ +async function handleTransformerEndpoint( + req: FastifyRequest, + reply: FastifyReply, + fastify: FastifyInstance, + transformer: any +) { + const body = req.body as any; + const providerName = req.provider!; + const provider = fastify._server!.providerService.getProvider(providerName); + + // 验证提供者是否存在 + if (!provider) { + throw createApiError( + `Provider '${providerName}' not found`, + 404, + "provider_not_found" + ); + } + + // 处理请求转换器链 + const { requestBody, config, bypass } = await processRequestTransformers( + body, + provider, + transformer, + req.headers, + { + req, + } + ); + + // 发送请求到LLM提供者 + const response = await sendRequestToProvider( + requestBody, + config, + provider, + fastify, + bypass, + transformer, + { + req, + } + ); + + // 处理响应转换器链 + const finalResponse = await processResponseTransformers( + requestBody, + response, + 
diff --git a/packages/core/src/api/routes.ts b/packages/core/src/api/routes.ts
new file mode 100644
index 0000000..5896a57
--- /dev/null
+++ b/packages/core/src/api/routes.ts
@@ -0,0 +1,571 @@
+import {
+  FastifyInstance,
+  FastifyPluginAsync,
+  FastifyRequest,
+  FastifyReply,
+} from "fastify";
+import { RegisterProviderRequest, LLMProvider } from "@/types/llm";
+import { sendUnifiedRequest } from "@/utils/request";
+import { createApiError } from "./middleware";
+import { version } from "../../package.json";
+
+/**
+ * Main handler for a transformer endpoint.
+ * Orchestrates the full request flow: validate the provider, run request transformers, send the request, run response transformers, format the response.
+ */
+async function handleTransformerEndpoint(
+  req: FastifyRequest,
+  reply: FastifyReply,
+  fastify: FastifyInstance,
+  transformer: any
+) {
+  const body = req.body as any;
+  const providerName = req.provider!;
+  const provider = fastify._server!.providerService.getProvider(providerName);
+
+  // Make sure the provider exists
+  if (!provider) {
+    throw createApiError(
+      `Provider '${providerName}' not found`,
+      404,
+      "provider_not_found"
+    );
+  }
+
+  // Run the request transformer chain
+  const { requestBody, config, bypass } = await processRequestTransformers(
+    body,
+    provider,
+    transformer,
+    req.headers,
+    {
+      req,
+    }
+  );
+
+  // Send the request to the LLM provider
+  const response = await sendRequestToProvider(
+    requestBody,
+    config,
+    provider,
+    fastify,
+    bypass,
+    transformer,
+    {
+      req,
+    }
+  );
+
+  // Run the response transformer chain
+  const finalResponse = await processResponseTransformers(
+    requestBody,
+    response,
+    provider,
+    transformer,
+    bypass,
+    {
+      req,
+    }
+  );
+
+  // Format and return the response
+  return formatResponse(finalResponse, reply, body);
+}
+
+/**
+ * Run the request transformer chain.
+ * Executes transformRequestOut, provider-level transformers, and model-specific transformers in order.
+ * Returns the processed request body, the request config, and a flag indicating whether transformers were bypassed.
+ */
+async function processRequestTransformers(
+  body: any,
+  provider: any,
+  transformer: any,
+  headers: any,
+  context: any
+) {
+  let requestBody = body;
+  let config: any = {};
+  let bypass = false;
+
+  // Check whether transformers should be bypassed (pass-through mode)
+  bypass = shouldBypassTransformers(provider, transformer, body);
+
+  if (bypass) {
+    if (headers instanceof Headers) {
+      headers.delete("content-length");
+    } else {
+      delete headers["content-length"];
+    }
+    config.headers = headers;
+  }
+
+  // Run the endpoint transformer's transformRequestOut
+  if (!bypass && typeof transformer.transformRequestOut === "function") {
+    const transformOut = await transformer.transformRequestOut(requestBody);
+    if (transformOut.body) {
+      requestBody = transformOut.body;
+      config = transformOut.config || {};
+    } else {
+      requestBody = transformOut;
+    }
+  }
+
+  // Run provider-level transformers
+  if (!bypass && provider.transformer?.use?.length) {
+    for (const providerTransformer of provider.transformer.use) {
+      if (
+        !providerTransformer ||
+        typeof providerTransformer.transformRequestIn !== "function"
+      ) {
+        continue;
+      }
+      const transformIn = await providerTransformer.transformRequestIn(
+        requestBody,
+        provider,
+        context
+      );
+      if (transformIn.body) {
+        requestBody = transformIn.body;
+        config = { ...config, ...transformIn.config };
+      } else {
+        requestBody = transformIn;
+      }
+    }
+  }
+
+  // Run model-specific transformers
+  if (!bypass && provider.transformer?.[body.model]?.use?.length) {
+    for (const modelTransformer of provider.transformer[body.model].use) {
+      if (
+        !modelTransformer ||
+        typeof modelTransformer.transformRequestIn !== "function"
+      ) {
+        continue;
+      }
+      requestBody = await modelTransformer.transformRequestIn(
+        requestBody,
+        provider,
+        context
+      );
+    }
+  }
+
+  return { requestBody, config, bypass };
+}
+
+/**
+ * Decide whether to bypass transformers (pass-through mode).
+ * When the provider uses exactly one transformer and it is the same as the endpoint's transformer, the conversion steps are skipped.
+ */
+function shouldBypassTransformers(
+  provider: any,
+  transformer: any,
+  body: any
+): boolean {
+  return (
+    provider.transformer?.use?.length === 1 &&
+    provider.transformer.use[0].name === transformer.name &&
+    (!provider.transformer?.[body.model]?.use.length ||
+      (provider.transformer?.[body.model]?.use.length === 1 &&
+        provider.transformer?.[body.model]?.use[0].name === transformer.name))
+  );
+}
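To make the chain concrete, a pass-through transformer compatible with these hooks might look like this (illustrative sketch; the header name is invented and the optional context parameter is omitted):

    import { Transformer } from "@/types/transformer";

    export class TagTransformer implements Transformer {
      name = "tag";

      // Called for provider-level and model-level entries in the chain above
      async transformRequestIn(request: any, provider: any) {
        return {
          body: request,
          config: { headers: { "x-request-tag": provider.name } }, // invented header
        };
      }

      async transformResponseOut(response: Response): Promise<Response> {
        return response;
      }
    }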
+/**
+ * Send the request to the LLM provider.
+ * Handles authentication, builds the request config, sends the request, and surfaces provider errors.
+ */
+async function sendRequestToProvider(
+  requestBody: any,
+  config: any,
+  provider: any,
+  fastify: FastifyInstance,
+  bypass: boolean,
+  transformer: any,
+  context: any
+) {
+  const url = config.url || new URL(provider.baseUrl);
+
+  // Handle auth in pass-through mode
+  if (bypass && typeof transformer.auth === "function") {
+    const auth = await transformer.auth(requestBody, provider);
+    if (auth.body) {
+      requestBody = auth.body;
+      let headers = config.headers || {};
+      if (auth.config?.headers) {
+        headers = {
+          ...headers,
+          ...auth.config.headers,
+        };
+        delete headers.host;
+        delete auth.config.headers;
+      }
+      config = {
+        ...config,
+        ...auth.config,
+        headers,
+      };
+    } else {
+      requestBody = auth;
+    }
+  }
+
+  // Send the HTTP request
+  // Prepare headers
+  const requestHeaders: Record<string, string> = {
+    Authorization: `Bearer ${provider.apiKey}`,
+    ...(config?.headers || {}),
+  };
+
+  for (const key in requestHeaders) {
+    if (requestHeaders[key] === "undefined") {
+      delete requestHeaders[key];
+    } else if (
+      ["authorization", "Authorization"].includes(key) &&
+      requestHeaders[key]?.includes("undefined")
+    ) {
+      delete requestHeaders[key];
+    }
+  }
+
+  const response = await sendUnifiedRequest(
+    url,
+    requestBody,
+    {
+      httpsProxy: fastify._server!.configService.getHttpsProxy(),
+      ...config,
+      headers: JSON.parse(JSON.stringify(requestHeaders)),
+    },
+    context,
+    fastify.log
+  );
+
+  // Handle provider errors
+  if (!response.ok) {
+    const errorText = await response.text();
+    fastify.log.error(
+      `[provider_response_error] Error from provider (${provider.name}, ${requestBody.model}), status ${response.status}: ${errorText}`,
+    );
+    throw createApiError(
+      `Error from provider (${provider.name}, ${requestBody.model}), status ${response.status}: ${errorText}`,
+      response.status,
+      "provider_response_error"
+    );
+  }
+
+  return response;
+}
+
+/**
+ * Run the response transformer chain.
+ * Executes provider-level transformers, model-specific transformers, and the endpoint transformer's transformResponseIn in order.
+ */
+async function processResponseTransformers(
+  requestBody: any,
+  response: any,
+  provider: any,
+  transformer: any,
+  bypass: boolean,
+  context: any
+) {
+  let finalResponse = response;
+
+  // Run provider-level response transformers
+  if (!bypass && provider.transformer?.use?.length) {
+    for (const providerTransformer of Array.from(
+      provider.transformer.use
+    ).reverse()) {
+      if (
+        !providerTransformer ||
+        typeof providerTransformer.transformResponseOut !== "function"
+      ) {
+        continue;
+      }
+      finalResponse = await providerTransformer.transformResponseOut(
+        finalResponse,
+        context
+      );
+    }
+  }
+
+  // Run model-specific response transformers
+  if (!bypass && provider.transformer?.[requestBody.model]?.use?.length) {
+    for (const modelTransformer of Array.from(
+      provider.transformer[requestBody.model].use
+    ).reverse()) {
+      if (
+        !modelTransformer ||
+        typeof modelTransformer.transformResponseOut !== "function"
+      ) {
+        continue;
+      }
+      finalResponse = await modelTransformer.transformResponseOut(
+        finalResponse,
+        context
+      );
+    }
+  }
+
+  // Run the endpoint transformer's transformResponseIn
+  if (!bypass && transformer.transformResponseIn) {
+    finalResponse = await transformer.transformResponseIn(
+      finalResponse,
+      context
+    );
+  }
+
+  return finalResponse;
+}
+
+/**
+ * Format and return the response.
+ * Handles the HTTP status code and the formatting of streaming vs. plain JSON responses.
+ */
+function formatResponse(response: any, reply: FastifyReply, body: any) {
+  // Set the HTTP status code
+  if (!response.ok) {
+    reply.code(response.status);
+  }
+
+  // Handle streaming responses
+  const isStream = body.stream === true;
+  if (isStream) {
+    reply.header("Content-Type", "text/event-stream");
+    reply.header("Cache-Control", "no-cache");
+    reply.header("Connection", "keep-alive");
+    return reply.send(response.body);
+  } else {
+    // Handle plain JSON responses
+    return response.json();
+  }
+}
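A client-side sketch of calling a transformer endpoint (illustrative; the server.ts preHandler later in this diff splits `model` on the first comma into provider and model):

    const res = await fetch("http://127.0.0.1:3000/v1/messages", {
      method: "POST",
      headers: { "content-type": "application/json" },
      body: JSON.stringify({
        model: "openrouter,anthropic/claude-sonnet-4", // "<provider>,<model>" (values invented)
        max_tokens: 256,
        stream: true,
        messages: [{ role: "user", content: "hello" }],
      }),
    });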
enum: ["openai", "anthropic"] }, + baseUrl: { type: "string" }, + apiKey: { type: "string" }, + models: { type: "array", items: { type: "string" } }, + }, + required: ["id", "name", "type", "baseUrl", "apiKey", "models"], + }, + }, + }, + async ( + request: FastifyRequest<{ Body: RegisterProviderRequest }>, + reply: FastifyReply + ) => { + // Validation + const { name, baseUrl, apiKey, models } = request.body; + + if (!name?.trim()) { + throw createApiError( + "Provider name is required", + 400, + "invalid_request" + ); + } + + if (!baseUrl || !isValidUrl(baseUrl)) { + throw createApiError( + "Valid base URL is required", + 400, + "invalid_request" + ); + } + + if (!apiKey?.trim()) { + throw createApiError("API key is required", 400, "invalid_request"); + } + + if (!models || !Array.isArray(models) || models.length === 0) { + throw createApiError( + "At least one model is required", + 400, + "invalid_request" + ); + } + + // Check if provider already exists + if (fastify._server!.providerService.getProvider(request.body.name)) { + throw createApiError( + `Provider with name '${request.body.name}' already exists`, + 400, + "provider_exists" + ); + } + + return fastify._server!.providerService.registerProvider(request.body); + } + ); + + fastify.get("/providers", async () => { + return fastify._server!.providerService.getProviders(); + }); + + fastify.get( + "/providers/:id", + { + schema: { + params: { + type: "object", + properties: { id: { type: "string" } }, + required: ["id"], + }, + }, + }, + async (request: FastifyRequest<{ Params: { id: string } }>) => { + const provider = fastify._server!.providerService.getProvider( + request.params.id + ); + if (!provider) { + throw createApiError("Provider not found", 404, "provider_not_found"); + } + return provider; + } + ); + + fastify.put( + "/providers/:id", + { + schema: { + params: { + type: "object", + properties: { id: { type: "string" } }, + required: ["id"], + }, + body: { + type: "object", + properties: { + name: { type: "string" }, + type: { type: "string", enum: ["openai", "anthropic"] }, + baseUrl: { type: "string" }, + apiKey: { type: "string" }, + models: { type: "array", items: { type: "string" } }, + enabled: { type: "boolean" }, + }, + }, + }, + }, + async ( + request: FastifyRequest<{ + Params: { id: string }; + Body: Partial; + }>, + reply + ) => { + const provider = fastify._server!.providerService.updateProvider( + request.params.id, + request.body + ); + if (!provider) { + throw createApiError("Provider not found", 404, "provider_not_found"); + } + return provider; + } + ); + + fastify.delete( + "/providers/:id", + { + schema: { + params: { + type: "object", + properties: { id: { type: "string" } }, + required: ["id"], + }, + }, + }, + async (request: FastifyRequest<{ Params: { id: string } }>) => { + const success = fastify._server!.providerService.deleteProvider( + request.params.id + ); + if (!success) { + throw createApiError("Provider not found", 404, "provider_not_found"); + } + return { message: "Provider deleted successfully" }; + } + ); + + fastify.patch( + "/providers/:id/toggle", + { + schema: { + params: { + type: "object", + properties: { id: { type: "string" } }, + required: ["id"], + }, + body: { + type: "object", + properties: { enabled: { type: "boolean" } }, + required: ["enabled"], + }, + }, + }, + async ( + request: FastifyRequest<{ + Params: { id: string }; + Body: { enabled: boolean }; + }>, + reply + ) => { + const success = fastify._server!.providerService.toggleProvider( + request.params.id, + 
diff --git a/packages/core/src/server.ts b/packages/core/src/server.ts
new file mode 100644
index 0000000..796e9dc
--- /dev/null
+++ b/packages/core/src/server.ts
@@ -0,0 +1,207 @@
+import Fastify, {
+  FastifyInstance,
+  FastifyReply,
+  FastifyRequest,
+  FastifyPluginAsync,
+  FastifyPluginCallback,
+  FastifyPluginOptions,
+  FastifyRegisterOptions,
+  preHandlerHookHandler,
+  onRequestHookHandler,
+  preParsingHookHandler,
+  preValidationHookHandler,
+  preSerializationHookHandler,
+  onSendHookHandler,
+  onResponseHookHandler,
+  onTimeoutHookHandler,
+  onErrorHookHandler,
+  onRouteHookHandler,
+  onRegisterHookHandler,
+  onReadyHookHandler,
+  onListenHookHandler,
+  onCloseHookHandler,
+  FastifyBaseLogger,
+  FastifyLoggerOptions,
+  FastifyServerOptions,
+} from "fastify";
+import cors from "@fastify/cors";
+import { ConfigService, AppConfig } from "./services/config";
+import { errorHandler } from "./api/middleware";
+import { registerApiRoutes } from "./api/routes";
+import { ProviderService } from "./services/provider";
+import { TransformerService } from "./services/transformer";
+
+// Extend FastifyRequest to include custom properties
+declare module "fastify" {
+  interface FastifyRequest {
+    provider?: string;
+  }
+  interface FastifyInstance {
+    _server?: Server;
+  }
+}
+
+interface ServerOptions extends FastifyServerOptions {
+  initialConfig?: AppConfig;
+}
+
+// Application factory
+function createApp(options: FastifyServerOptions = {}): FastifyInstance {
+  const fastify = Fastify({
+    bodyLimit: 50 * 1024 * 1024,
+    ...options,
+  });
+
+  // Register error handler
+  fastify.setErrorHandler(errorHandler);
+
+  // Register CORS
+  fastify.register(cors);
+  return fastify;
+}
+
+// Server class
+class Server {
+  private app: FastifyInstance;
+  configService: ConfigService;
+  providerService!: ProviderService;
+  transformerService: TransformerService;
+
+  constructor(options: ServerOptions = {}) {
+    const { initialConfig, ...fastifyOptions } = options;
+    this.app = createApp({
+      ...fastifyOptions,
+      logger: fastifyOptions.logger ?? true,
+    });
+    this.configService = new ConfigService(options);
+    this.transformerService = new TransformerService(
+      this.configService,
+      this.app.log
+    );
+    this.transformerService.initialize().finally(() => {
+      this.providerService = new ProviderService(
+        this.configService,
+        this.transformerService,
+        this.app.log
+      );
+    });
+  }
+
+  async register(
+    plugin: FastifyPluginAsync | FastifyPluginCallback,
+    options?: FastifyRegisterOptions<FastifyPluginOptions>
+  ): Promise<void> {
+    await (this.app as any).register(plugin, options);
+  }
+
+  addHook(hookName: "onRequest", hookFunction: onRequestHookHandler): void;
+  addHook(hookName: "preParsing", hookFunction: preParsingHookHandler): void;
+  addHook(
+    hookName: "preValidation",
+    hookFunction: preValidationHookHandler
+  ): void;
+  addHook(hookName: "preHandler", hookFunction: preHandlerHookHandler): void;
+  addHook(
+    hookName: "preSerialization",
+    hookFunction: preSerializationHookHandler
+  ): void;
+  addHook(hookName: "onSend", hookFunction: onSendHookHandler): void;
+  addHook(hookName: "onResponse", hookFunction: onResponseHookHandler): void;
+  addHook(hookName: "onTimeout", hookFunction: onTimeoutHookHandler): void;
+  addHook(hookName: "onError", hookFunction: onErrorHookHandler): void;
+  addHook(hookName: "onRoute", hookFunction: onRouteHookHandler): void;
+  addHook(hookName: "onRegister", hookFunction: onRegisterHookHandler): void;
+  addHook(hookName: "onReady", hookFunction: onReadyHookHandler): void;
+  addHook(hookName: "onListen", hookFunction: onListenHookHandler): void;
+  addHook(hookName: "onClose", hookFunction: onCloseHookHandler): void;
+  public addHook(hookName: string, hookFunction: any): void {
+    this.app.addHook(hookName as any, hookFunction);
+  }
+
+  public async registerNamespace(name: string, options: any) {
+    if (!name) throw new Error("name is required");
+    const configService = new ConfigService(options);
+    const transformerService = new TransformerService(
+      configService,
+      this.app.log
+    );
+    await transformerService.initialize();
+    const providerService = new ProviderService(
+      configService,
+      transformerService,
+      this.app.log
+    );
+    this.app.register((fastify) => {
+      fastify.decorate('configService', configService);
+      fastify.decorate('transformerService', transformerService);
+      fastify.decorate('providerService', providerService);
+    }, { prefix: name });
+    this.app.register(registerApiRoutes, { prefix: name });
+  }
+
+  async start(): Promise<void> {
+    try {
+      this.app._server = this;
+
+      this.app.addHook("preHandler", (req, reply, done) => {
+        const url = new URL(`http://127.0.0.1${req.url}`);
+        if (url.pathname.endsWith("/v1/messages") && req.body) {
+          const body = req.body as any;
+          req.log.info({ data: body, type: "request body" });
+          if (!body.stream) {
+            body.stream = false;
+          }
+        }
+        done();
+      });
+
+      this.app.addHook(
+        "preHandler",
+        async (req: FastifyRequest, reply: FastifyReply) => {
+          const url = new URL(`http://127.0.0.1${req.url}`);
+          if (url.pathname.endsWith("/v1/messages") && req.body) {
+            try {
+              const body = req.body as any;
+              if (!body || !body.model) {
+                return reply
+                  .code(400)
+                  .send({ error: "Missing model in request body" });
+              }
+              const [provider, ...model] = body.model.split(",");
+              body.model = model.join(",");
+              req.provider = provider;
+              return;
+            } catch (err) {
+              req.log.error({ error: err }, "Error in modelProviderMiddleware:");
+              return reply.code(500).send({ error: "Internal server error" });
+            }
+          }
+        }
+      );
+
+      this.app.register(registerApiRoutes);
+
+      const address = await this.app.listen({
+        port: parseInt(this.configService.get("PORT") || "3000", 10),
+        host: this.configService.get("HOST") || "127.0.0.1",
+      });
+
+      this.app.log.info(`🚀 LLMs API server listening on ${address}`);
+
+      const shutdown = async (signal: string) => {
+        this.app.log.info(`Received ${signal}, shutting down gracefully...`);
+        await this.app.close();
+        process.exit(0);
+      };
+
+      process.on("SIGINT", () => shutdown("SIGINT"));
+      process.on("SIGTERM", () => shutdown("SIGTERM"));
+    } catch (error) {
+      this.app.log.error(`Error starting server: ${error}`);
+      process.exit(1);
+    }
+  }
+}
+
+// Export for external use
+export default Server;
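A minimal bootstrap of the exported Server class (sketch; PORT and HOST are the keys start() reads, and the provider list is left empty for brevity):

    import Server from "@musistudio/llms";

    const server = new Server({
      initialConfig: { PORT: "3456", HOST: "127.0.0.1", providers: [] },
    });
    server.start();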
diff --git a/packages/core/src/services/config.ts b/packages/core/src/services/config.ts
new file mode 100644
index 0000000..1be4a19
--- /dev/null
+++ b/packages/core/src/services/config.ts
@@ -0,0 +1,179 @@
+import { readFileSync, existsSync } from "fs";
+import { join } from "path";
+import { config } from "dotenv";
+import JSON5 from 'json5';
+
+export interface ConfigOptions {
+  envPath?: string;
+  jsonPath?: string;
+  useEnvFile?: boolean;
+  useJsonFile?: boolean;
+  useEnvironmentVariables?: boolean;
+  initialConfig?: AppConfig;
+}
+
+export interface AppConfig {
+  [key: string]: any;
+}
+
+export class ConfigService {
+  private config: AppConfig = {};
+  private options: ConfigOptions;
+
+  constructor(
+    options: ConfigOptions = {
+      jsonPath: "./config.json",
+    }
+  ) {
+    this.options = {
+      envPath: options.envPath || ".env",
+      jsonPath: options.jsonPath,
+      useEnvFile: false,
+      useJsonFile: options.useJsonFile !== false,
+      useEnvironmentVariables: options.useEnvironmentVariables !== false,
+      ...options,
+    };
+
+    this.loadConfig();
+  }
+
+  private loadConfig(): void {
+    if (this.options.useJsonFile && this.options.jsonPath) {
+      this.loadJsonConfig();
+    }
+
+    if (this.options.initialConfig) {
+      this.config = { ...this.config, ...this.options.initialConfig };
+    }
+
+    if (this.options.useEnvFile) {
+      this.loadEnvConfig();
+    }
+
+    // if (this.options.useEnvironmentVariables) {
+    //   this.loadEnvironmentVariables();
+    // }
+
+    if (this.config.LOG_FILE) {
+      process.env.LOG_FILE = this.config.LOG_FILE;
+    }
+    if (this.config.LOG) {
+      process.env.LOG = this.config.LOG;
+    }
+  }
+
+  private loadJsonConfig(): void {
+    if (!this.options.jsonPath) return;
+
+    const jsonPath = this.isAbsolutePath(this.options.jsonPath)
+      ? this.options.jsonPath
+      : join(process.cwd(), this.options.jsonPath);
+
+    if (existsSync(jsonPath)) {
+      try {
+        const jsonContent = readFileSync(jsonPath, "utf-8");
+        const jsonConfig = JSON5.parse(jsonContent);
+        this.config = { ...this.config, ...jsonConfig };
+        console.log(`Loaded JSON config from: ${jsonPath}`);
+      } catch (error) {
+        console.warn(`Failed to load JSON config from ${jsonPath}:`, error);
+      }
+    } else {
+      console.warn(`JSON config file not found: ${jsonPath}`);
+    }
+  }
+
+  private loadEnvConfig(): void {
+    const envPath = this.isAbsolutePath(this.options.envPath!)
+      ? this.options.envPath!
+      : join(process.cwd(), this.options.envPath!);
+
+    if (existsSync(envPath)) {
+      try {
+        const result = config({ path: envPath });
+        if (result.parsed) {
+          this.config = {
+            ...this.config,
+            ...this.parseEnvConfig(result.parsed),
+          };
+        }
+      } catch (error) {
+        console.warn(`Failed to load .env config from ${envPath}:`, error);
+      }
+    }
+  }
+
+  private loadEnvironmentVariables(): void {
+    const envConfig = this.parseEnvConfig(process.env);
+    this.config = { ...this.config, ...envConfig };
+  }
+
+  private parseEnvConfig(
+    env: Record<string, string | undefined>
+  ): Partial<AppConfig> {
+    const parsed: Partial<AppConfig> = {};
+
+    Object.assign(parsed, env);
+
+    return parsed;
+  }
+
+  private isAbsolutePath(path: string): boolean {
+    return path.startsWith("/") || path.includes(":");
+  }
+
+  public get<T>(key: keyof AppConfig): T | undefined;
+  public get<T>(key: keyof AppConfig, defaultValue: T): T;
+  public get<T>(key: keyof AppConfig, defaultValue?: T): T | undefined {
+    const value = this.config[key];
+    return value !== undefined ? (value as T) : defaultValue;
+  }
+
+  public getAll(): AppConfig {
+    return { ...this.config };
+  }
+
+  public getHttpsProxy(): string | undefined {
+    return (
+      this.get("HTTPS_PROXY") ||
+      this.get("https_proxy") ||
+      this.get("httpsProxy") ||
+      this.get("PROXY_URL")
+    );
+  }
+
+  public has(key: keyof AppConfig): boolean {
+    return this.config[key] !== undefined;
+  }
+
+  public set(key: keyof AppConfig, value: any): void {
+    this.config[key] = value;
+  }
+
+  public reload(): void {
+    this.config = {};
+    this.loadConfig();
+  }
+
+  public getConfigSummary(): string {
+    const summary: string[] = [];
+
+    if (this.options.initialConfig) {
+      summary.push("Initial Config");
+    }
+
+    if (this.options.useJsonFile && this.options.jsonPath) {
+      summary.push(`JSON: ${this.options.jsonPath}`);
+    }
+
+    if (this.options.useEnvFile) {
+      summary.push(`ENV: ${this.options.envPath}`);
+    }
+
+    if (this.options.useEnvironmentVariables) {
+      summary.push("Environment Variables");
+    }
+
+    return `Config sources: ${summary.join(", ")}`;
+  }
+}
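Typical reads against this service (sketch; the keys mirror the ones used elsewhere in this diff):

    const config = new ConfigService({ jsonPath: "./config.json" });
    const port = config.get<string>("PORT", "3000"); // typed accessor with a default
    const proxy = config.getHttpsProxy(); // tries HTTPS_PROXY, https_proxy, httpsProxy, PROXY_URL in order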
diff --git a/packages/core/src/services/provider.ts b/packages/core/src/services/provider.ts
new file mode 100644
index 0000000..aa08a20
--- /dev/null
+++ b/packages/core/src/services/provider.ts
@@ -0,0 +1,287 @@
+import { TransformerConstructor } from "@/types/transformer";
+import {
+  LLMProvider,
+  RegisterProviderRequest,
+  ModelRoute,
+  RequestRouteInfo,
+  ConfigProvider,
+} from "../types/llm";
+import { ConfigService } from "./config";
+import { TransformerService } from "./transformer";
+
+export class ProviderService {
+  private providers: Map<string, LLMProvider> = new Map();
+  private modelRoutes: Map<string, ModelRoute> = new Map();
+
+  constructor(private readonly configService: ConfigService, private readonly transformerService: TransformerService, private readonly logger: any) {
+    this.initializeCustomProviders();
+  }
+
+  private initializeCustomProviders() {
+    const providersConfig =
+      this.configService.get<ConfigProvider[]>("providers");
+    if (providersConfig && Array.isArray(providersConfig)) {
+      this.initializeFromProvidersArray(providersConfig);
+      return;
+    }
+  }
+
+  private initializeFromProvidersArray(providersConfig: ConfigProvider[]) {
+    providersConfig.forEach((providerConfig: ConfigProvider) => {
+      try {
+        if (
+          !providerConfig.name ||
+          !providerConfig.api_base_url ||
+          !providerConfig.api_key
+        ) {
+          return;
+        }
+
+        const transformer: LLMProvider["transformer"] = {}
+
+        if (providerConfig.transformer) {
+          Object.keys(providerConfig.transformer).forEach(key => {
+            if (key === 'use') {
+              if (Array.isArray(providerConfig.transformer.use)) {
+                transformer.use = providerConfig.transformer.use.map((transformer) => {
+                  if (Array.isArray(transformer) && typeof transformer[0] === 'string') {
+                    const Constructor = this.transformerService.getTransformer(transformer[0]);
+                    if (Constructor) {
+                      return new (Constructor as TransformerConstructor)(transformer[1]);
+                    }
+                  }
+                  if (typeof transformer === 'string') {
+                    const transformerInstance = this.transformerService.getTransformer(transformer);
+                    if (typeof transformerInstance === 'function') {
+                      return new transformerInstance();
+                    }
+                    return transformerInstance;
+                  }
+                }).filter((transformer) => typeof transformer !== 'undefined');
+              }
+            } else {
+              if (Array.isArray(providerConfig.transformer[key]?.use)) {
+                transformer[key] = {
+                  use: providerConfig.transformer[key].use.map((transformer) => {
+                    if (Array.isArray(transformer) && typeof transformer[0] === 'string') {
+                      const Constructor = this.transformerService.getTransformer(transformer[0]);
+                      if (Constructor) {
+                        return new (Constructor as TransformerConstructor)(transformer[1]);
+                      }
+                    }
+                    if (typeof transformer === 'string') {
+                      const transformerInstance = this.transformerService.getTransformer(transformer);
+                      if (typeof transformerInstance === 'function') {
+                        return new transformerInstance();
+                      }
+                      return transformerInstance;
+                    }
+                  }).filter((transformer) => typeof transformer !== 'undefined')
+                }
+              }
+            }
+          })
+        }
+
+        this.registerProvider({
+          name: providerConfig.name,
+          baseUrl: providerConfig.api_base_url,
+          apiKey: providerConfig.api_key,
+          models: providerConfig.models || [],
+          transformer: providerConfig.transformer ? transformer : undefined,
+        });
+
+        this.logger.info(`${providerConfig.name} provider registered`);
+      } catch (error) {
+        this.logger.error(`${providerConfig.name} provider registration error: ${error}`);
+      }
+    });
+  }
+
+  registerProvider(request: RegisterProviderRequest): LLMProvider {
+    const provider: LLMProvider = {
+      ...request,
+    };
+
+    this.providers.set(provider.name, provider);
+
+    request.models.forEach((model) => {
+      const fullModel = `${provider.name},${model}`;
+      const route: ModelRoute = {
+        provider: provider.name,
+        model,
+        fullModel,
+      };
+      this.modelRoutes.set(fullModel, route);
+      if (!this.modelRoutes.has(model)) {
+        this.modelRoutes.set(model, route);
+      }
+    });
+
+    return provider;
+  }
+
+  getProviders(): LLMProvider[] {
+    return Array.from(this.providers.values());
+  }
+
+  getProvider(name: string): LLMProvider | undefined {
+    return this.providers.get(name);
+  }
+
+  updateProvider(
+    id: string,
+    updates: Partial<LLMProvider>
+  ): LLMProvider | null {
+    const provider = this.providers.get(id);
+    if (!provider) {
+      return null;
+    }
+
+    const updatedProvider = {
+      ...provider,
+      ...updates,
+      updatedAt: new Date(),
+    };
+
+    this.providers.set(id, updatedProvider);
+
+    if (updates.models) {
+      provider.models.forEach((model) => {
+        const fullModel = `${provider.name},${model}`;
+        this.modelRoutes.delete(fullModel);
+        this.modelRoutes.delete(model);
+      });
+
+      updates.models.forEach((model) => {
+        const fullModel = `${provider.name},${model}`;
+        const route: ModelRoute = {
+          provider: provider.name,
+          model,
+          fullModel,
+        };
+        this.modelRoutes.set(fullModel, route);
+        if (!this.modelRoutes.has(model)) {
+          this.modelRoutes.set(model, route);
+        }
+      });
+    }
+
+    return updatedProvider;
+  }
+  deleteProvider(id: string): boolean {
+    const provider = this.providers.get(id);
+    if (!provider) {
+      return false;
+    }
+
+    provider.models.forEach((model) => {
+      const fullModel = `${provider.name},${model}`;
+      this.modelRoutes.delete(fullModel);
+      this.modelRoutes.delete(model);
+    });
+
+    this.providers.delete(id);
+    return true;
+  }
+
+  toggleProvider(name: string, enabled: boolean): boolean {
+    const provider = this.providers.get(name);
+    if (!provider) {
+      return false;
+    }
+    return true;
+  }
+
+  resolveModelRoute(modelName: string): RequestRouteInfo | null {
+    const route = this.modelRoutes.get(modelName);
+    if (!route) {
+      return null;
+    }
+
+    const provider = this.providers.get(route.provider);
+    if (!provider) {
+      return null;
+    }
+
+    return {
+      provider,
+      originalModel: modelName,
+      targetModel: route.model,
+    };
+  }
+
+  getAvailableModelNames(): string[] {
+    const modelNames: string[] = [];
+    this.providers.forEach((provider) => {
+      provider.models.forEach((model) => {
+        modelNames.push(model);
+        modelNames.push(`${provider.name},${model}`);
+      });
+    });
+    return modelNames;
+  }
+
+  getModelRoutes(): ModelRoute[] {
+    return Array.from(this.modelRoutes.values());
+  }
+
+  private parseTransformerConfig(transformerConfig: any): any {
+    if (!transformerConfig) return {};
+
+    if (Array.isArray(transformerConfig)) {
+      return transformerConfig.reduce((acc, item) => {
+        if (Array.isArray(item)) {
+          const [name, config = {}] = item;
+          acc[name] = config;
+        } else {
+          acc[item] = {};
+        }
+        return acc;
+      }, {});
+    }
+
+    return transformerConfig;
+  }
+
+  async getAvailableModels(): Promise<{
+    object: string;
+    data: Array<{
+      id: string;
+      object: string;
+      owned_by: string;
+      provider: string;
+    }>;
+  }> {
+    const models: Array<{
+      id: string;
+      object: string;
+      owned_by: string;
+      provider: string;
+    }> = [];
+
+    this.providers.forEach((provider) => {
+      provider.models.forEach((model) => {
+        models.push({
+          id: model,
+          object: "model",
+          owned_by: provider.name,
+          provider: provider.name,
+        });
+
+        models.push({
+          id: `${provider.name},${model}`,
+          object: "model",
+          owned_by: provider.name,
+          provider: provider.name,
+        });
+      });
+    });
+
+    return {
+      object: "list",
+      data: models,
+    };
+  }
+}
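Route resolution then accepts either form of a model name (sketch; names invented):

    providerService.resolveModelRoute("openrouter,gpt-4o");
    // → { provider, originalModel: "openrouter,gpt-4o", targetModel: "gpt-4o" }
    providerService.resolveModelRoute("gpt-4o");
    // → the route of the first provider that registered "gpt-4o", or null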
diff --git a/packages/core/src/services/transformer.ts b/packages/core/src/services/transformer.ts
new file mode 100644
index 0000000..4a49f48
--- /dev/null
+++ b/packages/core/src/services/transformer.ts
@@ -0,0 +1,165 @@
+import { Transformer, TransformerConstructor } from "@/types/transformer";
+import { ConfigService } from "./config";
+import Transformers from "@/transformer";
+import Module from "node:module";
+
+interface TransformerConfig {
+  transformers: Array<{
+    name: string;
+    type: "class" | "module";
+    path?: string;
+    options?: any;
+  }>;
+}
+
+export class TransformerService {
+  private transformers: Map<string, Transformer | TransformerConstructor> =
+    new Map();
+
+  constructor(
+    private readonly configService: ConfigService,
+    private readonly logger: any
+  ) {}
+
+  registerTransformer(name: string, transformer: Transformer): void {
+    this.transformers.set(name, transformer);
+    this.logger.info(
+      `register transformer: ${name}${
+        transformer.endPoint
+          ? ` (endpoint: ${transformer.endPoint})`
+          : " (no endpoint)"
+      }`
+    );
+  }
+
+  getTransformer(
+    name: string
+  ): Transformer | TransformerConstructor | undefined {
+    return this.transformers.get(name);
+  }
+
+  getAllTransformers(): Map<string, Transformer | TransformerConstructor> {
+    return new Map(this.transformers);
+  }
+
+  getTransformersWithEndpoint(): { name: string; transformer: Transformer }[] {
+    const result: { name: string; transformer: Transformer }[] = [];
+
+    this.transformers.forEach((transformer, name) => {
+      // Check if it's an instance with endPoint
+      if (typeof transformer === 'object' && transformer.endPoint) {
+        result.push({ name, transformer });
+      }
+    });
+
+    return result;
+  }
+
+  getTransformersWithoutEndpoint(): {
+    name: string;
+    transformer: Transformer;
+  }[] {
+    const result: { name: string; transformer: Transformer }[] = [];
+
+    this.transformers.forEach((transformer, name) => {
+      // Check if it's an instance without endPoint
+      if (typeof transformer === 'object' && !transformer.endPoint) {
+        result.push({ name, transformer });
+      }
+    });
+
+    return result;
+  }
+
+  removeTransformer(name: string): boolean {
+    return this.transformers.delete(name);
+  }
+
+  hasTransformer(name: string): boolean {
+    return this.transformers.has(name);
+  }
+
+  async registerTransformerFromConfig(config: {
+    path?: string;
+    options?: any;
+  }): Promise<boolean> {
+    try {
+      if (config.path) {
+        const module = require(require.resolve(config.path));
+        if (module) {
+          const instance = new module(config.options);
+          // Set logger for transformer instance
+          if (instance && typeof instance === "object") {
+            (instance as any).logger = this.logger;
+          }
+          if (!instance.name) {
+            throw new Error(
+              `Transformer instance from ${config.path} does not have a name property.`
+            );
+          }
+          this.registerTransformer(instance.name, instance);
+          return true;
+        }
+      }
+      return false;
+    } catch (error: any) {
+      this.logger.error(
+        `load transformer (${config.path}) \nerror: ${error.message}\nstack: ${error.stack}`
+      );
+      return false;
+    }
+  }
+
+  async initialize(): Promise<void> {
+    try {
+      await this.registerDefaultTransformersInternal();
+      await this.loadFromConfig();
+    } catch (error: any) {
+      this.logger.error(
+        `TransformerService init error: ${error.message}\nStack: ${error.stack}`
+      );
+    }
+  }
+
+  private async registerDefaultTransformersInternal(): Promise<void> {
+    try {
+      Object.values(Transformers).forEach(
+        (TransformerStatic: any) => {
+          if (
+            "TransformerName" in TransformerStatic &&
+            typeof TransformerStatic.TransformerName === "string"
+          ) {
+            this.registerTransformer(
+              TransformerStatic.TransformerName,
+              TransformerStatic
+            );
+          } else {
+            const transformerInstance = new TransformerStatic();
+            // Set logger for transformer instance
+            if (
+              transformerInstance &&
+              typeof transformerInstance === "object"
+            ) {
+              (transformerInstance as any).logger = this.logger;
+            }
+            this.registerTransformer(
+              transformerInstance.name!,
+              transformerInstance
+            );
+          }
+        }
+      );
+    } catch (error) {
+      this.logger.error({ error }, "transformer registration error:");
+    }
+  }
+
+  private async loadFromConfig(): Promise<void> {
+    const transformers = this.configService.get<
+      TransformerConfig["transformers"]
+    >("transformers", []);
+    for (const transformer of transformers) {
+      await this.registerTransformerFromConfig(transformer);
+    }
+  }
+}
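The `transformers` config key consumed by loadFromConfig() would look like this (sketch; the path and options are invented, and the module must default-export a class whose instances carry a `name`):

    const appConfig = {
      transformers: [
        { path: "/abs/path/to/my-transformer.cjs", options: { level: "debug" } },
      ],
    };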
diff --git a/packages/core/src/transformer/anthropic.transformer.ts b/packages/core/src/transformer/anthropic.transformer.ts
new file mode 100644
index 0000000..3231c80
--- /dev/null
+++ b/packages/core/src/transformer/anthropic.transformer.ts
@@ -0,0 +1,1069 @@
+import { ChatCompletion } from "openai/resources";
+import {
+  LLMProvider,
+  UnifiedChatRequest,
+  UnifiedMessage,
+  UnifiedTool,
+} from "@/types/llm";
+import {
+  Transformer,
+  TransformerContext,
+  TransformerOptions,
+} from "@/types/transformer";
+import { v4 as uuidv4 } from "uuid";
+import { getThinkLevel } from "@/utils/thinking";
+import { createApiError } from "@/api/middleware";
+import { formatBase64 } from "@/utils/image";
+
+export class AnthropicTransformer implements Transformer {
+  name = "Anthropic";
+  endPoint = "/v1/messages";
+  private useBearer: boolean;
+  logger?: any;
+
+  constructor(private readonly options?: TransformerOptions) {
+    this.useBearer = this.options?.UseBearer ?? false;
+  }
+
+  async auth(request: any, provider: LLMProvider): Promise<any> {
+    const headers: Record<string, string | undefined> = {};
+
+    if (this.useBearer) {
+      headers["authorization"] = `Bearer ${provider.apiKey}`;
+      headers["x-api-key"] = undefined;
+    } else {
+      headers["x-api-key"] = provider.apiKey;
+      headers["authorization"] = undefined;
+    }
+
+    return {
+      body: request,
+      config: {
+        headers,
+      },
+    };
+  }
+
+  async transformRequestOut(
+    request: Record<string, any>
+  ): Promise<UnifiedChatRequest> {
+    const messages: UnifiedMessage[] = [];
+
+    if (request.system) {
+      if (typeof request.system === "string") {
+        messages.push({
+          role: "system",
+          content: request.system,
+        });
+      } else if (Array.isArray(request.system) && request.system.length) {
+        const textParts = request.system
+          .filter((item: any) => item.type === "text" && item.text)
+          .map((item: any) => ({
+            type: "text" as const,
+            text: item.text,
+            cache_control: item.cache_control,
+          }));
+        messages.push({
+          role: "system",
+          content: textParts,
+        });
+      }
+    }
+
+    const requestMessages = JSON.parse(JSON.stringify(request.messages || []));
+
+    requestMessages?.forEach((msg: any) => {
+      if (msg.role === "user" || msg.role === "assistant") {
+        if (typeof msg.content === "string") {
+          messages.push({
+            role: msg.role,
+            content: msg.content,
+          });
+          return;
+        }
+
+        if (Array.isArray(msg.content)) {
+          if (msg.role === "user") {
+            const toolParts = msg.content.filter(
+              (c: any) => c.type === "tool_result" && c.tool_use_id
+            );
+            if (toolParts.length) {
+              toolParts.forEach((tool: any) => {
+                const toolMessage: UnifiedMessage = {
+                  role: "tool",
+                  content:
+                    typeof tool.content === "string"
+                      ? tool.content
+                      : JSON.stringify(tool.content),
+                  tool_call_id: tool.tool_use_id,
+                  cache_control: tool.cache_control,
+                };
+                messages.push(toolMessage);
+              });
+            }
+
+            const textAndMediaParts = msg.content.filter(
+              (c: any) =>
+                (c.type === "text" && c.text) ||
+                (c.type === "image" && c.source)
+            );
+            if (textAndMediaParts.length) {
+              messages.push({
+                role: "user",
+                content: textAndMediaParts.map((part: any) => {
+                  if (part?.type === "image") {
+                    return {
+                      type: "image_url",
+                      image_url: {
+                        url:
+                          part.source?.type === "base64"
+                            ? formatBase64(
+                                part.source.data,
+                                part.source.media_type
+                              )
+                            : part.source.url,
+                      },
+                      media_type: part.source.media_type,
+                    };
+                  }
+                  return part;
+                }),
+              });
+            }
+          } else if (msg.role === "assistant") {
+            const assistantMessage: UnifiedMessage = {
+              role: "assistant",
+              content: "",
+            };
+            const textParts = msg.content.filter(
+              (c: any) => c.type === "text" && c.text
+            );
+            if (textParts.length) {
+              assistantMessage.content = textParts
+                .map((text: any) => text.text)
+                .join("\n");
+            }
+
+            const toolCallParts = msg.content.filter(
+              (c: any) => c.type === "tool_use" && c.id
+            );
+            if (toolCallParts.length) {
+              assistantMessage.tool_calls = toolCallParts.map((tool: any) => {
+                return {
+                  id: tool.id,
+                  type: "function" as const,
+                  function: {
+                    name: tool.name,
+                    arguments: JSON.stringify(tool.input || {}),
+                  },
+                };
+              });
+            }
+
+            const thinkingPart = msg.content.find(
+              (c: any) => c.type === "thinking" && c.signature
+            );
+            if (thinkingPart) {
+              assistantMessage.thinking = {
+                content: thinkingPart.thinking,
+                signature: thinkingPart.signature,
+              };
+            }
+
+            messages.push(assistantMessage);
+          }
+          return;
+        }
+      }
+    });
+
+    const result: UnifiedChatRequest = {
+      messages,
+      model: request.model,
+      max_tokens: request.max_tokens,
+      temperature: request.temperature,
+      stream: request.stream,
+      tools: request.tools?.length
+        ? this.convertAnthropicToolsToUnified(request.tools)
+        : undefined,
+      tool_choice: request.tool_choice,
+    };
+    if (request.thinking) {
+      result.reasoning = {
+        effort: getThinkLevel(request.thinking.budget_tokens),
+        // max_tokens: request.thinking.budget_tokens,
+        enabled: request.thinking.type === "enabled",
+      };
+    }
+    if (request.tool_choice) {
+      if (request.tool_choice.type === "tool") {
+        result.tool_choice = {
+          type: "function",
+          function: { name: request.tool_choice.name },
+        };
+      } else {
+        result.tool_choice = request.tool_choice.type;
+      }
+    }
+    return result;
+  }
+
+  async transformResponseIn(
+    response: Response,
+    context?: TransformerContext
+  ): Promise<Response> {
+    const isStream = response.headers
+      .get("Content-Type")
+      ?.includes("text/event-stream");
+    if (isStream) {
+      if (!response.body) {
+        throw new Error("Stream response body is null");
+      }
+      const convertedStream = await this.convertOpenAIStreamToAnthropic(
+        response.body,
+        context!
+      );
+      return new Response(convertedStream, {
+        headers: {
+          "Content-Type": "text/event-stream",
+          "Cache-Control": "no-cache",
+          Connection: "keep-alive",
+        },
+      });
+    } else {
+      const data = (await response.json()) as any;
+      const anthropicResponse = this.convertOpenAIResponseToAnthropic(
+        data,
+        context!
+      );
+      return new Response(JSON.stringify(anthropicResponse), {
+        headers: { "Content-Type": "application/json" },
+      });
+    }
+  }
+
+  private convertAnthropicToolsToUnified(tools: any[]): UnifiedTool[] {
+    return tools.map((tool) => ({
+      type: "function",
+      function: {
+        name: tool.name,
+        description: tool.description || "",
+        parameters: tool.input_schema,
+      },
+    }));
+  }
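A worked example of the request mapping above (illustrative values; run inside an async context):

    const t = new AnthropicTransformer();
    const unified = await t.transformRequestOut({
      model: "gpt-4o",
      max_tokens: 128,
      system: "be brief",
      messages: [{ role: "user", content: [{ type: "text", text: "hi" }] }],
      tools: [{ name: "get_time", description: "", input_schema: { type: "object" } }],
    });
    // unified.messages[0] → { role: "system", content: "be brief" }
    // unified.tools?.[0]  → { type: "function", function: { name: "get_time", ... } }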
+  private async convertOpenAIStreamToAnthropic(
+    openaiStream: ReadableStream,
+    context: TransformerContext
+  ): Promise<ReadableStream> {
+    const readable = new ReadableStream({
+      start: async (controller) => {
+        const encoder = new TextEncoder();
+        const messageId = `msg_${Date.now()}`;
+        let stopReasonMessageDelta: null | Record<string, any> = null;
+        let model = "unknown";
+        let hasStarted = false;
+        let hasTextContentStarted = false;
+        let hasFinished = false;
+        const toolCalls = new Map();
+        const toolCallIndexToContentBlockIndex = new Map();
+        let totalChunks = 0;
+        let contentChunks = 0;
+        let toolCallChunks = 0;
+        let isClosed = false;
+        let isThinkingStarted = false;
+        let contentIndex = 0;
+        let currentContentBlockIndex = -1; // Track the current content block index
+
+        // Atomically allocate the next content block index
+        const assignContentBlockIndex = (): number => {
+          const currentIndex = contentIndex;
+          contentIndex++;
+          return currentIndex;
+        };
+
+        const safeEnqueue = (data: Uint8Array) => {
+          if (!isClosed) {
+            try {
+              controller.enqueue(data);
+              const dataStr = new TextDecoder().decode(data);
+              this.logger.debug({
+                reqId: context.req.id,
+                data: dataStr,
+                type: "send data",
+              });
+            } catch (error) {
+              if (
+                error instanceof TypeError &&
+                error.message.includes("Controller is already closed")
+              ) {
+                isClosed = true;
+              } else {
+                this.logger.debug({
+                  reqId: context.req.id,
+                  error: error instanceof Error ? error.message : String(error),
+                  type: "send data error",
+                });
+                throw error;
+              }
+            }
+          }
+        };
+
+        const safeClose = () => {
+          if (!isClosed) {
+            try {
+              // Close any remaining open content block
+              if (currentContentBlockIndex >= 0) {
+                const contentBlockStop = {
+                  type: "content_block_stop",
+                  index: currentContentBlockIndex,
+                };
+                safeEnqueue(
+                  encoder.encode(
+                    `event: content_block_stop\ndata: ${JSON.stringify(
+                      contentBlockStop
+                    )}\n\n`
+                  )
+                );
+                currentContentBlockIndex = -1;
+              }
+
+              if (stopReasonMessageDelta) {
+                safeEnqueue(
+                  encoder.encode(
+                    `event: message_delta\ndata: ${JSON.stringify(
+                      stopReasonMessageDelta
+                    )}\n\n`
+                  )
+                );
+                stopReasonMessageDelta = null;
+              } else {
+                safeEnqueue(
+                  encoder.encode(
+                    `event: message_delta\ndata: ${JSON.stringify({
+                      type: "message_delta",
+                      delta: {
+                        stop_reason: "end_turn",
+                        stop_sequence: null,
+                      },
+                      usage: {
+                        input_tokens: 0,
+                        output_tokens: 0,
+                        cache_read_input_tokens: 0,
+                      },
+                    })}\n\n`
+                  )
+                );
+              }
+              const messageStop = {
+                type: "message_stop",
+              };
+              safeEnqueue(
+                encoder.encode(
+                  `event: message_stop\ndata: ${JSON.stringify(
+                    messageStop
+                  )}\n\n`
+                )
+              );
+              controller.close();
+              isClosed = true;
+            } catch (error) {
+              if (
+                error instanceof TypeError &&
+                error.message.includes("Controller is already closed")
+              ) {
+                isClosed = true;
+              } else {
+                throw error;
+              }
+            }
+          }
+        };
+
+        let reader: ReadableStreamDefaultReader | null = null;
+
+        try {
+          reader = openaiStream.getReader();
+          const decoder = new TextDecoder();
+          let buffer = "";
+
+          while (true) {
+            if (isClosed) {
+              break;
+            }
+
+            const { done, value } = await reader.read();
+            if (done) break;
+
+            buffer += decoder.decode(value, { stream: true });
+            const lines = buffer.split("\n");
+            buffer = lines.pop() || "";
+
+            for (const line of lines) {
+              if (isClosed || hasFinished) break;
+
+              if (!line.startsWith("data:")) continue;
+              const data = line.slice(5).trim();
+              this.logger.debug({
+                reqId: context.req.id,
+                type: "received data",
+                data,
+              });
+
+              if (data === "[DONE]") {
+                continue;
+              }
+
+              try {
+                const chunk = JSON.parse(data);
+                totalChunks++;
+                this.logger.debug({
+                  reqId: context.req.id,
+                  response: chunk,
+                  type: "Original Response",
+                });
+                if (chunk.error) {
+                  const errorMessage = {
+                    type: "error",
+                    message: {
+                      type: "api_error",
+                      message: JSON.stringify(chunk.error),
+                    },
+                  };
+
+                  safeEnqueue(
+                    encoder.encode(
+                      `event: error\ndata: ${JSON.stringify(errorMessage)}\n\n`
+                    )
+                  );
+                  continue;
+                }
+
+                model = chunk.model || model;
+
+                if (!hasStarted && !isClosed && !hasFinished) {
+                  hasStarted = true;
+
+                  const messageStart = {
+                    type: "message_start",
+                    message: {
+                      id: messageId,
+                      type: "message",
+                      role: "assistant",
+                      content: [],
+                      model: model,
+                      stop_reason: null,
+                      stop_sequence: null,
+                      usage: {
+                        input_tokens: 0,
+                        output_tokens: 0,
+                      },
+                    },
+                  };
+
+                  safeEnqueue(
+                    encoder.encode(
+                      `event: message_start\ndata: ${JSON.stringify(
+                        messageStart
+                      )}\n\n`
+                    )
+                  );
+                }
+
+                const choice = chunk.choices?.[0];
+                if (chunk.usage) {
+                  if (!stopReasonMessageDelta) {
+                    stopReasonMessageDelta = {
+                      type: "message_delta",
+                      delta: {
+                        stop_reason: "end_turn",
+                        stop_sequence: null,
+                      },
+                      usage: {
+                        input_tokens:
+                          (chunk.usage?.prompt_tokens || 0) -
+                          (chunk.usage?.prompt_tokens_details?.cached_tokens ||
+                            0),
+                        output_tokens: chunk.usage?.completion_tokens || 0,
+                        cache_read_input_tokens:
+                          chunk.usage?.prompt_tokens_details?.cached_tokens ||
+                          0,
+                      },
+                    };
+                  } else {
+                    stopReasonMessageDelta.usage = {
+                      input_tokens:
+                        (chunk.usage?.prompt_tokens || 0) -
+                        (chunk.usage?.prompt_tokens_details?.cached_tokens ||
+                          0),
+                      output_tokens: chunk.usage?.completion_tokens || 0,
+                      cache_read_input_tokens:
+                        chunk.usage?.prompt_tokens_details?.cached_tokens || 0,
+                    };
+                  }
+                }
+                if (!choice) {
+                  continue;
+                }
+
+                if (choice?.delta?.thinking && !isClosed && !hasFinished) {
+                  // Close any previous content block if open
+                  // if (currentContentBlockIndex >= 0) {
+                  //   const contentBlockStop = {
+                  //     type: "content_block_stop",
+                  //     index: currentContentBlockIndex,
+                  //   };
+                  //   safeEnqueue(
+                  //     encoder.encode(
+                  //       `event: content_block_stop\ndata: ${JSON.stringify(
+                  //         contentBlockStop
+                  //       )}\n\n`
+                  //     )
+                  //   );
+                  //   currentContentBlockIndex = -1;
+                  // }
+
+                  if (!isThinkingStarted) {
+                    const thinkingBlockIndex = assignContentBlockIndex();
+                    const contentBlockStart = {
+                      type: "content_block_start",
+                      index: thinkingBlockIndex,
+                      content_block: { type: "thinking", thinking: "" },
+                    };
+                    safeEnqueue(
+                      encoder.encode(
+                        `event: content_block_start\ndata: ${JSON.stringify(
+                          contentBlockStart
+                        )}\n\n`
+                      )
+                    );
+                    currentContentBlockIndex = thinkingBlockIndex;
+                    isThinkingStarted = true;
+                  }
+                  if (choice.delta.thinking.signature) {
+                    const thinkingSignature = {
+                      type: "content_block_delta",
+                      index: currentContentBlockIndex,
+                      delta: {
+                        type: "signature_delta",
+                        signature: choice.delta.thinking.signature,
+                      },
+                    };
+                    safeEnqueue(
+                      encoder.encode(
+                        `event: content_block_delta\ndata: ${JSON.stringify(
+                          thinkingSignature
+                        )}\n\n`
+                      )
+                    );
+                    const contentBlockStop = {
+                      type: "content_block_stop",
+                      index: currentContentBlockIndex,
+                    };
+                    safeEnqueue(
+                      encoder.encode(
+                        `event: content_block_stop\ndata: ${JSON.stringify(
+                          contentBlockStop
+                        )}\n\n`
+                      )
+                    );
+                    currentContentBlockIndex = -1;
+                  } else if (choice.delta.thinking.content) {
+                    const thinkingChunk = {
+                      type: "content_block_delta",
+                      index: currentContentBlockIndex,
+                      delta: {
+                        type: "thinking_delta",
+                        thinking: choice.delta.thinking.content || "",
+                      },
+                    };
+                    safeEnqueue(
+                      encoder.encode(
+                        `event: content_block_delta\ndata: ${JSON.stringify(
+                          thinkingChunk
+                        )}\n\n`
+                      )
+                    );
+                  }
+                }
+
+                if (choice?.delta?.content && !isClosed && !hasFinished) {
+                  contentChunks++;
+
+                  // Close any previous content block if open and it's not a text content block
+                  if (currentContentBlockIndex >= 0) {
+                    // Check if current content block is text type
+                    const isCurrentTextBlock = hasTextContentStarted;
+                    if (!isCurrentTextBlock) {
+                      const contentBlockStop = {
+                        type: "content_block_stop",
+                        index: currentContentBlockIndex,
+                      };
+                      safeEnqueue(
+                        encoder.encode(
+                          `event: content_block_stop\ndata: ${JSON.stringify(
+                            contentBlockStop
+                          )}\n\n`
+                        )
+                      );
+                      currentContentBlockIndex = -1;
+                    }
+                  }
+
+                  if (!hasTextContentStarted && !hasFinished) {
+                    hasTextContentStarted = true;
+                    const textBlockIndex = assignContentBlockIndex();
+                    const contentBlockStart = {
+                      type: "content_block_start",
+                      index: textBlockIndex,
+                      content_block: {
+                        type: "text",
+                        text: "",
+                      },
+                    };
+                    safeEnqueue(
+                      encoder.encode(
+                        `event: content_block_start\ndata: ${JSON.stringify(
+                          contentBlockStart
+                        )}\n\n`
+                      )
+                    );
+                    currentContentBlockIndex = textBlockIndex;
+                  }
+
+                  if (!isClosed && !hasFinished) {
+                    const anthropicChunk = {
+                      type: "content_block_delta",
+                      index: currentContentBlockIndex, // Use current content block index
+                      delta: {
+                        type: "text_delta",
+                        text: choice.delta.content,
+                      },
+                    };
+                    safeEnqueue(
+                      encoder.encode(
+                        `event: content_block_delta\ndata: ${JSON.stringify(
+                          anthropicChunk
+                        )}\n\n`
+                      )
+                    );
+                  }
+                }
+
+                if (
+                  choice?.delta?.annotations?.length &&
+                  !isClosed &&
+                  !hasFinished
+                ) {
+                  // Close text content block if open
+                  if (currentContentBlockIndex >= 0 && hasTextContentStarted) {
+                    const contentBlockStop = {
+                      type: "content_block_stop",
+                      index: currentContentBlockIndex,
+                    };
+                    safeEnqueue(
+                      encoder.encode(
+                        `event: content_block_stop\ndata: ${JSON.stringify(
+                          contentBlockStop
+                        )}\n\n`
+                      )
+                    );
+                    currentContentBlockIndex = -1;
+                    hasTextContentStarted = false;
+                  }
+
+                  choice?.delta?.annotations.forEach((annotation: any) => {
+                    const annotationBlockIndex = assignContentBlockIndex();
+                    const contentBlockStart = {
+                      type: "content_block_start",
+                      index: annotationBlockIndex,
+                      content_block: {
+                        type: "web_search_tool_result",
+                        tool_use_id: `srvtoolu_${uuidv4()}`,
+                        content: [
+                          {
+                            type: "web_search_result",
+                            title: annotation.url_citation.title,
+                            url: annotation.url_citation.url,
+                          },
+                        ],
+                      },
+                    };
+                    safeEnqueue(
+                      encoder.encode(
+                        `event: content_block_start\ndata: ${JSON.stringify(
+                          contentBlockStart
+                        )}\n\n`
+                      )
+                    );
+
+                    const contentBlockStop = {
+                      type: "content_block_stop",
+                      index: annotationBlockIndex,
+                    };
+                    safeEnqueue(
+                      encoder.encode(
+                        `event: content_block_stop\ndata: ${JSON.stringify(
+                          contentBlockStop
+                        )}\n\n`
+                      )
+                    );
+                    currentContentBlockIndex = -1;
+                  });
+                }
+
+                if (choice?.delta?.tool_calls && !isClosed && !hasFinished) {
+                  toolCallChunks++;
+                  const processedInThisChunk = new Set();
+
+                  for (const toolCall of choice.delta.tool_calls) {
+                    if (isClosed) break;
+                    const toolCallIndex = toolCall.index ?? 0;
+                    if (processedInThisChunk.has(toolCallIndex)) {
+                      continue;
+                    }
+                    processedInThisChunk.add(toolCallIndex);
+                    const isUnknownIndex =
+                      !toolCallIndexToContentBlockIndex.has(toolCallIndex);
+
+                    if (isUnknownIndex) {
+                      // Close any previous content block if open
+                      if (currentContentBlockIndex >= 0) {
+                        const contentBlockStop = {
+                          type: "content_block_stop",
+                          index: currentContentBlockIndex,
+                        };
+                        safeEnqueue(
+                          encoder.encode(
+                            `event: content_block_stop\ndata: ${JSON.stringify(
+                              contentBlockStop
+                            )}\n\n`
+                          )
+                        );
+                        currentContentBlockIndex = -1;
+                      }
+
+                      const newContentBlockIndex = assignContentBlockIndex();
+                      toolCallIndexToContentBlockIndex.set(
+                        toolCallIndex,
+                        newContentBlockIndex
+                      );
+                      const toolCallId =
+                        toolCall.id || `call_${Date.now()}_${toolCallIndex}`;
+                      const toolCallName =
+                        toolCall.function?.name || `tool_${toolCallIndex}`;
+                      const contentBlockStart = {
+                        type: "content_block_start",
+                        index: newContentBlockIndex,
+                        content_block: {
+                          type: "tool_use",
+                          id: toolCallId,
+                          name: toolCallName,
+                          input: {},
+                        },
+                      };
+
+                      safeEnqueue(
+                        encoder.encode(
+                          `event: content_block_start\ndata: ${JSON.stringify(
+                            contentBlockStart
+                          )}\n\n`
+                        )
+                      );
+                      currentContentBlockIndex = newContentBlockIndex;
+
+                      const toolCallInfo = {
+                        id: toolCallId,
+                        name: toolCallName,
+                        arguments: "",
+                        contentBlockIndex: newContentBlockIndex,
+                      };
+                      toolCalls.set(toolCallIndex, toolCallInfo);
+                    } else if (toolCall.id && toolCall.function?.name) {
+                      const existingToolCall = toolCalls.get(toolCallIndex)!;
+                      const wasTemporary =
+                        existingToolCall.id.startsWith("call_") &&
+                        existingToolCall.name.startsWith("tool_");
+
+                      if (wasTemporary) {
+                        existingToolCall.id = toolCall.id;
+                        existingToolCall.name = toolCall.function.name;
+                      }
+                    }
+
+                    if (
+                      toolCall.function?.arguments &&
+                      !isClosed &&
+                      !hasFinished
+                    ) {
+                      const blockIndex =
+                        toolCallIndexToContentBlockIndex.get(toolCallIndex);
+                      if (blockIndex === undefined) {
+                        continue;
+                      }
} + const currentToolCall = toolCalls.get(toolCallIndex); + if (currentToolCall) { + currentToolCall.arguments += + toolCall.function.arguments; + } + + try { + const anthropicChunk = { + type: "content_block_delta", + index: blockIndex, + delta: { + type: "input_json_delta", + partial_json: toolCall.function.arguments, + }, + }; + safeEnqueue( + encoder.encode( + `event: content_block_delta\ndata: ${JSON.stringify( + anthropicChunk + )}\n\n` + ) + ); + } catch { + try { + const fixedArgument = toolCall.function.arguments + .replace(/[\x00-\x1F\x7F-\x9F]/g, "") + .replace(/\\/g, "\\\\") + .replace(/"/g, '\\"'); + + const fixedChunk = { + type: "content_block_delta", + index: blockIndex, // Use the correct content block index + delta: { + type: "input_json_delta", + partial_json: fixedArgument, + }, + }; + safeEnqueue( + encoder.encode( + `event: content_block_delta\ndata: ${JSON.stringify( + fixedChunk + )}\n\n` + ) + ); + } catch (fixError) { + console.error(fixError); + } + } + } + } + } + + if (choice?.finish_reason && !isClosed && !hasFinished) { + if (contentChunks === 0 && toolCallChunks === 0) { + console.error( + "Warning: No content in the stream response!" + ); + } + + // Close any remaining open content block + if (currentContentBlockIndex >= 0) { + const contentBlockStop = { + type: "content_block_stop", + index: currentContentBlockIndex, + }; + safeEnqueue( + encoder.encode( + `event: content_block_stop\ndata: ${JSON.stringify( + contentBlockStop + )}\n\n` + ) + ); + currentContentBlockIndex = -1; + } + + if (!isClosed) { + const stopReasonMapping: Record<string, string> = { + stop: "end_turn", + length: "max_tokens", + tool_calls: "tool_use", + content_filter: "stop_sequence", + }; + + const anthropicStopReason = + stopReasonMapping[choice.finish_reason] || "end_turn"; + + stopReasonMessageDelta = { + type: "message_delta", + delta: { + stop_reason: anthropicStopReason, + stop_sequence: null, + }, + usage: { + input_tokens: + (chunk.usage?.prompt_tokens || 0) - + (chunk.usage?.prompt_tokens_details?.cached_tokens || + 0), + output_tokens: chunk.usage?.completion_tokens || 0, + cache_read_input_tokens: + chunk.usage?.prompt_tokens_details?.cached_tokens || + 0, + }, + }; + } + + break; + } + } catch (parseError: any) { + this.logger?.error( + `parseError: ${parseError.name} message: ${parseError.message} stack: ${parseError.stack} data: ${data}` + ); + } + } + } + safeClose(); + } catch (error) { + if (!isClosed) { + try { + controller.error(error); + } catch (controllerError) { + console.error(controllerError); + } + } + } finally { + if (reader) { + try { + reader.releaseLock(); + } catch (releaseError) { + console.error(releaseError); + } + } + } + }, + cancel: (reason) => { + this.logger.debug( + { + reqId: context.req.id, + }, + `cancel stream: ${reason}` + ); + }, + }); + + return readable; + } + + private convertOpenAIResponseToAnthropic( + openaiResponse: ChatCompletion, + context: TransformerContext + ): any { + this.logger.debug( + { + reqId: context.req.id, + response: openaiResponse, + }, + `Original OpenAI response` + ); + try { + const choice = openaiResponse.choices[0]; + if (!choice) { + throw new Error("No choices found in OpenAI response"); + } + const content: any[] = []; + if (choice.message.annotations) { + const id = `srvtoolu_${uuidv4()}`; + content.push({ + type: "server_tool_use", + id, + name: "web_search", + input: { + query: "", + }, + }); + content.push({ + type: "web_search_tool_result", + tool_use_id: id, + content: choice.message.annotations.map((item) => {
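The message_delta above and the non-streaming converter below share the same stop_reason and usage rules. Pulled out as a standalone sketch (field names follow the OpenAI and Anthropic wire formats; this is an illustration, not code from the diff):

const STOP_REASON_MAPPING: Record<string, string> = {
  stop: "end_turn",
  length: "max_tokens",
  tool_calls: "tool_use",
  content_filter: "stop_sequence",
};

// Cached prompt tokens are reported separately on the Anthropic side, so they
// are subtracted from input_tokens and surfaced as cache_read_input_tokens.
function usageFromOpenAI(usage?: {
  prompt_tokens?: number;
  completion_tokens?: number;
  prompt_tokens_details?: { cached_tokens?: number };
}) {
  const cached = usage?.prompt_tokens_details?.cached_tokens || 0;
  return {
    input_tokens: (usage?.prompt_tokens || 0) - cached,
    output_tokens: usage?.completion_tokens || 0,
    cache_read_input_tokens: cached,
  };
}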
return { + type: "web_search_result", + url: item.url_citation.url, + title: item.url_citation.title, + }; + }), + }); + } + if (choice.message.content) { + content.push({ + type: "text", + text: choice.message.content, + }); + } + if (choice.message.tool_calls && choice.message.tool_calls.length > 0) { + choice.message.tool_calls.forEach((toolCall) => { + let parsedInput = {}; + try { + const argumentsStr = toolCall.function.arguments || "{}"; + + if (typeof argumentsStr === "object") { + parsedInput = argumentsStr; + } else if (typeof argumentsStr === "string") { + parsedInput = JSON.parse(argumentsStr); + } + } catch { + parsedInput = { text: toolCall.function.arguments || "" }; + } + + content.push({ + type: "tool_use", + id: toolCall.id, + name: toolCall.function.name, + input: parsedInput, + }); + }); + } + if ((choice.message as any)?.thinking?.content) { + content.push({ + type: "thinking", + thinking: (choice.message as any).thinking.content, + signature: (choice.message as any).thinking.signature, + }); + } + const result = { + id: openaiResponse.id, + type: "message", + role: "assistant", + model: openaiResponse.model, + content: content, + stop_reason: + choice.finish_reason === "stop" + ? "end_turn" + : choice.finish_reason === "length" + ? "max_tokens" + : choice.finish_reason === "tool_calls" + ? "tool_use" + : choice.finish_reason === "content_filter" + ? "stop_sequence" + : "end_turn", + stop_sequence: null, + usage: { + input_tokens: + (openaiResponse.usage?.prompt_tokens || 0) - + (openaiResponse.usage?.prompt_tokens_details?.cached_tokens || 0), + output_tokens: openaiResponse.usage?.completion_tokens || 0, + cache_read_input_tokens: + openaiResponse.usage?.prompt_tokens_details?.cached_tokens || 0, + }, + }; + this.logger.debug( + { + reqId: context.req.id, + result, + }, + `Conversion complete, final Anthropic response` + ); + return result; + } catch { + throw createApiError( + `Provider error: ${JSON.stringify(openaiResponse)}`, + 500, + "provider_error" + ); + } + } +} diff --git a/packages/core/src/transformer/cerebras.transformer.ts b/packages/core/src/transformer/cerebras.transformer.ts new file mode 100644 index 0000000..a05ac19 --- /dev/null +++ b/packages/core/src/transformer/cerebras.transformer.ts @@ -0,0 +1,45 @@ +import { LLMProvider, UnifiedChatRequest, UnifiedMessage } from "@/types/llm"; +import { Transformer } from "@/types/transformer"; + + +/** + * Transformer class for Cerebras + */ +export class CerebrasTransformer implements Transformer { + name = "cerebras"; + + /** + * Transform the request from Claude Code format to Cerebras format + * @param request - The incoming request + * @param provider - The LLM provider information + * @returns The transformed request + */ + async transformRequestIn( + request: UnifiedChatRequest, + provider: LLMProvider + ): Promise<Record<string, any>> { + // Deep clone the request to avoid modifying the original + const transformedRequest = JSON.parse(JSON.stringify(request)); + + if (transformedRequest.reasoning) { + delete transformedRequest.reasoning; + } else { + transformedRequest.disable_reasoning = false; + } + + return { + body: transformedRequest, + config: { + headers: { + 'Authorization': `Bearer ${provider.apiKey}`, + 'Content-Type': 'application/json' + } + } + }; + } + + async transformResponseOut(response: Response): Promise<Response> { + return response; + } +} \ No newline at end of file diff --git a/packages/core/src/transformer/cleancache.transformer.ts b/packages/core/src/transformer/cleancache.transformer.ts new file mode 
100644 index 0000000..1784c06 --- /dev/null +++ b/packages/core/src/transformer/cleancache.transformer.ts @@ -0,0 +1,23 @@ +import { MessageContent, TextContent, UnifiedChatRequest } from "@/types/llm"; +import { Transformer } from "../types/transformer"; + +export class CleancacheTransformer implements Transformer { + name = "cleancache"; + + async transformRequestIn(request: UnifiedChatRequest): Promise { + if (Array.isArray(request.messages)) { + request.messages.forEach((msg) => { + if (Array.isArray(msg.content)) { + (msg.content as MessageContent[]).forEach((item) => { + if ((item as TextContent).cache_control) { + delete (item as TextContent).cache_control; + } + }); + } else if (msg.cache_control) { + delete msg.cache_control; + } + }); + } + return request; + } +} diff --git a/packages/core/src/transformer/customparams.transformer.ts b/packages/core/src/transformer/customparams.transformer.ts new file mode 100644 index 0000000..153e303 --- /dev/null +++ b/packages/core/src/transformer/customparams.transformer.ts @@ -0,0 +1,108 @@ +import { UnifiedChatRequest } from "../types/llm"; +import { Transformer, TransformerOptions } from "../types/transformer"; + +export interface CustomParamsOptions extends TransformerOptions { + /** + * Custom parameters to inject into the request body + * Any key-value pairs will be added to the request + * Supports: string, number, boolean, object, array + */ + [key: string]: any; +} + +/** + * Transformer for injecting dynamic custom parameters into LLM requests + * Allows runtime configuration of arbitrary parameters that get merged + * into the request body using deep merge strategy + */ +export class CustomParamsTransformer implements Transformer { + static TransformerName = "customparams"; + + private options: CustomParamsOptions; + + constructor(options: CustomParamsOptions = {}) { + this.options = options; + } + + async transformRequestIn( + request: UnifiedChatRequest + ): Promise { + // Create a copy of the request to avoid mutating the original + const modifiedRequest = { ...request } as any; + + // Inject custom parameters with deep merge + const parametersToInject = Object.entries(this.options); + + for (const [key, value] of parametersToInject) { + if (key in modifiedRequest) { + // Deep merge with existing parameter + if (typeof modifiedRequest[key] === 'object' && + typeof value === 'object' && + !Array.isArray(modifiedRequest[key]) && + !Array.isArray(value) && + modifiedRequest[key] !== null && + value !== null) { + // Deep merge objects + modifiedRequest[key] = this.deepMergeObjects(modifiedRequest[key], value); + } else { + // For non-objects, keep existing value (preserve original) + continue; + } + } else { + // Add new parameter + modifiedRequest[key] = this.cloneValue(value); + } + } + + return modifiedRequest; + } + + async transformResponseOut(response: Response): Promise { + // Pass through response unchanged + return response; + } + + + + /** + * Deep merge two objects recursively + */ + private deepMergeObjects(target: any, source: any): any { + const result = { ...target }; + + for (const [key, value] of Object.entries(source)) { + if (key in result && + typeof result[key] === 'object' && + typeof value === 'object' && + !Array.isArray(result[key]) && + !Array.isArray(value) && + result[key] !== null && + value !== null) { + result[key] = this.deepMergeObjects(result[key], value); + } else { + result[key] = this.cloneValue(value); + } + } + + return result; + } + + /** + * Clone a value to prevent reference issues + */ + 
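Taken together, the rules above mean: unknown keys are added, plain-object values are merged recursively, and scalars already present on the request are preserved. A usage sketch (the option values and the `as any` casts are illustrative):

const custom = new CustomParamsTransformer({
  temperature: 0.2,                         // added only if absent from the request
  extra_body: { safety: { level: "low" } }, // deep-merged into any existing extra_body
});

const merged: any = await custom.transformRequestIn({
  model: "some-model",
  messages: [],
  temperature: 0.7, // existing scalar is preserved, not overwritten
} as any);
// merged.temperature === 0.7
// merged.extra_body.safety.level === "low"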
private cloneValue(value: any): any { + if (value === null || typeof value !== 'object') { + return value; + } + + if (Array.isArray(value)) { + return value.map(item => this.cloneValue(item)); + } + + const cloned: any = {}; + for (const [key, val] of Object.entries(value)) { + cloned[key] = this.cloneValue(val); + } + return cloned; + } +} \ No newline at end of file diff --git a/packages/core/src/transformer/deepseek.transformer.ts b/packages/core/src/transformer/deepseek.transformer.ts new file mode 100644 index 0000000..a3b7dd2 --- /dev/null +++ b/packages/core/src/transformer/deepseek.transformer.ts @@ -0,0 +1,221 @@ +import { UnifiedChatRequest } from "../types/llm"; +import { Transformer } from "../types/transformer"; + +export class DeepseekTransformer implements Transformer { + name = "deepseek"; + + async transformRequestIn(request: UnifiedChatRequest): Promise { + if (request.max_tokens && request.max_tokens > 8192) { + request.max_tokens = 8192; // DeepSeek has a max token limit of 8192 + } + return request; + } + + async transformResponseOut(response: Response): Promise { + if (response.headers.get("Content-Type")?.includes("application/json")) { + const jsonResponse = await response.json(); + // Handle non-streaming response if needed + return new Response(JSON.stringify(jsonResponse), { + status: response.status, + statusText: response.statusText, + headers: response.headers, + }); + } else if (response.headers.get("Content-Type")?.includes("stream")) { + if (!response.body) { + return response; + } + + const decoder = new TextDecoder(); + const encoder = new TextEncoder(); + let reasoningContent = ""; + let isReasoningComplete = false; + let buffer = ""; // 用于缓冲不完整的数据 + + const stream = new ReadableStream({ + async start(controller) { + const reader = response.body!.getReader(); + const processBuffer = ( + buffer: string, + controller: ReadableStreamDefaultController, + encoder: TextEncoder + ) => { + const lines = buffer.split("\n"); + for (const line of lines) { + if (line.trim()) { + controller.enqueue(encoder.encode(line + "\n")); + } + } + }; + + const processLine = ( + line: string, + context: { + controller: ReadableStreamDefaultController; + encoder: TextEncoder; + reasoningContent: () => string; + appendReasoningContent: (content: string) => void; + isReasoningComplete: () => boolean; + setReasoningComplete: (val: boolean) => void; + } + ) => { + const { controller, encoder } = context; + + if ( + line.startsWith("data: ") && + line.trim() !== "data: [DONE]" + ) { + try { + const data = JSON.parse(line.slice(6)); + + // Extract reasoning_content from delta + if (data.choices?.[0]?.delta?.reasoning_content) { + context.appendReasoningContent( + data.choices[0].delta.reasoning_content + ); + const thinkingChunk = { + ...data, + choices: [ + { + ...data.choices[0], + delta: { + ...data.choices[0].delta, + thinking: { + content: data.choices[0].delta.reasoning_content, + }, + }, + }, + ], + }; + delete thinkingChunk.choices[0].delta.reasoning_content; + const thinkingLine = `data: ${JSON.stringify( + thinkingChunk + )}\n\n`; + controller.enqueue(encoder.encode(thinkingLine)); + return; + } + + // Check if reasoning is complete (when delta has content but no reasoning_content) + if ( + data.choices?.[0]?.delta?.content && + context.reasoningContent() && + !context.isReasoningComplete() + ) { + context.setReasoningComplete(true); + const signature = Date.now().toString(); + + // Create a new chunk with thinking block + const thinkingChunk = { + ...data, + choices: [ + { 
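The buffering scheme this stream handler relies on (and which the groq and enhancetool transformers below repeat) reduces to one idiom: accumulate decoded chunks, split on newlines, and carry the possibly incomplete last line into the next read. As a standalone sketch:

async function* sseLines(body: ReadableStream<Uint8Array>): AsyncGenerator<string> {
  const reader = body.getReader();
  const decoder = new TextDecoder();
  let buffer = "";
  try {
    while (true) {
      const { done, value } = await reader.read();
      if (done) break;
      buffer += decoder.decode(value, { stream: true });
      const lines = buffer.split("\n");
      buffer = lines.pop() || ""; // the last line may be incomplete; keep it
      for (const line of lines) {
        if (line.trim()) yield line;
      }
    }
    if (buffer.trim()) yield buffer; // flush whatever remains at end of stream
  } finally {
    reader.releaseLock();
  }
}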
+ ...data.choices[0], + delta: { + ...data.choices[0].delta, + content: null, + thinking: { + content: context.reasoningContent(), + signature: signature, + }, + }, + }, + ], + }; + delete thinkingChunk.choices[0].delta.reasoning_content; + // Send the thinking chunk + const thinkingLine = `data: ${JSON.stringify( + thinkingChunk + )}\n\n`; + controller.enqueue(encoder.encode(thinkingLine)); + } + + if (data.choices[0]?.delta?.reasoning_content) { + delete data.choices[0].delta.reasoning_content; + } + + // Send the modified chunk + if ( + data.choices?.[0]?.delta && + Object.keys(data.choices[0].delta).length > 0 + ) { + if (context.isReasoningComplete()) { + data.choices[0].index++; + } + const modifiedLine = `data: ${JSON.stringify(data)}\n\n`; + controller.enqueue(encoder.encode(modifiedLine)); + } + } catch (e) { + // If JSON parsing fails, pass through the original line + controller.enqueue(encoder.encode(line + "\n")); + } + } else { + // Pass through non-data lines (like [DONE]) + controller.enqueue(encoder.encode(line + "\n")); + } + }; + + try { + while (true) { + const { done, value } = await reader.read(); + if (done) { + // 处理缓冲区中剩余的数据 + if (buffer.trim()) { + processBuffer(buffer, controller, encoder); + } + break; + } + + const chunk = decoder.decode(value, { stream: true }); + buffer += chunk; + + // 处理缓冲区中完整的数据行 + const lines = buffer.split("\n"); + buffer = lines.pop() || ""; // 最后一行可能不完整,保留在缓冲区 + + for (const line of lines) { + if (!line.trim()) continue; + + try { + processLine(line, { + controller, + encoder, + reasoningContent: () => reasoningContent, + appendReasoningContent: (content) => + (reasoningContent += content), + isReasoningComplete: () => isReasoningComplete, + setReasoningComplete: (val) => (isReasoningComplete = val), + }); + } catch (error) { + console.error("Error processing line:", line, error); + // 如果解析失败,直接传递原始行 + controller.enqueue(encoder.encode(line + "\n")); + } + } + } + } catch (error) { + console.error("Stream error:", error); + controller.error(error); + } finally { + try { + reader.releaseLock(); + } catch (e) { + console.error("Error releasing reader lock:", e); + } + controller.close(); + } + }, + }); + + return new Response(stream, { + status: response.status, + statusText: response.statusText, + headers: { + "Content-Type": response.headers.get("Content-Type") || "text/plain", + "Cache-Control": "no-cache", + Connection: "keep-alive", + }, + }); + } + + return response; + } +} diff --git a/packages/core/src/transformer/enhancetool.transformer.ts b/packages/core/src/transformer/enhancetool.transformer.ts new file mode 100644 index 0000000..739ead9 --- /dev/null +++ b/packages/core/src/transformer/enhancetool.transformer.ts @@ -0,0 +1,334 @@ +import { Transformer } from "@/types/transformer"; +import { parseToolArguments } from "@/utils/toolArgumentsParser"; + +export class EnhanceToolTransformer implements Transformer { + name = "enhancetool"; + + async transformResponseOut(response: Response): Promise { + if (response.headers.get("Content-Type")?.includes("application/json")) { + const jsonResponse = await response.json(); + if (jsonResponse?.choices?.[0]?.message?.tool_calls?.length) { + // 处理非流式的工具调用参数解析 + for (const toolCall of jsonResponse.choices[0].message.tool_calls) { + if (toolCall.function?.arguments) { + toolCall.function.arguments = parseToolArguments( + toolCall.function.arguments, + this.logger + ); + } + } + } + return new Response(JSON.stringify(jsonResponse), { + status: response.status, + statusText: 
response.statusText, + headers: response.headers, + }); + } else if (response.headers.get("Content-Type")?.includes("stream")) { + if (!response.body) { + return response; + } + + const decoder = new TextDecoder(); + const encoder = new TextEncoder(); + + // Define interface for tool call tracking + interface ToolCall { + index?: number; + name?: string; + id?: string; + arguments?: string; + } + + let currentToolCall: ToolCall = {}; + + let hasTextContent = false; + let reasoningContent = ""; + let isReasoningComplete = false; + let hasToolCall = false; + let buffer = ""; // 用于缓冲不完整的数据 + + const stream = new ReadableStream({ + async start(controller) { + const reader = response.body!.getReader(); + const processBuffer = ( + buffer: string, + controller: ReadableStreamDefaultController, + encoder: TextEncoder + ) => { + const lines = buffer.split("\n"); + for (const line of lines) { + if (line.trim()) { + controller.enqueue(encoder.encode(line + "\n")); + } + } + }; + + // Helper function to process completed tool calls + const processCompletedToolCall = ( + data: any, + controller: ReadableStreamDefaultController, + encoder: TextEncoder + ) => { + let finalArgs = ""; + try { + finalArgs = parseToolArguments(currentToolCall.arguments || "", this.logger); + } catch (e: any) { + console.error( + `${e.message} ${ + e.stack + } 工具调用参数解析失败: ${JSON.stringify( + currentToolCall + )}` + ); + // Use original arguments if parsing fails + finalArgs = currentToolCall.arguments || ""; + } + + const delta = { + role: "assistant", + tool_calls: [ + { + function: { + name: currentToolCall.name, + arguments: finalArgs, + }, + id: currentToolCall.id, + index: currentToolCall.index, + type: "function", + }, + ], + }; + + // Remove content field entirely to prevent extra null values + const modifiedData = { + ...data, + choices: [ + { + ...data.choices[0], + delta, + }, + ], + }; + // Remove content field if it exists + if (modifiedData.choices[0].delta.content !== undefined) { + delete modifiedData.choices[0].delta.content; + } + + const modifiedLine = `data: ${JSON.stringify(modifiedData)}\n\n`; + controller.enqueue(encoder.encode(modifiedLine)); + }; + + const processLine = ( + line: string, + context: { + controller: ReadableStreamDefaultController; + encoder: TextEncoder; + hasTextContent: () => boolean; + setHasTextContent: (val: boolean) => void; + reasoningContent: () => string; + appendReasoningContent: (content: string) => void; + isReasoningComplete: () => boolean; + setReasoningComplete: (val: boolean) => void; + } + ) => { + const { controller, encoder } = context; + + if (line.startsWith("data: ") && line.trim() !== "data: [DONE]") { + const jsonStr = line.slice(6); + try { + const data = JSON.parse(jsonStr); + + // Handle tool calls in streaming mode + if (data.choices?.[0]?.delta?.tool_calls?.length) { + const toolCallDelta = data.choices[0].delta.tool_calls[0]; + + // Initialize currentToolCall if this is the first chunk for this tool call + if (typeof currentToolCall.index === "undefined") { + currentToolCall = { + index: toolCallDelta.index, + name: toolCallDelta.function?.name || "", + id: toolCallDelta.id || "", + arguments: toolCallDelta.function?.arguments || "" + }; + if (toolCallDelta.function?.arguments) { + toolCallDelta.function.arguments = '' + } + // Send the first chunk as-is + const modifiedLine = `data: ${JSON.stringify(data)}\n\n`; + controller.enqueue(encoder.encode(modifiedLine)); + return; + } + // Accumulate arguments if this is a continuation of the current tool call + 
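parseToolArguments itself is imported from a shared util and is not part of this diff. A minimal stand-in with the same contract (accept a possibly malformed argument string, return valid JSON text) might look like the following, built on the jsonrepair dependency declared in package.json; this is an assumption about the util's behavior, not its implementation:

import { jsonrepair } from "jsonrepair";

function parseToolArgumentsSketch(raw: string): string {
  if (!raw.trim()) return "{}";
  try {
    JSON.parse(raw);
    return raw; // already valid JSON, pass through unchanged
  } catch {
    // Best-effort repair of truncated or malformed JSON; may still throw
    // if the input is unrepairable, so callers keep a fallback.
    return jsonrepair(raw);
  }
}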
else if (currentToolCall.index === toolCallDelta.index) { + if (toolCallDelta.function?.arguments) { + currentToolCall.arguments += toolCallDelta.function.arguments; + } + // Don't send intermediate chunks that only contain arguments + return; + } + // If we have a different tool call index, process the previous one and start a new one + else { + // Process the completed tool call using helper function + processCompletedToolCall(data, controller, encoder); + + // Start tracking the new tool call + currentToolCall = { + index: toolCallDelta.index, + name: toolCallDelta.function?.name || "", + id: toolCallDelta.id || "", + arguments: toolCallDelta.function?.arguments || "" + }; + return; + } + } + + // Handle finish_reason for tool_calls + if (data.choices?.[0]?.finish_reason === "tool_calls" && currentToolCall.index !== undefined) { + // Process the final tool call using helper function + processCompletedToolCall(data, controller, encoder); + currentToolCall = {}; + return; + } + + // Handle text content alongside tool calls + if ( + data.choices?.[0]?.delta?.tool_calls?.length && + context.hasTextContent() + ) { + if (typeof data.choices[0].index === "number") { + data.choices[0].index += 1; + } else { + data.choices[0].index = 1; + } + } + + const modifiedLine = `data: ${JSON.stringify(data)}\n\n`; + controller.enqueue(encoder.encode(modifiedLine)); + } catch (e) { + // 如果JSON解析失败,可能是数据不完整,将原始行传递下去 + controller.enqueue(encoder.encode(line + "\n")); + } + } else { + // Pass through non-data lines (like [DONE]) + controller.enqueue(encoder.encode(line + "\n")); + } + }; + + try { + while (true) { + const { done, value } = await reader.read(); + if (done) { + // 处理缓冲区中剩余的数据 + if (buffer.trim()) { + processBuffer(buffer, controller, encoder); + } + break; + } + + // 检查value是否有效 + if (!value || value.length === 0) { + continue; + } + + let chunk; + try { + chunk = decoder.decode(value, { stream: true }); + } catch (decodeError) { + console.warn("Failed to decode chunk", decodeError); + continue; + } + + if (chunk.length === 0) { + continue; + } + + buffer += chunk; + + // 如果缓冲区过大,进行处理避免内存泄漏 + if (buffer.length > 1000000) { + // 1MB 限制 + console.warn( + "Buffer size exceeds limit, processing partial data" + ); + const lines = buffer.split("\n"); + buffer = lines.pop() || ""; + + for (const line of lines) { + if (line.trim()) { + try { + processLine(line, { + controller, + encoder, + hasTextContent: () => hasTextContent, + setHasTextContent: (val) => (hasTextContent = val), + reasoningContent: () => reasoningContent, + appendReasoningContent: (content) => + (reasoningContent += content), + isReasoningComplete: () => isReasoningComplete, + setReasoningComplete: (val) => + (isReasoningComplete = val), + }); + } catch (error) { + console.error("Error processing line:", line, error); + // 如果解析失败,直接传递原始行 + controller.enqueue(encoder.encode(line + "\n")); + } + } + } + continue; + } + + // 处理缓冲区中完整的数据行 + const lines = buffer.split("\n"); + buffer = lines.pop() || ""; // 最后一行可能不完整,保留在缓冲区 + + for (const line of lines) { + if (!line.trim()) continue; + + try { + processLine(line, { + controller, + encoder, + hasTextContent: () => hasTextContent, + setHasTextContent: (val) => (hasTextContent = val), + reasoningContent: () => reasoningContent, + appendReasoningContent: (content) => + (reasoningContent += content), + isReasoningComplete: () => isReasoningComplete, + setReasoningComplete: (val) => (isReasoningComplete = val), + }); + } catch (error) { + console.error("Error processing line:", line, error); + 
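The branches above implement an accumulate-then-flush strategy: the first chunk of a tool call is forwarded with empty arguments, intermediate argument fragments are only buffered, and a single consolidated chunk with the repaired argument string is emitted once the tool call completes. Condensed into a small model (the shapes are illustrative):

// Forward the head, buffer the middle, flush the repaired whole at the end.
class ToolCallAccumulator {
  private args = "";

  head(id: string, name: string): object {
    // First chunk: announce the tool call with empty arguments.
    return { tool_calls: [{ index: 0, id, type: "function", function: { name, arguments: "" } }] };
  }

  push(fragment: string): void {
    this.args += fragment; // intermediate chunks are buffered, not forwarded
  }

  flush(repair: (raw: string) => string): object {
    // Completion: emit one delta carrying the full, repaired argument string.
    return { tool_calls: [{ index: 0, type: "function", function: { arguments: repair(this.args) } }] };
  }
}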
// If parsing fails, pass the original line through as-is + controller.enqueue(encoder.encode(line + "\n")); + } + } + } + } catch (error) { + console.error("Stream error:", error); + controller.error(error); + } finally { + try { + reader.releaseLock(); + } catch (e) { + console.error("Error releasing reader lock:", e); + } + controller.close(); + } + }, + }); + + return new Response(stream, { + status: response.status, + statusText: response.statusText, + headers: { + "Content-Type": "text/event-stream", + "Cache-Control": "no-cache", + Connection: "keep-alive", + }, + }); + } + + return response; + } +} diff --git a/packages/core/src/transformer/forcereasoning.transformer.ts b/packages/core/src/transformer/forcereasoning.transformer.ts new file mode 100644 index 0000000..72e7fcd --- /dev/null +++ b/packages/core/src/transformer/forcereasoning.transformer.ts @@ -0,0 +1,342 @@ +import { UnifiedChatRequest } from "../types/llm"; +import { Transformer } from "../types/transformer"; + +const PROMPT = `Always think before answering. Even if the problem seems simple, always write down your reasoning process explicitly. + +Output format: + +<reasoning_content> +Your detailed thinking process goes here +</reasoning_content> + +Your final answer must follow after the closing tag above.`; + +const MAX_INTERLEAVED_TIMES = 10; + +export class ForceReasoningTransformer implements Transformer { + name = "forcereasoning"; + + async transformRequestIn( + request: UnifiedChatRequest + ): Promise<UnifiedChatRequest> { + let times = 0; + request.messages + .filter((msg) => msg.role === "assistant") + .reverse() + .forEach((message) => { + if (message.thinking) { + if (message.thinking.content) { + if (!message.content || times < MAX_INTERLEAVED_TIMES) { + times++; + message.content = `${message.thinking.content}\n${message.content}`; + } + } + delete message.thinking; + } + }); + const lastMessage = request.messages[request.messages.length - 1]; + if (lastMessage.role === "user") { + if (Array.isArray(lastMessage.content)) { + lastMessage.content.push({ + type: "text", + text: PROMPT, + }); + } else { + lastMessage.content = [ + { + type: "text", + text: PROMPT, + }, + { + type: "text", + text: lastMessage.content || '', + }, + ]; + } + } + if (lastMessage.role === "tool") { + request.messages.push({ + role: "user", + content: [ + { + type: "text", + text: PROMPT, + }, + ], + }); + } + return request; + } + + async transformResponseOut(response: Response): Promise<Response> { + const reasonStartTag = "<reasoning_content>"; + const reasonStopTag = "</reasoning_content>"; + + if (response.headers.get("Content-Type")?.includes("application/json")) { + const jsonResponse: any = await response.json(); + if (jsonResponse.choices[0]?.message.content) { + const regex = /<reasoning_content>(.*?)<\/reasoning_content>/s; + const match = jsonResponse.choices[0]?.message.content.match(regex); + if (match && match[1]) { + jsonResponse.thinking = { + content: match[1], + }; + } + } + return new Response(JSON.stringify(jsonResponse), { + status: response.status, + statusText: response.statusText, + headers: response.headers, + }); + } else if (response.headers.get("Content-Type")?.includes("stream")) { + if (!response.body) { + return response; + } + let contentIndex = 0; + + const decoder = new TextDecoder(); + const encoder = new TextEncoder(); + + const stream = new ReadableStream({ + async start(controller) { + const reader = response.body!.getReader(); + let lineBuffer = ""; + + let fsmState: "SEARCHING" | "REASONING" | "FINAL" = "SEARCHING"; + let tagBuffer = ""; + let finalBuffer = ""; + + const processAndEnqueue = ( + originalData: any, + content: string | null | undefined + ) => {
+ if (typeof content !== "string") { + if ( + originalData.choices?.[0]?.delta && + Object.keys(originalData.choices[0].delta).length > 0 && + !originalData.choices[0].delta.content + ) { + originalData.choices[0].index = contentIndex + controller.enqueue( + encoder.encode(`data: ${JSON.stringify(originalData)}\n\n`) + ); + } + return; + } + + let currentContent = tagBuffer + content; + tagBuffer = ""; + + while (currentContent.length > 0) { + if (fsmState === "SEARCHING") { + const startTagIndex = currentContent.indexOf(reasonStartTag); + if (startTagIndex !== -1) { + currentContent = currentContent.substring( + startTagIndex + reasonStartTag.length + ); + fsmState = "REASONING"; + } else { + for (let i = reasonStartTag.length - 1; i > 0; i--) { + if ( + currentContent.endsWith(reasonStartTag.substring(0, i)) + ) { + tagBuffer = currentContent.substring( + currentContent.length - i + ); + break; + } + } + currentContent = ""; + } + } else if (fsmState === "REASONING") { + const endTagIndex = currentContent.indexOf(reasonStopTag); + if (endTagIndex !== -1) { + const reasoningPart = currentContent.substring( + 0, + endTagIndex + ); + if (reasoningPart.length > 0) { + const newDelta = { + ...originalData.choices[0].delta, + thinking: { + content: reasoningPart, + }, + }; + delete newDelta.content; + const thinkingChunk = { + ...originalData, + choices: [ + { ...originalData.choices[0], delta: newDelta, index: contentIndex }, + ], + }; + controller.enqueue( + encoder.encode( + `data: ${JSON.stringify(thinkingChunk)}\n\n` + ) + ); + } + + // Send signature message + const signatureDelta = { + ...originalData.choices[0].delta, + thinking: { signature: new Date().getTime().toString() }, + }; + delete signatureDelta.content; + const signatureChunk = { + ...originalData, + choices: [ + { ...originalData.choices[0], delta: signatureDelta, index: contentIndex }, + ], + }; + controller.enqueue( + encoder.encode( + `data: ${JSON.stringify(signatureChunk)}\n\n` + ) + ); + contentIndex++; + + currentContent = currentContent.substring( + endTagIndex + reasonStopTag.length + ); + fsmState = "FINAL"; + } else { + let reasoningPart = currentContent; + for (let i = reasonStopTag.length - 1; i > 0; i--) { + if ( + currentContent.endsWith(reasonStopTag.substring(0, i)) + ) { + tagBuffer = currentContent.substring( + currentContent.length - i + ); + reasoningPart = currentContent.substring( + 0, + currentContent.length - i + ); + break; + } + } + if (reasoningPart.length > 0) { + const newDelta = { + ...originalData.choices[0].delta, + thinking: { content: reasoningPart }, + }; + delete newDelta.content; + const thinkingChunk = { + ...originalData, + choices: [ + { ...originalData.choices[0], delta: newDelta, index: contentIndex }, + ], + }; + controller.enqueue( + encoder.encode( + `data: ${JSON.stringify(thinkingChunk)}\n\n` + ) + ); + } + currentContent = ""; + } + } else if (fsmState === "FINAL") { + if (currentContent.length > 0) { + // 检查内容是否只包含换行符 + const isOnlyNewlines = /^\s*$/.test(currentContent); + + if (isOnlyNewlines) { + // 如果只有换行符,添加到缓冲区但不发送 + finalBuffer += currentContent; + } else { + // 如果有非换行符内容,将缓冲区和新内容一起发送 + const finalPart = finalBuffer + currentContent; + const newDelta = { + ...originalData.choices[0].delta, + content: finalPart, + }; + if (newDelta.thinking) delete newDelta.thinking; + const finalChunk = { + ...originalData, + choices: [ + { ...originalData.choices[0], delta: newDelta }, + ], + }; + controller.enqueue( + encoder.encode(`data: ${JSON.stringify(finalChunk)}\n\n`) + ); + 
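One subtlety in the state machine above is worth isolating: a chunk can end in the middle of <reasoning_content> or </reasoning_content>, so the longest chunk suffix that is a prefix of the tag is held back in tagBuffer until the next chunk resolves the match. A standalone sketch of that guard:

// Returns the text safe to emit now, plus the suffix that might be the start
// of `tag` and must wait for the next chunk.
function holdPartialTag(chunk: string, tag: string): { emit: string; held: string } {
  for (let i = tag.length - 1; i > 0; i--) {
    if (chunk.endsWith(tag.substring(0, i))) {
      return { emit: chunk.slice(0, chunk.length - i), held: chunk.slice(chunk.length - i) };
    }
  }
  return { emit: chunk, held: "" };
}

// e.g. holdPartialTag("thinking...</reas", "</reasoning_content>")
//   -> { emit: "thinking...", held: "</reas" }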
// 发送后清空缓冲区 + finalBuffer = ""; + } + } + contentIndex++ + currentContent = ""; + } + } + }; + + try { + while (true) { + const { done, value } = await reader.read(); + if (done) { + break; + } + const chunk = decoder.decode(value, { stream: true }); + lineBuffer += chunk; + const lines = lineBuffer.split("\n"); + lineBuffer = lines.pop() || ""; + + for (const line of lines) { + if (!line.trim()) continue; + + if (line.trim() === "data: [DONE]") { + controller.enqueue(encoder.encode(line + "\n\n")); + break; + } + + if (line.startsWith("data:")) { + try { + const data = JSON.parse(line.slice(5)); + processAndEnqueue(data, data.choices?.[0]?.delta?.content); + } catch (e) { + controller.enqueue(encoder.encode(line + "\n")); + } + } else { + controller.enqueue(encoder.encode(line + "\n")); + } + } + } + } catch (error) { + console.error("Stream error:", error); + controller.error(error); + } finally { + try { + reader.releaseLock(); + } catch (e) { + console.error("Error releasing reader lock:", e); + } + + if (fsmState === "REASONING") { + const signatureDelta = { + thinking: { signature: new Date().getTime().toString() }, + }; + const signatureChunk = { + choices: [{ delta: signatureDelta }], + }; + controller.enqueue( + encoder.encode(`data: ${JSON.stringify(signatureChunk)}\n\n`) + ); + } + + controller.close(); + } + }, + }); + + return new Response(stream, { + status: response.status, + statusText: response.statusText, + headers: { + "Content-Type": response.headers.get("Content-Type") || "text/plain", + "Cache-Control": "no-cache", + Connection: "keep-alive", + }, + }); + } + + return response; + } +} diff --git a/packages/core/src/transformer/gemini.transformer.ts b/packages/core/src/transformer/gemini.transformer.ts new file mode 100644 index 0000000..08278ff --- /dev/null +++ b/packages/core/src/transformer/gemini.transformer.ts @@ -0,0 +1,40 @@ +import { LLMProvider, UnifiedChatRequest } from "../types/llm"; +import { Transformer } from "../types/transformer"; +import { + buildRequestBody, + transformRequestOut, + transformResponseOut, +} from "../utils/gemini.util"; + +export class GeminiTransformer implements Transformer { + name = "gemini"; + + endPoint = "/v1beta/models/:modelAndAction"; + + async transformRequestIn( + request: UnifiedChatRequest, + provider: LLMProvider + ): Promise> { + return { + body: buildRequestBody(request), + config: { + url: new URL( + `./${request.model}:${ + request.stream ? 
"streamGenerateContent?alt=sse" : "generateContent" + }`, + provider.baseUrl + ), + headers: { + "x-goog-api-key": provider.apiKey, + Authorization: undefined, + }, + }, + }; + } + + transformRequestOut = transformRequestOut; + + async transformResponseOut(response: Response): Promise { + return transformResponseOut(response, this.name, this.logger); + } +} diff --git a/packages/core/src/transformer/groq.transformer.ts b/packages/core/src/transformer/groq.transformer.ts new file mode 100644 index 0000000..ee3d9d4 --- /dev/null +++ b/packages/core/src/transformer/groq.transformer.ts @@ -0,0 +1,228 @@ +import { MessageContent, TextContent, UnifiedChatRequest } from "@/types/llm"; +import { Transformer } from "../types/transformer"; +import { v4 as uuidv4 } from "uuid" + +export class GroqTransformer implements Transformer { + name = "groq"; + + async transformRequestIn(request: UnifiedChatRequest): Promise { + request.messages.forEach(msg => { + if (Array.isArray(msg.content)) { + (msg.content as MessageContent[]).forEach((item) => { + if ((item as TextContent).cache_control) { + delete (item as TextContent).cache_control; + } + }); + } else if (msg.cache_control) { + delete msg.cache_control; + } + }) + if (Array.isArray(request.tools)) { + request.tools.forEach(tool => { + delete tool.function.parameters.$schema; + }) + } + return request + } + + async transformResponseOut(response: Response): Promise { + if (response.headers.get("Content-Type")?.includes("application/json")) { + const jsonResponse = await response.json(); + return new Response(JSON.stringify(jsonResponse), { + status: response.status, + statusText: response.statusText, + headers: response.headers, + }); + } else if (response.headers.get("Content-Type")?.includes("stream")) { + if (!response.body) { + return response; + } + + const decoder = new TextDecoder(); + const encoder = new TextEncoder(); + + let hasTextContent = false; + let reasoningContent = ""; + let isReasoningComplete = false; + let buffer = ""; // 用于缓冲不完整的数据 + + const stream = new ReadableStream({ + async start(controller) { + const reader = response.body!.getReader(); + const processBuffer = (buffer: string, controller: ReadableStreamDefaultController, encoder: InstanceType) => { + const lines = buffer.split("\n"); + for (const line of lines) { + if (line.trim()) { + controller.enqueue(encoder.encode(line + "\n")); + } + } + }; + + const processLine = (line: string, context: { + controller: ReadableStreamDefaultController; + encoder: typeof TextEncoder; + hasTextContent: () => boolean; + setHasTextContent: (val: boolean) => void; + reasoningContent: () => string; + appendReasoningContent: (content: string) => void; + isReasoningComplete: () => boolean; + setReasoningComplete: (val: boolean) => void; + }) => { + const { controller, encoder } = context; + + if (line.startsWith("data: ") && line.trim() !== "data: [DONE]") { + const jsonStr = line.slice(6); + try { + const data = JSON.parse(jsonStr); + if (data.error) { + throw new Error(JSON.stringify(data)); + } + + if (data.choices?.[0]?.delta?.content && !context.hasTextContent()) { + context.setHasTextContent(true); + } + + if ( + data.choices?.[0]?.delta?.tool_calls?.length + ) { + data.choices?.[0]?.delta?.tool_calls.forEach((tool: any) => { + tool.id = `call_${uuidv4()}`; + }) + } + + if ( + data.choices?.[0]?.delta?.tool_calls?.length && + context.hasTextContent() + ) { + if (typeof data.choices[0].index === 'number') { + data.choices[0].index += 1; + } else { + data.choices[0].index = 1; + } + } + + 
const modifiedLine = `data: ${JSON.stringify(data)}\n\n`; + controller.enqueue(encoder.encode(modifiedLine)); + } catch (e) { + // If JSON parsing fails the data may be incomplete; pass the original line through + controller.enqueue(encoder.encode(line + "\n")); + } + } else { + // Pass through non-data lines (like [DONE]) + controller.enqueue(encoder.encode(line + "\n")); + } + }; + + try { + while (true) { + const { done, value } = await reader.read(); + if (done) { + // Process any data left in the buffer + if (buffer.trim()) { + processBuffer(buffer, controller, encoder); + } + break; + } + + // Skip empty or invalid chunks + if (!value || value.length === 0) { + continue; + } + + let chunk; + try { + chunk = decoder.decode(value, { stream: true }); + } catch (decodeError) { + console.warn("Failed to decode chunk", decodeError); + continue; + } + + if (chunk.length === 0) { + continue; + } + + buffer += chunk; + + // If the buffer grows too large, process it now to avoid unbounded memory use + if (buffer.length > 1000000) { // 1MB limit + console.warn("Buffer size exceeds limit, processing partial data"); + const lines = buffer.split("\n"); + buffer = lines.pop() || ""; + + for (const line of lines) { + if (line.trim()) { + try { + processLine(line, { + controller, + encoder, + hasTextContent: () => hasTextContent, + setHasTextContent: (val) => hasTextContent = val, + reasoningContent: () => reasoningContent, + appendReasoningContent: (content) => reasoningContent += content, + isReasoningComplete: () => isReasoningComplete, + setReasoningComplete: (val) => isReasoningComplete = val + }); + } catch (error) { + console.error("Error processing line:", line, error); + // If parsing fails, pass the original line through + controller.enqueue(encoder.encode(line + "\n")); + } + } + } + continue; + } + + // Process the complete lines currently in the buffer + const lines = buffer.split("\n"); + buffer = lines.pop() || ""; // the last line may be incomplete; keep it buffered + + for (const line of lines) { + if (!line.trim()) continue; + + try { + processLine(line, { + controller, + encoder, + hasTextContent: () => hasTextContent, + setHasTextContent: (val) => hasTextContent = val, + reasoningContent: () => reasoningContent, + appendReasoningContent: (content) => reasoningContent += content, + isReasoningComplete: () => isReasoningComplete, + setReasoningComplete: (val) => isReasoningComplete = val + }); + } catch (error) { + console.error("Error processing line:", line, error); + // If parsing fails, pass the original line through + controller.enqueue(encoder.encode(line + "\n")); + } + } + } + } catch (error) { + console.error("Stream error:", error); + controller.error(error); + } finally { + try { + reader.releaseLock(); + } catch (e) { + console.error("Error releasing reader lock:", e); + } + controller.close(); + } + }, + + }); + + return new Response(stream, { + status: response.status, + statusText: response.statusText, + headers: { + "Content-Type": "text/event-stream", + "Cache-Control": "no-cache", + Connection: "keep-alive", + }, + }); + } + + return response; + } +} \ No newline at end of file diff --git a/packages/core/src/transformer/index.ts b/packages/core/src/transformer/index.ts new file mode 100644 index 0000000..9db7d3b --- /dev/null +++ b/packages/core/src/transformer/index.ts @@ -0,0 +1,45 @@ +import { AnthropicTransformer } from "./anthropic.transformer"; +import { GeminiTransformer } from "./gemini.transformer"; +import { VertexGeminiTransformer } from "./vertex-gemini.transformer"; +import { DeepseekTransformer } from "./deepseek.transformer"; +import { TooluseTransformer } from "./tooluse.transformer"; +import { OpenrouterTransformer } from "./openrouter.transformer"; +import { MaxTokenTransformer } from "./maxtoken.transformer"; +import {
GroqTransformer } from "./groq.transformer"; +import { CleancacheTransformer } from "./cleancache.transformer"; +import { EnhanceToolTransformer } from "./enhancetool.transformer"; +import { ReasoningTransformer } from "./reasoning.transformer"; +import { SamplingTransformer } from "./sampling.transformer"; +import { MaxCompletionTokens } from "./maxcompletiontokens.transformer"; +import { VertexClaudeTransformer } from "./vertex-claude.transformer"; +import { CerebrasTransformer } from "./cerebras.transformer"; +import { StreamOptionsTransformer } from "./streamoptions.transformer"; +import { OpenAITransformer } from "./openai.transformer"; +import { CustomParamsTransformer } from "./customparams.transformer"; +import { VercelTransformer } from "./vercel.transformer"; +import { OpenAIResponsesTransformer } from "./openai.responses.transformer"; +import { ForceReasoningTransformer } from "./forcereasoning.transformer" + +export default { + AnthropicTransformer, + GeminiTransformer, + VertexGeminiTransformer, + VertexClaudeTransformer, + DeepseekTransformer, + TooluseTransformer, + OpenrouterTransformer, + OpenAITransformer, + MaxTokenTransformer, + GroqTransformer, + CleancacheTransformer, + EnhanceToolTransformer, + ReasoningTransformer, + SamplingTransformer, + MaxCompletionTokens, + CerebrasTransformer, + StreamOptionsTransformer, + CustomParamsTransformer, + VercelTransformer, + OpenAIResponsesTransformer, + ForceReasoningTransformer +}; diff --git a/packages/core/src/transformer/maxcompletiontokens.transformer.ts b/packages/core/src/transformer/maxcompletiontokens.transformer.ts new file mode 100644 index 0000000..33c584b --- /dev/null +++ b/packages/core/src/transformer/maxcompletiontokens.transformer.ts @@ -0,0 +1,16 @@ +import { UnifiedChatRequest } from "../types/llm"; +import { Transformer } from "../types/transformer"; + +export class MaxCompletionTokens implements Transformer { + static TransformerName = "maxcompletiontokens"; + + async transformRequestIn( + request: UnifiedChatRequest + ): Promise { + if (request.max_tokens) { + request.max_completion_tokens = request.max_tokens; + delete request.max_tokens; + } + return request; + } +} diff --git a/packages/core/src/transformer/maxtoken.transformer.ts b/packages/core/src/transformer/maxtoken.transformer.ts new file mode 100644 index 0000000..a64cbf7 --- /dev/null +++ b/packages/core/src/transformer/maxtoken.transformer.ts @@ -0,0 +1,18 @@ +import { UnifiedChatRequest } from "../types/llm"; +import { Transformer, TransformerOptions } from "../types/transformer"; + +export class MaxTokenTransformer implements Transformer { + static TransformerName = "maxtoken"; + max_tokens: number; + + constructor(private readonly options?: TransformerOptions) { + this.max_tokens = this.options?.max_tokens; + } + + async transformRequestIn(request: UnifiedChatRequest): Promise { + if (request.max_tokens && request.max_tokens > this.max_tokens) { + request.max_tokens = this.max_tokens; + } + return request; + } +} diff --git a/packages/core/src/transformer/openai.responses.transformer.ts b/packages/core/src/transformer/openai.responses.transformer.ts new file mode 100644 index 0000000..f896382 --- /dev/null +++ b/packages/core/src/transformer/openai.responses.transformer.ts @@ -0,0 +1,792 @@ +import { UnifiedChatRequest, MessageContent } from "@/types/llm"; +import { Transformer } from "@/types/transformer"; + +interface ResponsesAPIOutputItem { + type: string; + id?: string; + call_id?: string; + name?: string; + arguments?: string; + 
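A quick usage sketch for the two token-limit transformers defined above (the option values and the `as any` casts are illustrative):

const cap = new MaxTokenTransformer({ max_tokens: 4096 });
const capped: any = await cap.transformRequestIn({
  model: "m",
  messages: [],
  max_tokens: 32000,
} as any);
// capped.max_tokens === 4096

// Providers that expect max_completion_tokens can chain the rename:
const renamed: any = await new MaxCompletionTokens().transformRequestIn(capped);
// renamed.max_completion_tokens === 4096; renamed.max_tokens is removed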
content?: Array<{ + type: string; + text?: string; + image_url?: string; + mime_type?: string; + image_base64?: string; + }>; + reasoning?: string; +} + +interface ResponsesAPIPayload { + id: string; + object: string; + model: string; + created_at: number; + output: ResponsesAPIOutputItem[]; + usage?: { + input_tokens: number; + output_tokens: number; + total_tokens: number; + }; +} + +interface ResponsesStreamEvent { + type: string; + item_id?: string; + output_index?: number; + delta?: + | string + | { + url?: string; + b64_json?: string; + mime_type?: string; + }; + item?: { + id?: string; + type?: string; + call_id?: string; + name?: string; + content?: Array<{ + type: string; + text?: string; + image_url?: string; + mime_type?: string; + }>; + reasoning?: string; // 添加 reasoning 字段支持 + }; + response?: { + id?: string; + model?: string; + output?: Array<{ + type: string; + }>; + }; + reasoning_summary?: string; // 添加推理摘要支持 +} + +export class OpenAIResponsesTransformer implements Transformer { + name = "openai-responses"; + endPoint = "/v1/responses"; + + async transformRequestIn( + request: UnifiedChatRequest + ): Promise { + delete request.temperature; + delete request.max_tokens; + + // 处理 reasoning 参数 + if (request.reasoning) { + (request as any).reasoning = { + effort: request.reasoning.effort, + summary: "detailed", + }; + } + + const input: any[] = []; + + const systemMessages = request.messages.filter( + (msg) => msg.role === "system" + ); + if (systemMessages.length > 0) { + const firstSystem = systemMessages[0]; + if (Array.isArray(firstSystem.content)) { + firstSystem.content.forEach((item) => { + let text = ""; + if (typeof item === "string") { + text = item; + } else if (item && typeof item === "object" && "text" in item) { + text = (item as { text: string }).text; + } + input.push({ + role: "system", + content: text, + }); + }); + } else { + (request as any).instructions = firstSystem.content; + } + } + + request.messages.forEach((message) => { + if (message.role === "system") return; + + if (Array.isArray(message.content)) { + const convertedContent = message.content + .map((content) => this.normalizeRequestContent(content, message.role)) + .filter( + (content): content is Record => content !== null + ); + + if (convertedContent.length > 0) { + (message as any).content = convertedContent; + } else { + delete (message as any).content; + } + } + + if (message.role === "tool") { + const toolMessage: any = { ...message }; + toolMessage.type = "function_call_output"; + toolMessage.call_id = message.tool_call_id; + toolMessage.output = message.content; + delete toolMessage.cache_control; + delete toolMessage.role; + delete toolMessage.tool_call_id; + delete toolMessage.content; + input.push(toolMessage); + return; + } + + if (message.role === "assistant" && Array.isArray(message.tool_calls)) { + message.tool_calls.forEach((tool) => { + input.push({ + type: "function_call", + arguments: tool.function.arguments, + name: tool.function.name, + call_id: tool.id, + }); + }); + return; + } + + input.push(message); + }); + + (request as any).input = input; + delete (request as any).messages; + + if (Array.isArray(request.tools)) { + const webSearch = request.tools.find( + (tool) => tool.function.name === "web_search" + ); + + (request as any).tools = request.tools + .filter((tool) => tool.function.name !== "web_search") + .map((tool) => { + if (tool.function.name === "WebSearch") { + delete tool.function.parameters.properties.allowed_domains; + } + if (tool.function.name === "Edit") { 
+ return { + type: tool.type, + name: tool.function.name, + description: tool.function.description, + parameters: { + ...tool.function.parameters, + required: [ + "file_path", + "old_string", + "new_string", + "replace_all", + ], + }, + strict: true, + }; + } + return { + type: tool.type, + name: tool.function.name, + description: tool.function.description, + parameters: tool.function.parameters, + }; + }); + + if (webSearch) { + (request as any).tools.push({ + type: "web_search", + }); + } + } + + request.parallel_tool_calls = false; + + return request; + } + + async transformResponseOut(response: Response): Promise { + const contentType = response.headers.get("Content-Type") || ""; + + if (contentType.includes("application/json")) { + const jsonResponse: any = await response.json(); + + // 检查是否为responses API格式的JSON响应 + if (jsonResponse.object === "response" && jsonResponse.output) { + // 将responses格式转换为chat格式 + const chatResponse = this.convertResponseToChat(jsonResponse); + return new Response(JSON.stringify(chatResponse), { + status: response.status, + statusText: response.statusText, + headers: response.headers, + }); + } + + // 不是responses API格式,保持原样 + return new Response(JSON.stringify(jsonResponse), { + status: response.status, + statusText: response.statusText, + headers: response.headers, + }); + } else if (contentType.includes("text/event-stream")) { + if (!response.body) { + return response; + } + + const decoder = new TextDecoder(); + const encoder = new TextEncoder(); + let buffer = ""; // 用于缓冲不完整的数据 + let isStreamEnded = false; + + const transformer = this; + const stream = new ReadableStream({ + async start(controller) { + const reader = response.body!.getReader(); + + // 索引跟踪变量,只有在事件类型切换时才增加索引 + let currentIndex = -1; + let lastEventType = ""; + + // 获取当前应该使用的索引的函数 + const getCurrentIndex = (eventType: string) => { + if (eventType !== lastEventType) { + currentIndex++; + lastEventType = eventType; + } + return currentIndex; + }; + + try { + while (true) { + const { done, value } = await reader.read(); + if (done) { + if (!isStreamEnded) { + // 发送结束标记 + const doneChunk = `data: [DONE]\n\n`; + controller.enqueue(encoder.encode(doneChunk)); + } + break; + } + + const chunk = decoder.decode(value, { stream: true }); + buffer += chunk; + + // 处理缓冲区中完整的数据行 + let lines = buffer.split(/\r?\n/); + buffer = lines.pop() || ""; // 最后一行可能不完整,保留在缓冲区 + + for (const line of lines) { + if (!line.trim()) continue; + + try { + if (line.startsWith("event: ")) { + // 处理事件行,暂存以便与下一行数据配对 + continue; + } else if (line.startsWith("data: ")) { + const dataStr = line.slice(5).trim(); // 移除 "data: " 前缀 + if (dataStr === "[DONE]") { + isStreamEnded = true; + controller.enqueue(encoder.encode(`data: [DONE]\n\n`)); + continue; + } + + try { + const data: ResponsesStreamEvent = JSON.parse(dataStr); + + // 根据不同的事件类型转换为chat格式 + if (data.type === "response.output_text.delta") { + // 将output_text.delta转换为chat格式 + const chatChunk = { + id: data.item_id || "chatcmpl-" + Date.now(), + object: "chat.completion.chunk", + created: Math.floor(Date.now() / 1000), + model: data.response?.model, + choices: [ + { + index: getCurrentIndex(data.type), + delta: { + content: data.delta || "", + }, + finish_reason: null, + }, + ], + }; + + controller.enqueue( + encoder.encode( + `data: ${JSON.stringify(chatChunk)}\n\n` + ) + ); + } else if ( + data.type === "response.output_item.added" && + data.item?.type === "function_call" + ) { + // 处理function call开始 - 创建初始的tool call chunk + const functionCallChunk = { + id: + 
data.item.call_id || + data.item.id || + "chatcmpl-" + Date.now(), + object: "chat.completion.chunk", + created: Math.floor(Date.now() / 1000), + model: data.response?.model || "gpt-5-codex-", + choices: [ + { + index: getCurrentIndex(data.type), + delta: { + role: "assistant", + tool_calls: [ + { + index: 0, + id: data.item.call_id || data.item.id, + function: { + name: data.item.name || "", + arguments: "", + }, + type: "function", + }, + ], + }, + finish_reason: null, + }, + ], + }; + + controller.enqueue( + encoder.encode( + `data: ${JSON.stringify(functionCallChunk)}\n\n` + ) + ); + } else if ( + data.type === "response.output_item.added" && + data.item?.type === "message" + ) { + // 处理message item added事件 + const contentItems: MessageContent[] = []; + (data.item.content || []).forEach((item: any) => { + if (item.type === "output_text") { + contentItems.push({ + type: "text", + text: item.text || "", + }); + } + }); + + const delta: any = { role: "assistant" }; + if ( + contentItems.length === 1 && + contentItems[0].type === "text" + ) { + delta.content = contentItems[0].text; + } else if (contentItems.length > 0) { + delta.content = contentItems; + } + if (delta.content) { + const messageChunk = { + id: data.item.id || "chatcmpl-" + Date.now(), + object: "chat.completion.chunk", + created: Math.floor(Date.now() / 1000), + model: data.response?.model, + choices: [ + { + index: getCurrentIndex(data.type), + delta, + finish_reason: null, + }, + ], + }; + + controller.enqueue( + encoder.encode( + `data: ${JSON.stringify(messageChunk)}\n\n` + ) + ); + } + } else if ( + data.type === "response.output_text.annotation.added" + ) { + const annotationChunk = { + id: data.item_id || "chatcmpl-" + Date.now(), + object: "chat.completion.chunk", + created: Math.floor(Date.now() / 1000), + model: data.response?.model || "gpt-5-codex", + choices: [ + { + index: getCurrentIndex(data.type), + delta: { + annotations: [ + { + type: "url_citation", + url_citation: { + url: data.annotation?.url || "", + title: data.annotation?.title || "", + content: "", + start_index: + data.annotation?.start_index || 0, + end_index: + data.annotation?.end_index || 0, + }, + }, + ], + }, + finish_reason: null, + }, + ], + }; + + controller.enqueue( + encoder.encode( + `data: ${JSON.stringify(annotationChunk)}\n\n` + ) + ); + } else if ( + data.type === "response.function_call_arguments.delta" + ) { + // 处理function call参数增量 + const functionCallChunk = { + id: data.item_id || "chatcmpl-" + Date.now(), + object: "chat.completion.chunk", + created: Math.floor(Date.now() / 1000), + model: data.response?.model || "gpt-5-codex-", + choices: [ + { + index: getCurrentIndex(data.type), + delta: { + tool_calls: [ + { + index: 0, + function: { + arguments: data.delta || "", + }, + }, + ], + }, + finish_reason: null, + }, + ], + }; + + controller.enqueue( + encoder.encode( + `data: ${JSON.stringify(functionCallChunk)}\n\n` + ) + ); + } else if (data.type === "response.completed") { + // 发送结束标记 - 检查是否是tool_calls完成 + const finishReason = data.response?.output?.some( + (item: any) => item.type === "function_call" + ) + ? 
"tool_calls" + : "stop"; + + const endChunk = { + id: data.response?.id || "chatcmpl-" + Date.now(), + object: "chat.completion.chunk", + created: Math.floor(Date.now() / 1000), + model: data.response?.model || "gpt-5-codex-", + choices: [ + { + index: 0, + delta: {}, + finish_reason: finishReason, + }, + ], + }; + + controller.enqueue( + encoder.encode( + `data: ${JSON.stringify(endChunk)}\n\n` + ) + ); + isStreamEnded = true; + } else if ( + data.type === "response.reasoning_summary_text.delta" + ) { + // 处理推理文本,将其转换为 thinking delta 格式 + const thinkingChunk = { + id: data.item_id || "chatcmpl-" + Date.now(), + object: "chat.completion.chunk", + created: Math.floor(Date.now() / 1000), + model: data.response?.model, + choices: [ + { + index: getCurrentIndex(data.type), + delta: { + thinking: { + content: data.delta || "", + }, + }, + finish_reason: null, + }, + ], + }; + + controller.enqueue( + encoder.encode( + `data: ${JSON.stringify(thinkingChunk)}\n\n` + ) + ); + } else if ( + data.type === "response.reasoning_summary_part.done" && + data.part + ) { + const thinkingChunk = { + id: data.item_id || "chatcmpl-" + Date.now(), + object: "chat.completion.chunk", + created: Math.floor(Date.now() / 1000), + model: data.response?.model, + choices: [ + { + index: currentIndex, + delta: { + thinking: { + signature: data.item_id, + }, + }, + finish_reason: null, + }, + ], + }; + + controller.enqueue( + encoder.encode( + `data: ${JSON.stringify(thinkingChunk)}\n\n` + ) + ); + } + } catch (e) { + // 如果JSON解析失败,传递原始行 + controller.enqueue(encoder.encode(line + "\n")); + } + } else { + // 传递其他行 + controller.enqueue(encoder.encode(line + "\n")); + } + } catch (error) { + console.error("Error processing line:", line, error); + // 如果解析失败,直接传递原始行 + controller.enqueue(encoder.encode(line + "\n")); + } + } + } + + // 处理缓冲区中剩余的数据 + if (buffer.trim()) { + controller.enqueue(encoder.encode(buffer + "\n")); + } + + // 确保流结束时发送结束标记 + if (!isStreamEnded) { + const doneChunk = `data: [DONE]\n\n`; + controller.enqueue(encoder.encode(doneChunk)); + } + } catch (error) { + console.error("Stream error:", error); + controller.error(error); + } finally { + try { + reader.releaseLock(); + } catch (e) { + console.error("Error releasing reader lock:", e); + } + controller.close(); + } + }, + }); + + return new Response(stream, { + status: response.status, + statusText: response.statusText, + headers: { + "Content-Type": "text/event-stream", + "Cache-Control": "no-cache", + Connection: "keep-alive", + "Access-Control-Allow-Origin": "*", + }, + }); + } + + return response; + } + + private normalizeRequestContent(content: any, role: string | undefined) { + // 克隆内容对象并删除缓存控制字段 + const clone = { ...content }; + delete clone.cache_control; + + if (content.type === "text") { + return { + type: role === "assistant" ? "output_text" : "input_text", + text: content.text, + }; + } + + if (content.type === "image_url") { + console.log(content); + const imagePayload: Record = { + type: role === "assistant" ? 
"output_image" : "input_image", + }; + + if (typeof content.image_url?.url === "string") { + imagePayload.image_url = content.image_url.url; + } + + return imagePayload; + } + + return null; + } + + private convertResponseToChat(responseData: ResponsesAPIPayload): any { + // 从output数组中提取不同类型的输出 + const messageOutput = responseData.output?.find( + (item) => item.type === "message" + ); + const functionCallOutput = responseData.output?.find( + (item) => item.type === "function_call" + ); + let annotations; + if ( + messageOutput?.content?.length && + messageOutput?.content[0].annotations + ) { + annotations = messageOutput.content[0].annotations.map((item) => { + return { + type: "url_citation", + url_citation: { + url: item.url || "", + title: item.title || "", + content: "", + start_index: item.start_index || 0, + end_index: item.end_index || 0, + }, + }; + }); + } + + this.logger.debug({ + data: annotations, + type: "url_citation", + }); + + let messageContent: string | MessageContent[] | null = null; + let toolCalls = null; + let thinking = null; + + // 处理推理内容 + if (messageOutput && messageOutput.reasoning) { + thinking = { + content: messageOutput.reasoning, + }; + } + + if (messageOutput && messageOutput.content) { + // 分离文本和图片内容 + const textParts: string[] = []; + const imageParts: MessageContent[] = []; + + messageOutput.content.forEach((item: any) => { + if (item.type === "output_text") { + textParts.push(item.text || ""); + } else if (item.type === "output_image") { + const imageContent = this.buildImageContent({ + url: item.image_url, + mime_type: item.mime_type, + }); + if (imageContent) { + imageParts.push(imageContent); + } + } else if (item.type === "output_image_base64") { + const imageContent = this.buildImageContent({ + b64_json: item.image_base64, + mime_type: item.mime_type, + }); + if (imageContent) { + imageParts.push(imageContent); + } + } + }); + + // 构建最终内容 + if (imageParts.length > 0) { + // 如果有图片,将所有内容组合成数组 + const contentArray: MessageContent[] = []; + if (textParts.length > 0) { + contentArray.push({ + type: "text", + text: textParts.join(""), + }); + } + contentArray.push(...imageParts); + messageContent = contentArray; + } else { + // 如果只有文本,返回字符串 + messageContent = textParts.join(""); + } + } + + if (functionCallOutput) { + // 处理function_call类型的输出 + toolCalls = [ + { + id: functionCallOutput.call_id || functionCallOutput.id, + function: { + name: functionCallOutput.name, + arguments: functionCallOutput.arguments, + }, + type: "function", + }, + ]; + } + + // 构建chat格式的响应 + const chatResponse = { + id: responseData.id || "chatcmpl-" + Date.now(), + object: "chat.completion", + created: responseData.created_at, + model: responseData.model, + choices: [ + { + index: 0, + message: { + role: "assistant", + content: messageContent || null, + tool_calls: toolCalls, + thinking: thinking, + annotations: annotations, + }, + logprobs: null, + finish_reason: toolCalls ? "tool_calls" : "stop", + }, + ], + usage: responseData.usage + ? 
private buildImageContent(source: {
  url?: string;
  b64_json?: string;
  mime_type?: string;
}): MessageContent | null {
  if (!source) return null;

  if (source.url || source.b64_json) {
    return {
      type: "image_url",
      image_url: {
        url: source.url || "",
        b64_json: source.b64_json,
      },
      media_type: source.mime_type,
    } as MessageContent;
  }

  return null;
}
}
diff --git a/packages/core/src/transformer/openai.transformer.ts b/packages/core/src/transformer/openai.transformer.ts new file mode 100644 index 0000000..64ce54a --- /dev/null +++ b/packages/core/src/transformer/openai.transformer.ts @@ -0,0 +1,6 @@
import { Transformer } from "@/types/transformer";

export class OpenAITransformer implements Transformer {
  name = "OpenAI";
  endPoint = "/v1/chat/completions";
}
diff --git a/packages/core/src/transformer/openrouter.transformer.ts b/packages/core/src/transformer/openrouter.transformer.ts new file mode 100644 index 0000000..fda5f14 --- /dev/null +++ b/packages/core/src/transformer/openrouter.transformer.ts @@ -0,0 +1,357 @@
import { UnifiedChatRequest } from "@/types/llm";
import { Transformer, TransformerOptions } from "../types/transformer";
import { v4 as uuidv4 } from "uuid";

export class OpenrouterTransformer implements Transformer {
  static TransformerName = "openrouter";

  constructor(private readonly options?: TransformerOptions) {}

  async transformRequestIn(
    request: UnifiedChatRequest
  ): Promise<UnifiedChatRequest> {
    if (!request.model.includes("claude")) {
      request.messages.forEach((msg) => {
        if (Array.isArray(msg.content)) {
          msg.content.forEach((item: any) => {
            if (item.cache_control) {
              delete item.cache_control;
            }
            if (item.type === "image_url") {
              if (!item.image_url.url.startsWith("http")) {
                item.image_url.url = `${item.image_url.url}`;
              }
              delete item.media_type;
            }
          });
        } else if (msg.cache_control) {
          delete msg.cache_control;
        }
      });
    } else {
      request.messages.forEach((msg) => {
        if (Array.isArray(msg.content)) {
          msg.content.forEach((item: any) => {
            if (item.type === "image_url") {
              if (!item.image_url.url.startsWith("http")) {
                item.image_url.url = `data:${item.media_type};base64,${item.image_url.url}`;
              }
              delete item.media_type;
            }
          });
        }
      });
    }
    Object.assign(request, this.options || {});
    return request;
  }

  async transformResponseOut(response: Response): Promise<Response> {
    if (response.headers.get("Content-Type")?.includes("application/json")) {
      const jsonResponse = await response.json();
      return new Response(JSON.stringify(jsonResponse), {
        status: response.status,
        statusText: response.statusText,
        headers: response.headers,
      });
    } else if (response.headers.get("Content-Type")?.includes("stream")) {
      if (!response.body) {
        return response;
      }

      const decoder = new TextDecoder();
      const encoder = new TextEncoder();

      let hasTextContent = false;
      let reasoningContent = "";
      let isReasoningComplete = false;
      let hasToolCall = false;
      let buffer = ""; // Buffer for incomplete data

      const stream = new ReadableStream({
        async start(controller) {
          const reader = response.body!.getReader();
          const processBuffer = (
            buffer: string,
            controller: ReadableStreamDefaultController,
            encoder: TextEncoder
          ) => {
            const lines = buffer.split("\n");
            for (const line of lines)
{
  if (line.trim()) {
    controller.enqueue(encoder.encode(line + "\n"));
  }
}
};

const processLine = (
  line: string,
  context: {
    controller: ReadableStreamDefaultController;
    encoder: TextEncoder;
    hasTextContent: () => boolean;
    setHasTextContent: (val: boolean) => void;
    reasoningContent: () => string;
    appendReasoningContent: (content: string) => void;
    isReasoningComplete: () => boolean;
    setReasoningComplete: (val: boolean) => void;
  }
) => {
  const { controller, encoder } = context;

  if (line.startsWith("data: ") && line.trim() !== "data: [DONE]") {
    const jsonStr = line.slice(6);
    try {
      const data = JSON.parse(jsonStr);
      if (data.usage) {
        this.logger?.debug(
          { usage: data.usage, hasToolCall },
          "usage"
        );
        data.choices[0].finish_reason = hasToolCall
          ? "tool_calls"
          : "stop";
      }

      if (data.choices?.[0]?.finish_reason === "error") {
        controller.enqueue(
          encoder.encode(
            `data: ${JSON.stringify({
              error: data.choices?.[0].error,
            })}\n\n`
          )
        );
      }

      if (
        data.choices?.[0]?.delta?.content &&
        !context.hasTextContent()
      ) {
        context.setHasTextContent(true);
      }

      // Extract reasoning_content from delta
      if (data.choices?.[0]?.delta?.reasoning) {
        context.appendReasoningContent(
          data.choices[0].delta.reasoning
        );
        const thinkingChunk = {
          ...data,
          choices: [
            {
              ...data.choices?.[0],
              delta: {
                ...data.choices[0].delta,
                thinking: {
                  content: data.choices[0].delta.reasoning,
                },
              },
            },
          ],
        };
        if (thinkingChunk.choices?.[0]?.delta) {
          delete thinkingChunk.choices[0].delta.reasoning;
        }
        const thinkingLine = `data: ${JSON.stringify(
          thinkingChunk
        )}\n\n`;
        controller.enqueue(encoder.encode(thinkingLine));
        return;
      }

      // Check if reasoning is complete
      if (
        data.choices?.[0]?.delta?.content &&
        context.reasoningContent() &&
        !context.isReasoningComplete()
      ) {
        context.setReasoningComplete(true);
        const signature = Date.now().toString();

        const thinkingChunk = {
          ...data,
          choices: [
            {
              ...data.choices?.[0],
              delta: {
                ...data.choices[0].delta,
                content: null,
                thinking: {
                  content: context.reasoningContent(),
                  signature: signature,
                },
              },
            },
          ],
        };
        if (thinkingChunk.choices?.[0]?.delta) {
          delete thinkingChunk.choices[0].delta.reasoning;
        }
        const thinkingLine = `data: ${JSON.stringify(
          thinkingChunk
        )}\n\n`;
        controller.enqueue(encoder.encode(thinkingLine));
      }

      if (data.choices?.[0]?.delta?.reasoning) {
        delete data.choices[0].delta.reasoning;
      }
      if (
        data.choices?.[0]?.delta?.tool_calls?.length &&
        !Number.isNaN(
          parseInt(data.choices?.[0]?.delta?.tool_calls[0].id, 10)
        )
      ) {
        data.choices?.[0]?.delta?.tool_calls.forEach((tool: any) => {
          tool.id = `call_${uuidv4()}`;
        });
      }

      if (
        data.choices?.[0]?.delta?.tool_calls?.length &&
        !hasToolCall
      ) {
        hasToolCall = true;
      }

      if (
        data.choices?.[0]?.delta?.tool_calls?.length &&
        context.hasTextContent()
      ) {
        if (typeof data.choices[0].index === "number") {
          data.choices[0].index += 1;
        } else {
          data.choices[0].index = 1;
        }
      }

      const modifiedLine = `data: ${JSON.stringify(data)}\n\n`;
      controller.enqueue(encoder.encode(modifiedLine));
    } catch (e) {
      // If JSON parsing fails the data may be incomplete; pass the original line through
      controller.enqueue(encoder.encode(line + "\n"));
    }
  } else {
    // Pass through non-data lines (like [DONE])
    controller.enqueue(encoder.encode(line + "\n"));
  }
};

try {
  while (true) {
    const { done, value } = await reader.read();
    if (done) {
      // Flush any data left in the buffer
      if (buffer.trim()) {
        processBuffer(buffer, controller, encoder);
      }
      break;
    }

    // Skip empty reads
    if (!value || value.length === 0) {
      continue;
    }

    let chunk;
    try {
      chunk = decoder.decode(value, { stream: true });
    } catch (decodeError) {
      console.warn("Failed to decode chunk", decodeError);
      continue;
    }

    if (chunk.length === 0) {
      continue;
    }

    buffer += chunk;

    // If the buffer grows too large, process it early to avoid unbounded memory use
    if (buffer.length > 1000000) {
      // 1MB limit
      console.warn(
        "Buffer size exceeds limit, processing partial data"
      );
      const lines = buffer.split("\n");
      buffer = lines.pop() || "";

      for (const line of lines) {
        if (line.trim()) {
          try {
            processLine(line, {
              controller,
              encoder,
              hasTextContent: () => hasTextContent,
              setHasTextContent: (val) => (hasTextContent = val),
              reasoningContent: () => reasoningContent,
              appendReasoningContent: (content) =>
                (reasoningContent += content),
              isReasoningComplete: () => isReasoningComplete,
              setReasoningComplete: (val) =>
                (isReasoningComplete = val),
            });
          } catch (error) {
            console.error("Error processing line:", line, error);
            // If processing fails, pass the original line through
            controller.enqueue(encoder.encode(line + "\n"));
          }
        }
      }
      continue;
    }

    // Process the complete lines in the buffer
    const lines = buffer.split("\n");
    buffer = lines.pop() || ""; // The last line may be incomplete; keep it buffered

    for (const line of lines) {
      if (!line.trim()) continue;

      try {
        processLine(line, {
          controller,
          encoder,
          hasTextContent: () => hasTextContent,
          setHasTextContent: (val) => (hasTextContent = val),
          reasoningContent: () => reasoningContent,
          appendReasoningContent: (content) =>
            (reasoningContent += content),
          isReasoningComplete: () => isReasoningComplete,
          setReasoningComplete: (val) => (isReasoningComplete = val),
        });
      } catch (error) {
        console.error("Error processing line:", line, error);
        // If processing fails, pass the original line through
        controller.enqueue(encoder.encode(line + "\n"));
      }
    }
  }
} catch (error) {
  console.error("Stream error:", error);
  controller.error(error);
} finally {
  try {
    reader.releaseLock();
  } catch (e) {
    console.error("Error releasing reader lock:", e);
  }
  controller.close();
}
},
});

return new Response(stream, {
  status: response.status,
  statusText: response.statusText,
  headers: {
    "Content-Type": "text/event-stream",
    "Cache-Control": "no-cache",
    Connection: "keep-alive",
  },
});
}

return response;
}
}
diff --git a/packages/core/src/transformer/reasoning.transformer.ts b/packages/core/src/transformer/reasoning.transformer.ts new file mode 100644 index 0000000..758d1a1 --- /dev/null +++ b/packages/core/src/transformer/reasoning.transformer.ts @@ -0,0 +1,250 @@
import { UnifiedChatRequest } from "@/types/llm";
import { Transformer, TransformerOptions } from "../types/transformer";

export class ReasoningTransformer implements Transformer {
  static TransformerName = "reasoning";
  enable: any;

  constructor(private readonly options?: TransformerOptions) {
    this.enable = this.options?.enable ?? true;
  }
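// When disabled, thinking is explicitly switched off on the outgoing request;
// otherwise the unified `reasoning` options are mapped onto the provider's
// `thinking` / `enable_thinking` fields.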
async transformRequestIn(
  request: UnifiedChatRequest
): Promise<UnifiedChatRequest> {
  if (!this.enable) {
    request.thinking = {
      type: "disabled",
      budget_tokens: -1,
    };
    request.enable_thinking = false;
    return request;
  }
  if (request.reasoning) {
    request.thinking = {
      type: "enabled",
      budget_tokens: request.reasoning.max_tokens,
    };
    request.enable_thinking = true;
  }
  return request;
}

async transformResponseOut(response: Response): Promise<Response> {
  if (!this.enable) return response;
  if (response.headers.get("Content-Type")?.includes("application/json")) {
    const jsonResponse = await response.json();
    if (jsonResponse.choices[0]?.message.reasoning_content) {
      jsonResponse.thinking = {
        content: jsonResponse.choices[0]?.message.reasoning_content,
      };
    }
    // Handle non-streaming response if needed
    return new Response(JSON.stringify(jsonResponse), {
      status: response.status,
      statusText: response.statusText,
      headers: response.headers,
    });
  } else if (response.headers.get("Content-Type")?.includes("stream")) {
    if (!response.body) {
      return response;
    }

    const decoder = new TextDecoder();
    const encoder = new TextEncoder();
    let reasoningContent = "";
    let isReasoningComplete = false;
    let buffer = ""; // Buffer for incomplete data

    const stream = new ReadableStream({
      async start(controller) {
        const reader = response.body!.getReader();

        // Process buffer function
        const processBuffer = (
          buffer: string,
          controller: ReadableStreamDefaultController,
          encoder: TextEncoder
        ) => {
          const lines = buffer.split("\n");
          for (const line of lines) {
            if (line.trim()) {
              controller.enqueue(encoder.encode(line + "\n"));
            }
          }
        };

        // Process line function
        const processLine = (
          line: string,
          context: {
            controller: ReadableStreamDefaultController;
            encoder: TextEncoder;
            reasoningContent: () => string;
            appendReasoningContent: (content: string) => void;
            isReasoningComplete: () => boolean;
            setReasoningComplete: (val: boolean) => void;
          }
        ) => {
          const { controller, encoder } = context;

          this.logger?.debug({ line }, `Processing reason line`);

          if (line.startsWith("data: ") && line.trim() !== "data: [DONE]") {
            try {
              const data = JSON.parse(line.slice(6));

              // Extract reasoning_content from delta
              if (data.choices?.[0]?.delta?.reasoning_content) {
                context.appendReasoningContent(
                  data.choices[0].delta.reasoning_content
                );
                const thinkingChunk = {
                  ...data,
                  choices: [
                    {
                      ...data.choices[0],
                      delta: {
                        ...data.choices[0].delta,
                        thinking: {
                          content: data.choices[0].delta.reasoning_content,
                        },
                      },
                    },
                  ],
                };
                delete thinkingChunk.choices[0].delta.reasoning_content;
                const thinkingLine = `data: ${JSON.stringify(
                  thinkingChunk
                )}\n\n`;
                controller.enqueue(encoder.encode(thinkingLine));
                return;
              }

              // Check if reasoning is complete (when delta has content but no reasoning_content)
              if (
                (data.choices?.[0]?.delta?.content ||
                  data.choices?.[0]?.delta?.tool_calls) &&
                context.reasoningContent() &&
                !context.isReasoningComplete()
              ) {
                context.setReasoningComplete(true);
                const signature = Date.now().toString();

                // Create a new chunk with thinking block
                const thinkingChunk = {
                  ...data,
                  choices: [
                    {
                      ...data.choices[0],
                      delta: {
                        ...data.choices[0].delta,
                        content: null,
                        thinking: {
                          content: context.reasoningContent(),
                          signature: signature,
                        },
                      },
                    },
                  ],
                };
                delete
thinkingChunk.choices[0].delta.reasoning_content; + // Send the thinking chunk + const thinkingLine = `data: ${JSON.stringify( + thinkingChunk + )}\n\n`; + controller.enqueue(encoder.encode(thinkingLine)); + } + + if (data.choices?.[0]?.delta?.reasoning_content) { + delete data.choices[0].delta.reasoning_content; + } + + // Send the modified chunk + if ( + data.choices?.[0]?.delta && + Object.keys(data.choices[0].delta).length > 0 + ) { + if (context.isReasoningComplete()) { + data.choices[0].index++; + } + const modifiedLine = `data: ${JSON.stringify(data)}\n\n`; + controller.enqueue(encoder.encode(modifiedLine)); + } + } catch (e) { + // If JSON parsing fails, pass through the original line + controller.enqueue(encoder.encode(line + "\n")); + } + } else { + // Pass through non-data lines (like [DONE]) + controller.enqueue(encoder.encode(line + "\n")); + } + }; + + try { + while (true) { + const { done, value } = await reader.read(); + if (done) { + // Process remaining data in buffer + if (buffer.trim()) { + processBuffer(buffer, controller, encoder); + } + break; + } + + const chunk = decoder.decode(value, { stream: true }); + buffer += chunk; + + // Process complete lines from buffer + const lines = buffer.split("\n"); + buffer = lines.pop() || ""; // Keep incomplete line in buffer + + for (const line of lines) { + if (!line.trim()) continue; + + try { + processLine(line, { + controller, + encoder: encoder, + reasoningContent: () => reasoningContent, + appendReasoningContent: (content) => + (reasoningContent += content), + isReasoningComplete: () => isReasoningComplete, + setReasoningComplete: (val) => (isReasoningComplete = val), + }); + } catch (error) { + console.error("Error processing line:", line, error); + // Pass through original line if parsing fails + controller.enqueue(encoder.encode(line + "\n")); + } + } + } + } catch (error) { + console.error("Stream error:", error); + controller.error(error); + } finally { + try { + reader.releaseLock(); + } catch (e) { + console.error("Error releasing reader lock:", e); + } + controller.close(); + } + }, + }); + + return new Response(stream, { + status: response.status, + statusText: response.statusText, + headers: { + "Content-Type": "text/event-stream", + "Cache-Control": "no-cache", + Connection: "keep-alive", + }, + }); + } + + return response; + } +} diff --git a/packages/core/src/transformer/sampling.transformer.ts b/packages/core/src/transformer/sampling.transformer.ts new file mode 100644 index 0000000..09b7f16 --- /dev/null +++ b/packages/core/src/transformer/sampling.transformer.ts @@ -0,0 +1,41 @@ +import { UnifiedChatRequest } from "../types/llm"; +import { Transformer, TransformerOptions } from "../types/transformer"; + +export class SamplingTransformer implements Transformer { + static TransformerName = "sampling"; + + max_tokens: number; + temperature: number; + top_p: number; + top_k: number; + repetition_penalty: number; + + constructor(private readonly options?: TransformerOptions) { + this.max_tokens = this.options?.max_tokens; + this.temperature = this.options?.temperature; + this.top_p = this.options?.top_p; + this.top_k = this.options?.top_k; + this.repetition_penalty = this.options?.repetition_penalty; + } + + async transformRequestIn( + request: UnifiedChatRequest + ): Promise { + if (request.max_tokens && request.max_tokens > this.max_tokens) { + request.max_tokens = this.max_tokens; + } + if (typeof this.temperature !== "undefined") { + request.temperature = this.temperature; + } + if (typeof this.top_p !== 
"undefined") { + request.top_p = this.top_p; + } + if (typeof this.top_k !== "undefined") { + request.top_k = this.top_k; + } + if (typeof this.repetition_penalty !== "undefined") { + request.repetition_penalty = this.repetition_penalty; + } + return request; + } +} diff --git a/packages/core/src/transformer/streamoptions.transformer.ts b/packages/core/src/transformer/streamoptions.transformer.ts new file mode 100644 index 0000000..838321b --- /dev/null +++ b/packages/core/src/transformer/streamoptions.transformer.ts @@ -0,0 +1,16 @@ +import { UnifiedChatRequest } from "../types/llm"; +import { Transformer, TransformerOptions } from "../types/transformer"; + +export class StreamOptionsTransformer implements Transformer { + name = "streamoptions"; + + async transformRequestIn( + request: UnifiedChatRequest + ): Promise { + if (!request.stream) return request; + request.stream_options = { + include_usage: true, + }; + return request; + } +} diff --git a/packages/core/src/transformer/tooluse.transformer.ts b/packages/core/src/transformer/tooluse.transformer.ts new file mode 100644 index 0000000..09efa46 --- /dev/null +++ b/packages/core/src/transformer/tooluse.transformer.ts @@ -0,0 +1,223 @@ +import { UnifiedChatRequest } from "../types/llm"; +import { Transformer } from "../types/transformer"; + +export class TooluseTransformer implements Transformer { + name = "tooluse"; + + transformRequestIn(request: UnifiedChatRequest): UnifiedChatRequest { + request.messages.push({ + role: "system", + content: `Tool mode is active. The user expects you to proactively execute the most suitable tool to help complete the task. +Before invoking a tool, you must carefully evaluate whether it matches the current task. If no available tool is appropriate for the task, you MUST call the \`ExitTool\` to exit tool mode — this is the only valid way to terminate tool mode. +Always prioritize completing the user's task effectively and efficiently by using tools whenever appropriate.`, + }); + if (request.tools?.length) { + request.tool_choice = "required"; + request.tools.push({ + type: "function", + function: { + name: "ExitTool", + description: `Use this tool when you are in tool mode and have completed the task. This is the only valid way to exit tool mode. +IMPORTANT: Before using this tool, ensure that none of the available tools are applicable to the current task. You must evaluate all available options — only if no suitable tool can help you complete the task should you use ExitTool to terminate tool mode. +Examples: +1. Task: "Use a tool to summarize this document" — Do not use ExitTool if a summarization tool is available. +2. Task: "What’s the weather today?" 
— If no tool is available to answer, use ExitTool after reasoning that none can fulfill the task.`,
parameters: {
  type: "object",
  properties: {
    response: {
      type: "string",
      description:
        "Your response will be forwarded to the user exactly as returned — the tool will not modify or post-process it in any way.",
    },
  },
  required: ["response"],
},
},
});
}
return request;
}

async transformResponseOut(response: Response): Promise<Response> {
  if (response.headers.get("Content-Type")?.includes("application/json")) {
    const jsonResponse = await response.json();
    if (
      jsonResponse?.choices?.[0]?.message.tool_calls?.length &&
      jsonResponse?.choices?.[0]?.message.tool_calls[0]?.function?.name ===
        "ExitTool"
    ) {
      const toolCall = jsonResponse?.choices[0]?.message.tool_calls[0];
      const toolArguments = JSON.parse(toolCall.function.arguments || "{}");
      jsonResponse.choices[0].message.content = toolArguments.response || "";
      delete jsonResponse.choices[0].message.tool_calls;
    }

    // Handle non-streaming response if needed
    return new Response(JSON.stringify(jsonResponse), {
      status: response.status,
      statusText: response.statusText,
      headers: response.headers,
    });
  } else if (response.headers.get("Content-Type")?.includes("stream")) {
    if (!response.body) {
      return response;
    }

    const decoder = new TextDecoder();
    const encoder = new TextEncoder();
    let exitToolIndex = -1;
    let exitToolResponse = "";
    let buffer = ""; // Buffer for incomplete data

    const stream = new ReadableStream({
      async start(controller) {
        const reader = response.body!.getReader();

        const processBuffer = (
          buffer: string,
          controller: ReadableStreamDefaultController,
          encoder: TextEncoder
        ) => {
          const lines = buffer.split("\n");
          for (const line of lines) {
            if (line.trim()) {
              controller.enqueue(encoder.encode(line + "\n"));
            }
          }
        };

        const processLine = (
          line: string,
          context: {
            controller: ReadableStreamDefaultController;
            encoder: TextEncoder;
            exitToolIndex: () => number;
            setExitToolIndex: (val: number) => void;
            exitToolResponse: () => string;
            appendExitToolResponse: (content: string) => void;
          }
        ) => {
          const {
            controller,
            encoder,
            exitToolIndex,
            setExitToolIndex,
            appendExitToolResponse,
          } = context;

          if (
            line.startsWith("data: ") &&
            line.trim() !== "data: [DONE]"
          ) {
            try {
              const data = JSON.parse(line.slice(6));

              if (data.choices[0]?.delta?.tool_calls?.length) {
                const toolCall = data.choices[0].delta.tool_calls[0];

                if (toolCall.function?.name === "ExitTool") {
                  setExitToolIndex(toolCall.index);
                  return;
                } else if (
                  exitToolIndex() > -1 &&
                  toolCall.index === exitToolIndex() &&
                  toolCall.function.arguments
                ) {
                  appendExitToolResponse(toolCall.function.arguments);
                  try {
                    const response = JSON.parse(context.exitToolResponse());
                    data.choices = [
                      {
                        delta: {
                          role: "assistant",
                          content: response.response || "",
                        },
                      },
                    ];
                    const modifiedLine = `data: ${JSON.stringify(
                      data
                    )}\n\n`;
                    controller.enqueue(encoder.encode(modifiedLine));
                  } catch (e) {
                    // The accumulated arguments are still a partial JSON fragment; wait for more chunks
                  }
                  return;
                }
              }

              if (
                data.choices?.[0]?.delta &&
                Object.keys(data.choices[0].delta).length > 0
              ) {
                const modifiedLine = `data: ${JSON.stringify(data)}\n\n`;
                controller.enqueue(encoder.encode(modifiedLine));
              }
            } catch (e) {
              // If JSON parsing fails, pass through the original line
              controller.enqueue(encoder.encode(line + "\n"));
            }
          } else {
            // Pass through non-data lines (like [DONE])
            controller.enqueue(encoder.encode(line + "\n"));
          }
        };
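// Read loop: accumulate chunks in the buffer, split on newlines, and keep the
// trailing partial line buffered until the next read completes it.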
try {
  while (true) {
    const { done, value } = await reader.read();
    if (done) {
      if (buffer.trim()) {
        processBuffer(buffer, controller, encoder);
      }
      break;
    }
    const chunk = decoder.decode(value, { stream: true });
    buffer += chunk;
    const lines = buffer.split("\n");
    buffer = lines.pop() || "";
    for (const line of lines) {
      if (!line.trim()) continue;
      try {
        processLine(line, {
          controller,
          encoder,
          exitToolIndex: () => exitToolIndex,
          setExitToolIndex: (val) => (exitToolIndex = val),
          exitToolResponse: () => exitToolResponse,
          appendExitToolResponse: (content) =>
            (exitToolResponse += content),
        });
      } catch (error) {
        console.error("Error processing line:", line, error);
        // If processing fails, pass the original line through
        controller.enqueue(encoder.encode(line + "\n"));
      }
    }
  }
} catch (error) {
  console.error("Stream error:", error);
  controller.error(error);
} finally {
  try {
    reader.releaseLock();
  } catch (e) {
    console.error("Error releasing reader lock:", e);
  }
  controller.close();
}
},
});

return new Response(stream, {
  status: response.status,
  statusText: response.statusText,
  headers: {
    "Content-Type": "text/event-stream",
    "Cache-Control": "no-cache",
    Connection: "keep-alive",
  },
});
}

return response;
}
}
diff --git a/packages/core/src/transformer/vercel.transformer.ts b/packages/core/src/transformer/vercel.transformer.ts new file mode 100644 index 0000000..0fa4616 --- /dev/null +++ b/packages/core/src/transformer/vercel.transformer.ts @@ -0,0 +1,358 @@
import { UnifiedChatRequest } from "@/types/llm";
import { Transformer, TransformerOptions } from "../types/transformer";
import { v4 as uuidv4 } from "uuid";

export class VercelTransformer implements Transformer {
  static TransformerName = "vercel";
  endPoint = "/v1/chat/completions";

  constructor(private readonly options?: TransformerOptions) {}

  async transformRequestIn(
    request: UnifiedChatRequest
  ): Promise<UnifiedChatRequest> {
    if (!request.model.includes("claude")) {
      request.messages.forEach((msg) => {
        if (Array.isArray(msg.content)) {
          msg.content.forEach((item: any) => {
            if (item.cache_control) {
              delete item.cache_control;
            }
            if (item.type === "image_url") {
              if (!item.image_url.url.startsWith("http")) {
                item.image_url.url = `data:${item.media_type};base64,${item.image_url.url}`;
              }
              delete item.media_type;
            }
          });
        } else if (msg.cache_control) {
          delete msg.cache_control;
        }
      });
    } else {
      request.messages.forEach((msg) => {
        if (Array.isArray(msg.content)) {
          msg.content.forEach((item: any) => {
            if (item.type === "image_url") {
              if (!item.image_url.url.startsWith("http")) {
                item.image_url.url = `data:${item.media_type};base64,${item.image_url.url}`;
              }
              delete item.media_type;
            }
          });
        }
      });
    }
    Object.assign(request, this.options || {});
    return request;
  }

  async transformResponseOut(response: Response): Promise<Response> {
    if (response.headers.get("Content-Type")?.includes("application/json")) {
      const jsonResponse = await response.json();
      return new Response(JSON.stringify(jsonResponse), {
        status: response.status,
        statusText: response.statusText,
        headers: response.headers,
      });
    } else if (response.headers.get("Content-Type")?.includes("stream")) {
      if (!response.body) {
        return response;
      }

      const decoder = new TextDecoder();
      const encoder = new TextEncoder();

      let hasTextContent = false;
      let reasoningContent = "";
      let
isReasoningComplete = false; + let hasToolCall = false; + let buffer = ""; // Buffer for incomplete data + + const stream = new ReadableStream({ + async start(controller) { + const reader = response.body!.getReader(); + const processBuffer = ( + buffer: string, + controller: ReadableStreamDefaultController, + encoder: TextEncoder + ) => { + const lines = buffer.split("\n"); + for (const line of lines) { + if (line.trim()) { + controller.enqueue(encoder.encode(line + "\n")); + } + } + }; + + const processLine = ( + line: string, + context: { + controller: ReadableStreamDefaultController; + encoder: TextEncoder; + hasTextContent: () => boolean; + setHasTextContent: (val: boolean) => void; + reasoningContent: () => string; + appendReasoningContent: (content: string) => void; + isReasoningComplete: () => boolean; + setReasoningComplete: (val: boolean) => void; + } + ) => { + const { controller, encoder } = context; + + if (line.startsWith("data: ") && line.trim() !== "data: [DONE]") { + const jsonStr = line.slice(6); + try { + const data = JSON.parse(jsonStr); + if (data.usage) { + this.logger?.debug( + { usage: data.usage, hasToolCall }, + "usage" + ); + data.choices[0].finish_reason = hasToolCall + ? "tool_calls" + : "stop"; + } + + if (data.choices?.[0]?.finish_reason === "error") { + controller.enqueue( + encoder.encode( + `data: ${JSON.stringify({ + error: data.choices?.[0].error, + })}\n\n` + ) + ); + } + + if ( + data.choices?.[0]?.delta?.content && + !context.hasTextContent() + ) { + context.setHasTextContent(true); + } + + // Extract reasoning_content from delta + if (data.choices?.[0]?.delta?.reasoning) { + context.appendReasoningContent( + data.choices[0].delta.reasoning + ); + const thinkingChunk = { + ...data, + choices: [ + { + ...data.choices?.[0], + delta: { + ...data.choices[0].delta, + thinking: { + content: data.choices[0].delta.reasoning, + }, + }, + }, + ], + }; + if (thinkingChunk.choices?.[0]?.delta) { + delete thinkingChunk.choices[0].delta.reasoning; + } + const thinkingLine = `data: ${JSON.stringify( + thinkingChunk + )}\n\n`; + controller.enqueue(encoder.encode(thinkingLine)); + return; + } + + // Check if reasoning is complete + if ( + data.choices?.[0]?.delta?.content && + context.reasoningContent() && + !context.isReasoningComplete() + ) { + context.setReasoningComplete(true); + const signature = Date.now().toString(); + + const thinkingChunk = { + ...data, + choices: [ + { + ...data.choices?.[0], + delta: { + ...data.choices[0].delta, + content: null, + thinking: { + content: context.reasoningContent(), + signature: signature, + }, + }, + }, + ], + }; + if (thinkingChunk.choices?.[0]?.delta) { + delete thinkingChunk.choices[0].delta.reasoning; + } + const thinkingLine = `data: ${JSON.stringify( + thinkingChunk + )}\n\n`; + controller.enqueue(encoder.encode(thinkingLine)); + } + + if (data.choices?.[0]?.delta?.reasoning) { + delete data.choices[0].delta.reasoning; + } + if ( + data.choices?.[0]?.delta?.tool_calls?.length && + !Number.isNaN( + parseInt(data.choices?.[0]?.delta?.tool_calls[0].id, 10) + ) + ) { + data.choices?.[0]?.delta?.tool_calls.forEach((tool: any) => { + tool.id = `call_${uuidv4()}`; + }); + } + + if ( + data.choices?.[0]?.delta?.tool_calls?.length && + !hasToolCall + ) { + hasToolCall = true; + } + + if ( + data.choices?.[0]?.delta?.tool_calls?.length && + context.hasTextContent() + ) { + if (typeof data.choices[0].index === "number") { + data.choices[0].index += 1; + } else { + data.choices[0].index = 1; + } + } + + const modifiedLine = `data: 
${JSON.stringify(data)}\n\n`; + controller.enqueue(encoder.encode(modifiedLine)); + } catch (e) { + // If JSON parsing fails, data might be incomplete, pass through the original line + controller.enqueue(encoder.encode(line + "\n")); + } + } else { + // Pass through non-data lines (like [DONE]) + controller.enqueue(encoder.encode(line + "\n")); + } + }; + + try { + while (true) { + const { done, value } = await reader.read(); + if (done) { + // Process remaining data in buffer + if (buffer.trim()) { + processBuffer(buffer, controller, encoder); + } + break; + } + + // Check if value is valid + if (!value || value.length === 0) { + continue; + } + + let chunk; + try { + chunk = decoder.decode(value, { stream: true }); + } catch (decodeError) { + console.warn("Failed to decode chunk", decodeError); + continue; + } + + if (chunk.length === 0) { + continue; + } + + buffer += chunk; + + // Process buffer if it gets too large to avoid memory leaks + if (buffer.length > 1000000) { + // 1MB limit + console.warn( + "Buffer size exceeds limit, processing partial data" + ); + const lines = buffer.split("\n"); + buffer = lines.pop() || ""; + + for (const line of lines) { + if (line.trim()) { + try { + processLine(line, { + controller, + encoder, + hasTextContent: () => hasTextContent, + setHasTextContent: (val) => (hasTextContent = val), + reasoningContent: () => reasoningContent, + appendReasoningContent: (content) => + (reasoningContent += content), + isReasoningComplete: () => isReasoningComplete, + setReasoningComplete: (val) => + (isReasoningComplete = val), + }); + } catch (error) { + console.error("Error processing line:", line, error); + // If parsing fails, pass through the original line + controller.enqueue(encoder.encode(line + "\n")); + } + } + } + continue; + } + + // Process complete lines in buffer + const lines = buffer.split("\n"); + buffer = lines.pop() || ""; // Last line might be incomplete, keep in buffer + + for (const line of lines) { + if (!line.trim()) continue; + + try { + processLine(line, { + controller, + encoder, + hasTextContent: () => hasTextContent, + setHasTextContent: (val) => (hasTextContent = val), + reasoningContent: () => reasoningContent, + appendReasoningContent: (content) => + (reasoningContent += content), + isReasoningComplete: () => isReasoningComplete, + setReasoningComplete: (val) => (isReasoningComplete = val), + }); + } catch (error) { + console.error("Error processing line:", line, error); + // If parsing fails, pass through the original line + controller.enqueue(encoder.encode(line + "\n")); + } + } + } + } catch (error) { + console.error("Stream error:", error); + controller.error(error); + } finally { + try { + reader.releaseLock(); + } catch (e) { + console.error("Error releasing reader lock:", e); + } + controller.close(); + } + }, + }); + + return new Response(stream, { + status: response.status, + statusText: response.statusText, + headers: { + "Content-Type": "text/event-stream", + "Cache-Control": "no-cache", + Connection: "keep-alive", + }, + }); + } + + return response; + } +} \ No newline at end of file diff --git a/packages/core/src/transformer/vertex-claude.transformer.ts b/packages/core/src/transformer/vertex-claude.transformer.ts new file mode 100644 index 0000000..a5b2546 --- /dev/null +++ b/packages/core/src/transformer/vertex-claude.transformer.ts @@ -0,0 +1,81 @@ +import { LLMProvider, UnifiedChatRequest } from "../types/llm"; +import { Transformer } from "../types/transformer"; +import { + buildRequestBody, + transformRequestOut, + 
transformResponseOut, +} from "../utils/vertex-claude.util"; + +async function getAccessToken(): Promise { + try { + const { GoogleAuth } = await import('google-auth-library'); + + const auth = new GoogleAuth({ + scopes: ['https://www.googleapis.com/auth/cloud-platform'] + }); + + const client = await auth.getClient(); + const accessToken = await client.getAccessToken(); + return accessToken.token || ''; + } catch (error) { + console.error('Error getting access token:', error); + throw new Error('Failed to get access token for Vertex AI. Please ensure you have set up authentication using one of these methods:\n' + + '1. Set GOOGLE_APPLICATION_CREDENTIALS to point to service account key file\n' + + '2. Run "gcloud auth application-default login"\n' + + '3. Use Google Cloud environment with default service account'); + } +} + + + +export class VertexClaudeTransformer implements Transformer { + name = "vertex-claude"; + + async transformRequestIn( + request: UnifiedChatRequest, + provider: LLMProvider + ): Promise> { + let projectId = process.env.GOOGLE_CLOUD_PROJECT; + const location = process.env.GOOGLE_CLOUD_LOCATION || 'us-east5'; + + if (!projectId && process.env.GOOGLE_APPLICATION_CREDENTIALS) { + try { + const fs = await import('fs'); + const keyContent = fs.readFileSync(process.env.GOOGLE_APPLICATION_CREDENTIALS, 'utf8'); + const credentials = JSON.parse(keyContent); + if (credentials && credentials.project_id) { + projectId = credentials.project_id; + } + } catch (error) { + console.error('Error extracting project_id from GOOGLE_APPLICATION_CREDENTIALS:', error); + } + } + + if (!projectId) { + throw new Error('Project ID is required for Vertex AI. Set GOOGLE_CLOUD_PROJECT environment variable or ensure project_id is in GOOGLE_APPLICATION_CREDENTIALS file.'); + } + + const accessToken = await getAccessToken(); + return { + body: buildRequestBody(request), + config: { + url: new URL( + `/v1/projects/${projectId}/locations/${location}/publishers/anthropic/models/${request.model}:${request.stream ? "streamRawPredict" : "rawPredict"}`, + `https://${location}-aiplatform.googleapis.com` + ).toString(), + headers: { + "Authorization": `Bearer ${accessToken}`, + "Content-Type": "application/json", + }, + }, + }; + } + + async transformRequestOut(request: Record): Promise { + return transformRequestOut(request); + } + + async transformResponseOut(response: Response): Promise { + return transformResponseOut(response, this.name, this.logger); + } +} diff --git a/packages/core/src/transformer/vertex-gemini.transformer.ts b/packages/core/src/transformer/vertex-gemini.transformer.ts new file mode 100644 index 0000000..a99fea6 --- /dev/null +++ b/packages/core/src/transformer/vertex-gemini.transformer.ts @@ -0,0 +1,79 @@ +import { LLMProvider, UnifiedChatRequest } from "../types/llm"; +import { Transformer } from "../types/transformer"; +import { + buildRequestBody, + transformRequestOut, + transformResponseOut, +} from "../utils/gemini.util"; + +async function getAccessToken(): Promise { + try { + const { GoogleAuth } = await import('google-auth-library'); + + const auth = new GoogleAuth({ + scopes: ['https://www.googleapis.com/auth/cloud-platform'] + }); + + const client = await auth.getClient(); + const accessToken = await client.getAccessToken(); + return accessToken.token || ''; + } catch (error) { + console.error('Error getting access token:', error); + throw new Error('Failed to get access token for Vertex AI. 
Please ensure you have set up authentication using one of these methods:\n' +
      '1. Set GOOGLE_APPLICATION_CREDENTIALS to point to service account key file\n' +
      '2. Run "gcloud auth application-default login"\n' +
      '3. Use Google Cloud environment with default service account');
  }
}

export class VertexGeminiTransformer implements Transformer {
  name = "vertex-gemini";

  async transformRequestIn(
    request: UnifiedChatRequest,
    provider: LLMProvider
  ): Promise<Record<string, any>> {
    let projectId = process.env.GOOGLE_CLOUD_PROJECT;
    const location = process.env.GOOGLE_CLOUD_LOCATION || 'us-central1';

    if (!projectId && process.env.GOOGLE_APPLICATION_CREDENTIALS) {
      try {
        const fs = await import('fs');
        const keyContent = fs.readFileSync(process.env.GOOGLE_APPLICATION_CREDENTIALS, 'utf8');
        const credentials = JSON.parse(keyContent);
        if (credentials && credentials.project_id) {
          projectId = credentials.project_id;
        }
      } catch (error) {
        console.error('Error extracting project_id from GOOGLE_APPLICATION_CREDENTIALS:', error);
      }
    }

    if (!projectId) {
      throw new Error('Project ID is required for Vertex AI. Set GOOGLE_CLOUD_PROJECT environment variable or ensure project_id is in GOOGLE_APPLICATION_CREDENTIALS file.');
    }

    const accessToken = await getAccessToken();
    return {
      body: buildRequestBody(request),
      config: {
        url: new URL(
          `./v1beta1/projects/${projectId}/locations/${location}/publishers/google/models/${request.model}:${request.stream ? "streamGenerateContent" : "generateContent"}`,
          provider.baseUrl.endsWith('/') ? provider.baseUrl : provider.baseUrl + '/' || `https://${location}-aiplatform.googleapis.com`
        ),
        headers: {
          "Authorization": `Bearer ${accessToken}`,
          "x-goog-api-key": undefined,
        },
      },
    };
  }

  async transformRequestOut(request: Record<string, any>): Promise<UnifiedChatRequest> {
    return transformRequestOut(request);
  }

  async transformResponseOut(response: Response): Promise<Response> {
    return transformResponseOut(response, this.name);
  }
}
diff --git a/packages/core/src/types/llm.ts b/packages/core/src/types/llm.ts new file mode 100644 index 0000000..20ef0d3 --- /dev/null +++ b/packages/core/src/types/llm.ts @@ -0,0 +1,239 @@
import type { ChatCompletionMessageParam as OpenAIMessage } from "openai/resources/chat/completions";
import type { MessageParam as AnthropicMessage } from "@anthropic-ai/sdk/resources/messages";
import type {
  ChatCompletion,
  ChatCompletionChunk,
} from "openai/resources/chat/completions";
import type {
  Message,
  MessageStreamEvent,
} from "@anthropic-ai/sdk/resources/messages";
import type { ChatCompletionTool } from "openai/resources/chat/completions";
import type { Tool as AnthropicTool } from "@anthropic-ai/sdk/resources/messages";
import { Transformer } from "./transformer";

export interface UrlCitation {
  url: string;
  title: string;
  content: string;
  start_index: number;
  end_index: number;
}
export interface Annotation {
  type: "url_citation";
  url_citation?: UrlCitation;
}

// Content type definitions
export interface TextContent {
  type: "text";
  text: string;
  cache_control?: {
    type?: string;
  };
}

export interface ImageContent {
  type: "image_url";
  image_url: {
    url: string;
  };
  media_type: string;
}

export type MessageContent = TextContent | ImageContent;

// Unified message interface
export interface UnifiedMessage {
  role: "user" | "assistant" | "system" | "tool";
  content: string | null | MessageContent[];
  tool_calls?: Array<{
    id: string;
    type: "function";
    function: {
      name: string;
arguments: string;
    };
  }>;
  tool_call_id?: string;
  cache_control?: {
    type?: string;
  };
  thinking?: {
    content: string;
    signature?: string;
  };
}

// Unified tool definition interface
export interface UnifiedTool {
  type: "function";
  function: {
    name: string;
    description: string;
    parameters: {
      type: "object";
      properties: Record<string, any>;
      required?: string[];
      additionalProperties?: boolean;
      $schema?: string;
    };
  };
}

export type ThinkLevel = "none" | "low" | "medium" | "high";

// Unified request interface
export interface UnifiedChatRequest {
  messages: UnifiedMessage[];
  model: string;
  max_tokens?: number;
  temperature?: number;
  stream?: boolean;
  tools?: UnifiedTool[];
  tool_choice?:
    | "auto"
    | "none"
    | "required"
    | string
    | { type: "function"; function: { name: string } };
  reasoning?: {
    // OpenAI-style
    effort?: ThinkLevel;

    // Anthropic-style
    max_tokens?: number;

    enabled?: boolean;
  };
}

// Unified response interface
export interface UnifiedChatResponse {
  id: string;
  model: string;
  content: string | null;
  usage?: {
    prompt_tokens: number;
    completion_tokens: number;
    total_tokens: number;
  };
  tool_calls?: Array<{
    id: string;
    type: "function";
    function: {
      name: string;
      arguments: string;
    };
  }>;
  annotations?: Annotation[];
}

// Streaming response types
export interface StreamChunk {
  id: string;
  object: string;
  created: number;
  model: string;
  choices?: Array<{
    index: number;
    delta: {
      role?: string;
      content?: string;
      thinking?: {
        content?: string;
        signature?: string;
      };
      tool_calls?: Array<{
        id?: string;
        type?: "function";
        function?: {
          name?: string;
          arguments?: string;
        };
      }>;
      annotations?: Annotation[];
    };
    finish_reason?: string | null;
  }>;
}

// Anthropic streaming event type
export type AnthropicStreamEvent = MessageStreamEvent;

// OpenAI streaming chunk type
export type OpenAIStreamChunk = ChatCompletionChunk;

// OpenAI-specific types
export interface OpenAIChatRequest {
  messages: OpenAIMessage[];
  model: string;
  max_tokens?: number;
  temperature?: number;
  stream?: boolean;
  tools?: ChatCompletionTool[];
  tool_choice?:
    | "auto"
    | "none"
    | { type: "function"; function: { name: string } };
}

// Anthropic-specific types
export interface AnthropicChatRequest {
  messages: AnthropicMessage[];
  model: string;
  max_tokens: number;
  temperature?: number;
  stream?: boolean;
  system?: string;
  tools?: AnthropicTool[];
  tool_choice?: { type: "auto" } | { type: "tool"; name: string };
}

// Conversion options
export interface ConversionOptions {
  targetProvider: "openai" | "anthropic";
  sourceProvider: "openai" | "anthropic";
}

export interface LLMProvider {
  name: string;
  baseUrl: string;
  apiKey: string;
  models: string[];
  transformer?: {
    [key: string]: {
      use?: Transformer[];
    };
  } & {
    use?: Transformer[];
  };
}

export type RegisterProviderRequest = LLMProvider;

export interface ModelRoute {
  provider: string;
  model: string;
  fullModel: string;
}

export interface RequestRouteInfo {
  provider: LLMProvider;
  originalModel: string;
  targetModel: string;
}

export interface ConfigProvider {
  name: string;
  api_base_url: string;
  api_key: string;
  models: string[];
  transformer: {
    use?: string[] | Array[];
  } & {
    [key: string]: {
      use?: string[] | Array[];
    };
  };
}
diff --git a/packages/core/src/types/transformer.ts b/packages/core/src/types/transformer.ts new file mode 100644 index 0000000..4337c77 --- /dev/null +++ b/packages/core/src/types/transformer.ts @@ -0,0 +1,43 @@
import { LLMProvider, UnifiedChatRequest } from "./llm";

export interface TransformerOptions {
  [key: string]: any;
}

interface TransformerWithStaticName {
  new (options?: TransformerOptions): Transformer;
  TransformerName?: string;
}

interface TransformerWithInstanceName {
  new (): Transformer;
  name?: never;
}

export type TransformerConstructor = TransformerWithStaticName;

export interface TransformerContext {
  [key: string]: any;
}

export type Transformer = {
  transformRequestIn?: (
    request: UnifiedChatRequest,
    provider: LLMProvider,
    context: TransformerContext,
  ) => Promise<Record<string, any>>;
  transformResponseIn?: (response: Response, context?: TransformerContext) => Promise<Response>;

  // Convert the provider-specific request format into the unified format
  transformRequestOut?: (request: any, context: TransformerContext) => Promise<UnifiedChatRequest>;
  // Convert the provider-specific response format into the unified format
  transformResponseOut?: (response: Response, context: TransformerContext) => Promise<Response>;

  endPoint?: string;
  name?: string;
  auth?: (request: any, provider: LLMProvider, context: TransformerContext) => Promise<any>;

  // Logger for transformer
  logger?: any;
};
diff --git a/packages/core/src/utils/converter.ts b/packages/core/src/utils/converter.ts new file mode 100644 index 0000000..f766aa6 --- /dev/null +++ b/packages/core/src/utils/converter.ts @@ -0,0 +1,478 @@
import type { ChatCompletionMessageParam as OpenAIMessage } from "openai/resources/chat/completions";
import type { MessageParam as AnthropicMessage } from "@anthropic-ai/sdk/resources/messages";
import type { ChatCompletionTool } from "openai/resources/chat/completions";
import type { Tool as AnthropicTool } from "@anthropic-ai/sdk/resources/messages";
import {
  UnifiedMessage,
  UnifiedChatRequest,
  UnifiedTool,
  OpenAIChatRequest,
  AnthropicChatRequest,
  ConversionOptions,
} from "../types/llm";

// Simple logger function
function log(...args: any[]) {
  // Can be extended to use a proper logger
  console.log(...args);
}

export function convertToolsToOpenAI(
  tools: UnifiedTool[]
): ChatCompletionTool[] {
  return tools.map((tool) => ({
    type: "function" as const,
    function: {
      name: tool.function.name,
      description: tool.function.description,
      parameters: tool.function.parameters,
    },
  }));
}

export function convertToolsToAnthropic(tools: UnifiedTool[]): AnthropicTool[] {
  return tools.map((tool) => ({
    name: tool.function.name,
    description: tool.function.description,
    input_schema: tool.function.parameters,
  }));
}

export function convertToolsFromOpenAI(
  tools: ChatCompletionTool[]
): UnifiedTool[] {
  return tools.map((tool) => ({
    type: "function" as const,
    function: {
      name: tool.function.name,
      description: tool.function.description || "",
      parameters: tool.function.parameters as any,
    },
  }));
}

export function convertToolsFromAnthropic(
  tools: AnthropicTool[]
): UnifiedTool[] {
  return tools.map((tool) => ({
    type: "function" as const,
    function: {
      name: tool.name,
      description: tool.description || "",
      parameters: tool.input_schema as any,
    },
  }));
}

export function convertToOpenAI(
  request: UnifiedChatRequest
): OpenAIChatRequest {
  const messages: OpenAIMessage[] = [];
  const toolResponsesQueue: Map<string, any[]> = new Map(); // Holds tool responses until their originating call is emitted

  request.messages.forEach((msg) => {
    if (msg.role === "tool" && msg.tool_call_id) {
      if (!toolResponsesQueue.has(msg.tool_call_id)) {
        toolResponsesQueue.set(msg.tool_call_id, []);
      }
      toolResponsesQueue.get(msg.tool_call_id).push({
        role: "tool",
        content: msg.content,
        tool_call_id:
msg.tool_call_id, + }); + } + }); + + for (let i = 0; i < request.messages.length; i++) { + const msg = request.messages[i]; + + if (msg.role === "tool") { + continue; + } + + const message: any = { + role: msg.role, + content: msg.content, + }; + + if (msg.tool_calls && msg.tool_calls.length > 0) { + message.tool_calls = msg.tool_calls; + if (message.content === null) { + message.content = null; + } + } + + messages.push(message); + + if ( + msg.role === "assistant" && + msg.tool_calls && + msg.tool_calls.length > 0 + ) { + for (const toolCall of msg.tool_calls) { + if (toolResponsesQueue.has(toolCall.id)) { + const responses = toolResponsesQueue.get(toolCall.id); + + responses.forEach((response) => { + messages.push(response); + }); + + toolResponsesQueue.delete(toolCall.id); + } else { + messages.push({ + role: "tool", + content: JSON.stringify({ + success: true, + message: "Tool call executed successfully", + tool_call_id: toolCall.id, + }), + tool_call_id: toolCall.id, + } as any); + } + } + } + } + + if (toolResponsesQueue.size > 0) { + for (const [id, responses] of toolResponsesQueue.entries()) { + responses.forEach((response) => { + messages.push(response); + }); + } + } + + const result: any = { + messages, + model: request.model, + max_tokens: request.max_tokens, + temperature: request.temperature, + stream: request.stream, + }; + + if (request.tools && request.tools.length > 0) { + result.tools = convertToolsToOpenAI(request.tools); + if (request.tool_choice) { + if (request.tool_choice === "auto" || request.tool_choice === "none") { + result.tool_choice = request.tool_choice; + } else { + result.tool_choice = { + type: "function", + function: { name: request.tool_choice }, + }; + } + } + } + + return result; +} + + + +function isToolCallContent(content: string): boolean { + try { + const parsed = JSON.parse(content); + return ( + Array.isArray(parsed) && + parsed.some((item) => item.type === "tool_use" && item.id && item.name) + ); + } catch { + return false; + } +} + +export function convertFromOpenAI( + request: OpenAIChatRequest +): UnifiedChatRequest { + const messages: UnifiedMessage[] = request.messages.map((msg) => { + if ( + msg.role === "assistant" && + typeof msg.content === "string" && + isToolCallContent(msg.content) + ) { + try { + const toolCalls = JSON.parse(msg.content); + const convertedToolCalls = toolCalls.map((call: any) => ({ + id: call.id, + type: "function" as const, + function: { + name: call.name, + arguments: JSON.stringify(call.input || {}), + }, + })); + + return { + role: msg.role as "user" | "assistant" | "system", + content: null, + tool_calls: convertedToolCalls, + }; + } catch (error) { + return { + role: msg.role as "user" | "assistant" | "system", + content: msg.content, + }; + } + } + + if (msg.role === "tool") { + return { + role: msg.role as "tool", + content: + typeof msg.content === "string" + ? msg.content + : JSON.stringify(msg.content), + tool_call_id: (msg as any).tool_call_id, + }; + } + + return { + role: msg.role as "user" | "assistant" | "system", + content: + typeof msg.content === "string" + ? 
msg.content + : JSON.stringify(msg.content), + ...((msg as any).tool_calls && { tool_calls: (msg as any).tool_calls }), + }; + }); + + const result: UnifiedChatRequest = { + messages, + model: request.model, + max_tokens: request.max_tokens, + temperature: request.temperature, + stream: request.stream, + }; + + if (request.tools && request.tools.length > 0) { + result.tools = convertToolsFromOpenAI(request.tools); + + if (request.tool_choice) { + if (typeof request.tool_choice === "string") { + result.tool_choice = request.tool_choice; + } else if (request.tool_choice.type === "function") { + result.tool_choice = request.tool_choice.function.name; + } + } + } + + return result; +} + +export function convertFromAnthropic( + request: AnthropicChatRequest +): UnifiedChatRequest { + const messages: UnifiedMessage[] = []; + + if (request.system) { + messages.push({ + role: "system", + content: request.system, + }); + } + const pendingToolCalls: any[] = []; + const pendingTextContent: string[] = []; + let lastRole: string | null = null; + + for (let i = 0; i < request.messages.length; i++) { + const msg = request.messages[i]; + + if (typeof msg.content === "string") { + if ( + lastRole === "assistant" && + pendingToolCalls.length > 0 && + msg.role !== "assistant" + ) { + const assistantMessage: UnifiedMessage = { + role: "assistant", + content: pendingTextContent.join("") || null, + tool_calls: + pendingToolCalls.length > 0 ? pendingToolCalls : undefined, + }; + if (assistantMessage.tool_calls && pendingTextContent.length === 0) { + assistantMessage.content = null; + } + messages.push(assistantMessage); + pendingToolCalls.length = 0; + pendingTextContent.length = 0; + } + + messages.push({ + role: msg.role, + content: msg.content, + }); + } else if (Array.isArray(msg.content)) { + const textBlocks: string[] = []; + const toolCalls: any[] = []; + const toolResults: any[] = []; + + msg.content.forEach((block) => { + if (block.type === "text") { + textBlocks.push(block.text); + } else if (block.type === "tool_use") { + toolCalls.push({ + id: block.id, + type: "function" as const, + function: { + name: block.name, + arguments: JSON.stringify(block.input || {}), + }, + }); + } else if (block.type === "tool_result") { + toolResults.push(block); + } + }); + + if (toolResults.length > 0) { + if (lastRole === "assistant" && pendingToolCalls.length > 0) { + const assistantMessage: UnifiedMessage = { + role: "assistant", + content: pendingTextContent.join("") || null, + tool_calls: pendingToolCalls, + }; + if (pendingTextContent.length === 0) { + assistantMessage.content = null; + } + messages.push(assistantMessage); + pendingToolCalls.length = 0; + pendingTextContent.length = 0; + } + + toolResults.forEach((toolResult) => { + messages.push({ + role: "tool", + content: + typeof toolResult.content === "string" + ? 
toolResult.content + : JSON.stringify(toolResult.content), + tool_call_id: toolResult.tool_use_id, + }); + }); + } else if (msg.role === "assistant") { + if (lastRole === "assistant") { + pendingToolCalls.push(...toolCalls); + pendingTextContent.push(...textBlocks); + } else { + if (pendingToolCalls.length > 0) { + const prevAssistantMessage: UnifiedMessage = { + role: "assistant", + content: pendingTextContent.join("") || null, + tool_calls: pendingToolCalls, + }; + if (pendingTextContent.length === 0) { + prevAssistantMessage.content = null; + } + messages.push(prevAssistantMessage); + } + + pendingToolCalls.length = 0; + pendingTextContent.length = 0; + pendingToolCalls.push(...toolCalls); + pendingTextContent.push(...textBlocks); + } + } else { + if (lastRole === "assistant" && pendingToolCalls.length > 0) { + const assistantMessage: UnifiedMessage = { + role: "assistant", + content: pendingTextContent.join("") || null, + tool_calls: pendingToolCalls, + }; + if (pendingTextContent.length === 0) { + assistantMessage.content = null; + } + messages.push(assistantMessage); + pendingToolCalls.length = 0; + pendingTextContent.length = 0; + } + + const message: UnifiedMessage = { + role: msg.role, + content: textBlocks.join("") || null, + }; + + if (toolCalls.length > 0) { + message.tool_calls = toolCalls; + if (textBlocks.length === 0) { + message.content = null; + } + } + + messages.push(message); + } + } else { + if (lastRole === "assistant" && pendingToolCalls.length > 0) { + const assistantMessage: UnifiedMessage = { + role: "assistant", + content: pendingTextContent.join("") || null, + tool_calls: pendingToolCalls, + }; + if (pendingTextContent.length === 0) { + assistantMessage.content = null; + } + messages.push(assistantMessage); + pendingToolCalls.length = 0; + pendingTextContent.length = 0; + } + + messages.push({ + role: msg.role, + content: JSON.stringify(msg.content), + }); + } + + lastRole = msg.role; + } + + if (lastRole === "assistant" && pendingToolCalls.length > 0) { + const assistantMessage: UnifiedMessage = { + role: "assistant", + content: pendingTextContent.join("") || null, + tool_calls: pendingToolCalls, + }; + if (pendingTextContent.length === 0) { + assistantMessage.content = null; + } + messages.push(assistantMessage); + } + + const result: UnifiedChatRequest = { + messages, + model: request.model, + max_tokens: request.max_tokens, + temperature: request.temperature, + stream: request.stream, + }; + + if (request.tools && request.tools.length > 0) { + result.tools = convertToolsFromAnthropic(request.tools); + + if (request.tool_choice) { + if (request.tool_choice.type === "auto") { + result.tool_choice = "auto"; + } else if (request.tool_choice.type === "tool") { + result.tool_choice = request.tool_choice.name; + } + } + } + + return result; +} + +export function convertRequest( + request: OpenAIChatRequest | AnthropicChatRequest | UnifiedChatRequest, + options: ConversionOptions +): OpenAIChatRequest | AnthropicChatRequest { + let unifiedRequest: UnifiedChatRequest; + if (options.sourceProvider === "openai") { + unifiedRequest = convertFromOpenAI(request as OpenAIChatRequest); + } else if (options.sourceProvider === "anthropic") { + unifiedRequest = convertFromAnthropic(request as AnthropicChatRequest); + } else { + unifiedRequest = request as UnifiedChatRequest; + } + + if (options.targetProvider === "openai") { + return convertToOpenAI(unifiedRequest); + } else { + // For now, return unified request since Anthropic format is similar + return unifiedRequest as 
any;
+  }
+}
diff --git a/packages/core/src/utils/gemini.util.ts b/packages/core/src/utils/gemini.util.ts
new file mode 100644
index 0000000..d976cbf
--- /dev/null
+++ b/packages/core/src/utils/gemini.util.ts
@@ -0,0 +1,1044 @@
+import { UnifiedChatRequest, UnifiedMessage } from "../types/llm";
+import { Content, ContentListUnion, Part, ToolListUnion } from "@google/genai";
+
+export function cleanupParameters(obj: any, keyName?: string): void {
+  if (!obj || typeof obj !== "object") {
+    return;
+  }
+
+  if (Array.isArray(obj)) {
+    obj.forEach((item) => {
+      cleanupParameters(item);
+    });
+    return;
+  }
+
+  const validFields = new Set([
+    "type",
+    "format",
+    "title",
+    "description",
+    "nullable",
+    "enum",
+    "maxItems",
+    "minItems",
+    "properties",
+    "required",
+    "minProperties",
+    "maxProperties",
+    "minLength",
+    "maxLength",
+    "pattern",
+    "example",
+    "anyOf",
+    "propertyOrdering",
+    "default",
+    "items",
+    "minimum",
+    "maximum",
+  ]);
+
+  if (keyName !== "properties") {
+    Object.keys(obj).forEach((key) => {
+      if (!validFields.has(key)) {
+        delete obj[key];
+      }
+    });
+  }
+
+  if (obj.enum && obj.type !== "string") {
+    delete obj.enum;
+  }
+
+  if (
+    obj.type === "string" &&
+    obj.format &&
+    !["enum", "date-time"].includes(obj.format)
+  ) {
+    delete obj.format;
+  }
+
+  Object.keys(obj).forEach((key) => {
+    cleanupParameters(obj[key], key);
+  });
+}
+
+// Type enum equivalent in JavaScript
+const Type = {
+  TYPE_UNSPECIFIED: "TYPE_UNSPECIFIED",
+  STRING: "STRING",
+  NUMBER: "NUMBER",
+  INTEGER: "INTEGER",
+  BOOLEAN: "BOOLEAN",
+  ARRAY: "ARRAY",
+  OBJECT: "OBJECT",
+  NULL: "NULL",
+};
+
+/**
+ * Transform the type field from an array of types to an array of anyOf fields.
+ * @param {string[]} typeList - List of types
+ * @param {Object} resultingSchema - The schema object to modify
+ */
+function flattenTypeArrayToAnyOf(
+  typeList: Array<string>,
+  resultingSchema: any
+): void {
+  if (typeList.includes("null")) {
+    resultingSchema["nullable"] = true;
+  }
+  const listWithoutNull = typeList.filter((type) => type !== "null");
+
+  if (listWithoutNull.length === 1) {
+    const upperCaseType = listWithoutNull[0].toUpperCase();
+    resultingSchema["type"] = Object.values(Type).includes(upperCaseType)
+      ? upperCaseType
+      : Type.TYPE_UNSPECIFIED;
+  } else {
+    resultingSchema["anyOf"] = [];
+    for (const i of listWithoutNull) {
+      const upperCaseType = i.toUpperCase();
+      resultingSchema["anyOf"].push({
+        type: Object.values(Type).includes(upperCaseType)
+          ? upperCaseType
+          : Type.TYPE_UNSPECIFIED,
+      });
+    }
+  }
+}
+
+/**
+ * Process a JSON schema to make it compatible with the GenAI API
+ * @param {Object} _jsonSchema - The JSON schema to process
+ * @returns {Object} - The processed schema
+ */
+function processJsonSchema(_jsonSchema: any): any {
+  const genAISchema: any = {};
+  const schemaFieldNames = ["items"];
+  const listSchemaFieldNames = ["anyOf"];
+  const dictSchemaFieldNames = ["properties"];
+
+  if (_jsonSchema["type"] && _jsonSchema["anyOf"]) {
+    throw new Error("type and anyOf cannot both be populated.");
+  }
+
+  /*
+  This is to handle the nullable array or object. The _jsonSchema will
+  be in the format of {anyOf: [{type: 'null'}, {type: 'object'}]}. The
+  logic is to check whether anyOf has 2 elements and one of the elements is
+  null; if so, the anyOf field is unnecessary, so we need to get rid of the
+  anyOf field and make the schema nullable. Then use the other element as the
+  new _jsonSchema for processing. This is because the backend doesn't have a
+  null type.
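+
+  For example (illustrative input/output, inferred from this logic):
+    input:  { anyOf: [{ type: "null" }, { type: "string" }] }
+    output: { nullable: true, type: "STRING" }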
+ */ + const incomingAnyOf = _jsonSchema["anyOf"]; + if ( + incomingAnyOf != null && + Array.isArray(incomingAnyOf) && + incomingAnyOf.length == 2 + ) { + if (incomingAnyOf[0] && incomingAnyOf[0]["type"] === "null") { + genAISchema["nullable"] = true; + _jsonSchema = incomingAnyOf[1]; + } else if (incomingAnyOf[1] && incomingAnyOf[1]["type"] === "null") { + genAISchema["nullable"] = true; + _jsonSchema = incomingAnyOf[0]; + } + } + + if (_jsonSchema["type"] && Array.isArray(_jsonSchema["type"])) { + flattenTypeArrayToAnyOf(_jsonSchema["type"], genAISchema); + } + + for (const [fieldName, fieldValue] of Object.entries(_jsonSchema)) { + // Skip if the fieldValue is undefined or null. + if (fieldValue == null) { + continue; + } + + if (fieldName == "type") { + if (fieldValue === "null") { + throw new Error( + "type: null can not be the only possible type for the field." + ); + } + if (Array.isArray(fieldValue)) { + // we have already handled the type field with array of types in the + // beginning of this function. + continue; + } + const upperCaseValue = fieldValue.toUpperCase(); + genAISchema["type"] = Object.values(Type).includes(upperCaseValue) + ? upperCaseValue + : Type.TYPE_UNSPECIFIED; + } else if (schemaFieldNames.includes(fieldName)) { + genAISchema[fieldName] = processJsonSchema(fieldValue); + } else if (listSchemaFieldNames.includes(fieldName)) { + const listSchemaFieldValue = []; + for (const item of fieldValue) { + if (item["type"] == "null") { + genAISchema["nullable"] = true; + continue; + } + listSchemaFieldValue.push(processJsonSchema(item)); + } + genAISchema[fieldName] = listSchemaFieldValue; + } else if (dictSchemaFieldNames.includes(fieldName)) { + const dictSchemaFieldValue = {}; + for (const [key, value] of Object.entries(fieldValue)) { + dictSchemaFieldValue[key] = processJsonSchema(value); + } + genAISchema[fieldName] = dictSchemaFieldValue; + } else { + // additionalProperties is not included in JSONSchema, skipping it. 
+      if (fieldName === "additionalProperties") {
+        continue;
+      }
+      genAISchema[fieldName] = fieldValue;
+    }
+  }
+  return genAISchema;
+}
+
+/**
+ * Transform a tool object
+ * @param {Object} tool - The tool object to transform
+ * @returns {Object} - The transformed tool object
+ */
+export function tTool(tool: any): any {
+  if (tool.functionDeclarations) {
+    for (const functionDeclaration of tool.functionDeclarations) {
+      if (functionDeclaration.parameters) {
+        if (!Object.keys(functionDeclaration.parameters).includes("$schema")) {
+          functionDeclaration.parameters = processJsonSchema(
+            functionDeclaration.parameters
+          );
+        } else {
+          if (!functionDeclaration.parametersJsonSchema) {
+            functionDeclaration.parametersJsonSchema =
+              functionDeclaration.parameters;
+            delete functionDeclaration.parameters;
+          }
+        }
+      }
+      if (functionDeclaration.response) {
+        if (!Object.keys(functionDeclaration.response).includes("$schema")) {
+          functionDeclaration.response = processJsonSchema(
+            functionDeclaration.response
+          );
+        } else {
+          if (!functionDeclaration.responseJsonSchema) {
+            functionDeclaration.responseJsonSchema =
+              functionDeclaration.response;
+            delete functionDeclaration.response;
+          }
+        }
+      }
+    }
+  }
+  return tool;
+}
+
+export function buildRequestBody(
+  request: UnifiedChatRequest
+): Record<string, any> {
+  const tools: any[] = [];
+  const functionDeclarations = request.tools
+    ?.filter((tool) => tool.function.name !== "web_search")
+    ?.map((tool) => {
+      return {
+        name: tool.function.name,
+        description: tool.function.description,
+        parametersJsonSchema: tool.function.parameters,
+      };
+    });
+  if (functionDeclarations?.length) {
+    tools.push(
+      tTool({
+        functionDeclarations,
+      })
+    );
+  }
+  const webSearch = request.tools?.find(
+    (tool) => tool.function.name === "web_search"
+  );
+  if (webSearch) {
+    tools.push({
+      googleSearch: {},
+    });
+  }
+
+  const contents: any[] = [];
+  const toolResponses = request.messages.filter((item) => item.role === "tool");
+  request.messages
+    .filter((item) => item.role !== "tool")
+    .forEach((message: UnifiedMessage) => {
+      let role: "user" | "model";
+      if (message.role === "assistant") {
+        role = "model";
+      } else if (["user", "system"].includes(message.role)) {
+        role = "user";
+      } else {
+        role = "user"; // Default to user if role is not recognized
+      }
+      const parts: any[] = [];
+      if (typeof message.content === "string") {
+        const part: any = {
+          text: message.content,
+        };
+        if (message?.thinking?.signature) {
+          part.thoughtSignature = message.thinking.signature;
+        }
+        parts.push(part);
+      } else if (Array.isArray(message.content)) {
+        parts.push(
+          ...message.content.map((content) => {
+            if (content.type === "text") {
+              return {
+                text: content.text || "",
+              };
+            }
+            if (content.type === "image_url") {
+              if (content.image_url.url.startsWith("http")) {
+                return {
+                  file_data: {
+                    mime_type: content.media_type,
+                    file_uri: content.image_url.url,
+                  },
+                };
+              } else {
+                return {
+                  inlineData: {
+                    mime_type: content.media_type,
+                    data:
+                      content.image_url.url?.split(",")?.pop() ||
+                      content.image_url.url,
+                  },
+                };
+              }
+            }
+            // Fallback so the parts array never contains undefined entries
+            return { text: "" };
+          })
+        );
+      } else if (message.content && typeof message.content === "object") {
+        // Object like { text: "..." }
+        if (message.content.text) {
+          parts.push({ text: message.content.text });
+        } else {
+          parts.push({ text: JSON.stringify(message.content) });
+        }
+      }
+
+      if (Array.isArray(message.tool_calls)) {
+        parts.push(
+          ...message.tool_calls.map((toolCall, index) => {
+            return {
+              functionCall: {
+                id:
+                  toolCall.id ||
+                  `tool_${Math.random().toString(36).substring(2, 15)}`,
+                name: toolCall.function.name,
+                args: JSON.parse(toolCall.function.arguments || "{}"),
+              },
+              thoughtSignature:
+                index === 0 && message.thinking?.signature
+                  ? message.thinking?.signature
+                  : undefined,
+            };
+          })
+        );
+      }
+
+      if (parts.length === 0) {
+        parts.push({ text: "" });
+      }
+
+      contents.push({
+        role,
+        parts,
+      });
+
+      if (role === "model" && message.tool_calls) {
+        const functionResponses = message.tool_calls.map((tool) => {
+          const response = toolResponses.find(
+            (item) => item.tool_call_id === tool.id
+          );
+          return {
+            functionResponse: {
+              name: tool?.function?.name,
+              response: { result: response?.content },
+            },
+          };
+        });
+        contents.push({
+          role: "user",
+          parts: functionResponses,
+        });
+      }
+    });
+
+  const generationConfig: any = {};
+
+  if (
+    request.reasoning &&
+    request.reasoning.effort &&
+    request.reasoning.effort !== "none"
+  ) {
+    generationConfig.thinkingConfig = {
+      includeThoughts: true,
+    };
+    if (request.model.includes("gemini-3")) {
+      generationConfig.thinkingConfig.thinkingLevel = request.reasoning.effort;
+    } else {
+      const thinkingBudgets = request.model.includes("pro")
+        ? [128, 32768]
+        : [0, 24576];
+      let thinkingBudget;
+      const max_tokens = request.reasoning.max_tokens;
+      if (typeof max_tokens !== "undefined") {
+        if (
+          max_tokens >= thinkingBudgets[0] &&
+          max_tokens <= thinkingBudgets[1]
+        ) {
+          thinkingBudget = max_tokens;
+        } else if (max_tokens < thinkingBudgets[0]) {
+          thinkingBudget = thinkingBudgets[0];
+        } else if (max_tokens > thinkingBudgets[1]) {
+          thinkingBudget = thinkingBudgets[1];
+        }
+        generationConfig.thinkingConfig.thinkingBudget = thinkingBudget;
+      }
+    }
+  }
+
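+  // Note on the thinkingConfig above: for non-gemini-3 models the requested
+  // reasoning.max_tokens is clamped into the model's budget range taken from
+  // thinkingBudgets. Illustrative values (derived from the arrays above):
+  //   "pro" model,   max_tokens 50000 → thinkingBudget 32768 (upper bound)
+  //   non-pro model, max_tokens 100   → thinkingBudget 100 (within [0, 24576])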
+  const body: Record<string, any> = {
+    contents,
+    tools: tools.length ? tools : undefined,
+    generationConfig,
+  };
+
+  if (request.tool_choice) {
+    const toolConfig: any = {
+      functionCallingConfig: {},
+    };
+    if (request.tool_choice === "auto") {
+      toolConfig.functionCallingConfig.mode = "auto";
+    } else if (request.tool_choice === "none") {
+      toolConfig.functionCallingConfig.mode = "none";
+    } else if (request.tool_choice === "required") {
+      toolConfig.functionCallingConfig.mode = "any";
+    } else if (request.tool_choice?.function?.name) {
+      toolConfig.functionCallingConfig.mode = "any";
+      toolConfig.functionCallingConfig.allowedFunctionNames = [
+        request.tool_choice?.function?.name,
+      ];
+    }
+    body.toolConfig = toolConfig;
+  }
+
+  return body;
+}
+
+export function transformRequestOut(
+  request: Record<string, any>
+): UnifiedChatRequest {
+  const contents: ContentListUnion = request.contents;
+  const tools: ToolListUnion = request.tools;
+  const model: string = request.model;
+  const max_tokens: number | undefined = request.max_tokens;
+  const temperature: number | undefined = request.temperature;
+  const stream: boolean | undefined = request.stream;
+  const tool_choice: "auto" | "none" | string | undefined = request.tool_choice;
+
+  const unifiedChatRequest: UnifiedChatRequest = {
+    messages: [],
+    model,
+    max_tokens,
+    temperature,
+    stream,
+    tool_choice,
+  };
+
+  if (Array.isArray(contents)) {
+    contents.forEach((content) => {
+      if (typeof content === "string") {
+        unifiedChatRequest.messages.push({
+          role: "user",
+          content,
+        });
+      } else if (typeof (content as Part).text === "string") {
+        unifiedChatRequest.messages.push({
+          role: "user",
+          content: (content as Part).text || null,
+        });
+      } else if ((content as Content).role === "user") {
+        unifiedChatRequest.messages.push({
+          role: "user",
+          content:
+            (content as Content)?.parts?.map((part: Part) => ({
+              type: "text",
+              text: part.text || "",
+            })) || [],
+        });
+      } else if ((content as Content).role === "model") {
+        unifiedChatRequest.messages.push({
+          role: "assistant",
+          content:
+            (content as Content)?.parts?.map((part: Part) => ({
+              type: "text",
+              text: part.text || "",
+            })) || [],
+        });
+      }
+    });
+  }
+
+  if (Array.isArray(tools)) {
+    unifiedChatRequest.tools = [];
+    tools.forEach((tool) => {
+      if (Array.isArray(tool.functionDeclarations)) {
+        tool.functionDeclarations.forEach((fn) => {
+          unifiedChatRequest.tools!.push({
+            type: "function",
+            function: {
+              name: fn.name,
+              description: fn.description,
+              parameters: fn.parameters,
+            },
+          });
+        });
+      }
+    });
+  }
+
+  return unifiedChatRequest;
+}
+
+export async function transformResponseOut(
+  response: Response,
+  providerName: string,
+  logger?: any
+): Promise<Response> {
+  if (response.headers.get("Content-Type")?.includes("application/json")) {
+    const jsonResponse: any = await response.json();
+    logger?.debug({ response: jsonResponse }, `${providerName} response:`);
+
+    // Extract thinking content from parts with thought: true
+    let thinkingContent = "";
+    let thinkingSignature = "";
+
+    const parts = jsonResponse.candidates?.[0]?.content?.parts || [];
+    const nonThinkingParts: Part[] = [];
+
+    for (const part of parts) {
+      if (part.text && part.thought === true) {
+        thinkingContent += part.text;
+      } else {
+        nonThinkingParts.push(part);
+      }
+    }
+
+    // Get the thoughtSignature from the first part that carries one
+    thinkingSignature =
+      parts.find((part: any) => part.thoughtSignature)?.thoughtSignature || "";
+
+    const tool_calls =
+      nonThinkingParts
+        ?.filter((part: Part) => part.functionCall)
+        ?.map((part: Part) => ({
+          id:
+            part.functionCall?.id ||
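+            // Gemini can omit functionCall.id; a random fallback id is
+            // synthesized below. The "tool_" prefix is this codebase's own
+            // convention, not part of the Gemini API.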
`tool_${Math.random().toString(36).substring(2, 15)}`, + type: "function", + function: { + name: part.functionCall?.name, + arguments: JSON.stringify(part.functionCall?.args || {}), + }, + })) || []; + + const textContent = + nonThinkingParts + ?.filter((part: Part) => part.text) + ?.map((part: Part) => part.text) + ?.join("\n") || ""; + + const res = { + id: jsonResponse.responseId, + choices: [ + { + finish_reason: + ( + jsonResponse.candidates[0].finishReason as string + )?.toLowerCase() || null, + index: 0, + message: { + content: textContent, + role: "assistant", + tool_calls: tool_calls.length > 0 ? tool_calls : undefined, + // Add thinking as separate field if available + ...(thinkingSignature && { + thinking: { + content: thinkingContent || "(no content)", + signature: thinkingSignature, + }, + }), + }, + }, + ], + created: parseInt(new Date().getTime() / 1000 + "", 10), + model: jsonResponse.modelVersion, + object: "chat.completion", + usage: { + completion_tokens: + jsonResponse.usageMetadata?.candidatesTokenCount || 0, + prompt_tokens: jsonResponse.usageMetadata?.promptTokenCount || 0, + prompt_tokens_details: { + cached_tokens: + jsonResponse.usageMetadata?.cachedContentTokenCount || 0, + }, + total_tokens: jsonResponse.usageMetadata?.totalTokenCount || 0, + output_tokens_details: { + reasoning_tokens: jsonResponse.usageMetadata?.thoughtsTokenCount || 0, + }, + }, + }; + return new Response(JSON.stringify(res), { + status: response.status, + statusText: response.statusText, + headers: response.headers, + }); + } else if (response.headers.get("Content-Type")?.includes("stream")) { + if (!response.body) { + return response; + } + + const decoder = new TextDecoder(); + const encoder = new TextEncoder(); + let signatureSent = false; + let contentSent = false; + let hasThinkingContent = false; + let pendingContent = ""; + let contentIndex = 0; + let toolCallIndex = -1; + + const stream = new ReadableStream({ + async start(controller) { + const processLine = async ( + line: string, + controller: ReadableStreamDefaultController + ) => { + if (line.startsWith("data: ")) { + const chunkStr = line.slice(6).trim(); + if (chunkStr) { + logger?.debug({ chunkStr }, `${providerName} chunk:`); + try { + const chunk = JSON.parse(chunkStr); + + // Check if chunk has valid structure + if (!chunk.candidates || !chunk.candidates[0]) { + logger?.debug({ chunkStr }, `Invalid chunk structure`); + return; + } + + const candidate = chunk.candidates[0]; + const parts = candidate.content?.parts || []; + + parts + .filter((part: any) => part.text && part.thought === true) + .forEach((part: any) => { + if (!hasThinkingContent) { + hasThinkingContent = true; + } + const thinkingChunk = { + choices: [ + { + delta: { + role: "assistant", + content: null, + thinking: { + content: part.text, + }, + }, + finish_reason: null, + index: contentIndex, + logprobs: null, + }, + ], + created: parseInt(new Date().getTime() / 1000 + "", 10), + id: chunk.responseId || "", + model: chunk.modelVersion || "", + object: "chat.completion.chunk", + system_fingerprint: "fp_a49d71b8a1", + }; + controller.enqueue( + encoder.encode( + `data: ${JSON.stringify(thinkingChunk)}\n\n` + ) + ); + }); + + let signature = parts.find( + (part: Part) => part.thoughtSignature + )?.thoughtSignature; + if (signature && !signatureSent) { + if (!hasThinkingContent) { + const thinkingChunk = { + choices: [ + { + delta: { + role: "assistant", + content: null, + thinking: { + content: "(no content)", + }, + }, + finish_reason: null, + index: 
contentIndex, + logprobs: null, + }, + ], + created: parseInt(new Date().getTime() / 1000 + "", 10), + id: chunk.responseId || "", + model: chunk.modelVersion || "", + object: "chat.completion.chunk", + system_fingerprint: "fp_a49d71b8a1", + }; + controller.enqueue( + encoder.encode( + `data: ${JSON.stringify(thinkingChunk)}\n\n` + ) + ); + } + const signatureChunk = { + choices: [ + { + delta: { + role: "assistant", + content: null, + thinking: { + signature, + }, + }, + finish_reason: null, + index: contentIndex, + logprobs: null, + }, + ], + created: parseInt(new Date().getTime() / 1000 + "", 10), + id: chunk.responseId || "", + model: chunk.modelVersion || "", + object: "chat.completion.chunk", + system_fingerprint: "fp_a49d71b8a1", + }; + controller.enqueue( + encoder.encode( + `data: ${JSON.stringify(signatureChunk)}\n\n` + ) + ); + signatureSent = true; + contentIndex++; + if (pendingContent) { + const res = { + choices: [ + { + delta: { + role: "assistant", + content: pendingContent, + }, + finish_reason: null, + index: contentIndex, + logprobs: null, + }, + ], + created: parseInt(new Date().getTime() / 1000 + "", 10), + id: chunk.responseId || "", + model: chunk.modelVersion || "", + object: "chat.completion.chunk", + system_fingerprint: "fp_a49d71b8a1", + }; + + controller.enqueue( + encoder.encode(`data: ${JSON.stringify(res)}\n\n`) + ); + + pendingContent = ""; + if (!contentSent) { + contentSent = true; + } + } + } + + const tool_calls = parts + .filter((part: Part) => part.functionCall) + .map((part: Part) => ({ + id: + part.functionCall?.id || + `ccr_tool_${Math.random().toString(36).substring(2, 15)}`, + type: "function", + function: { + name: part.functionCall?.name, + arguments: JSON.stringify(part.functionCall?.args || {}), + }, + })); + + const textContent = parts + .filter((part: Part) => part.text && part.thought !== true) + .map((part: Part) => part.text) + .join("\n"); + + if (!textContent && signatureSent && !contentSent) { + const emptyContentChunk = { + choices: [ + { + delta: { + role: "assistant", + content: "(no content)", + }, + index: contentIndex, + finish_reason: null, + logprobs: null, + }, + ], + created: parseInt(new Date().getTime() / 1000 + "", 10), + id: chunk.responseId || "", + model: chunk.modelVersion || "", + object: "chat.completion.chunk", + system_fingerprint: "fp_a49d71b8a1", + }; + controller.enqueue( + encoder.encode( + `data: ${JSON.stringify(emptyContentChunk)}\n\n` + ) + ); + + if (!contentSent) { + contentSent = true; + } + } + + if (hasThinkingContent && textContent && !signatureSent) { + if (chunk.modelVersion.includes("3")) { + pendingContent += textContent; + return; + } else { + const signatureChunk = { + choices: [ + { + delta: { + role: "assistant", + content: null, + thinking: { + signature: `ccr_${+new Date()}`, + }, + }, + finish_reason: null, + index: contentIndex, + logprobs: null, + }, + ], + created: parseInt(new Date().getTime() / 1000 + "", 10), + id: chunk.responseId || "", + model: chunk.modelVersion || "", + object: "chat.completion.chunk", + system_fingerprint: "fp_a49d71b8a1", + }; + controller.enqueue( + encoder.encode( + `data: ${JSON.stringify(signatureChunk)}\n\n` + ) + ); + signatureSent = true; + } + } + + if (textContent) { + if (!pendingContent) contentIndex++; + const res = { + choices: [ + { + delta: { + role: "assistant", + content: textContent, + }, + finish_reason: + candidate.finishReason?.toLowerCase() || null, + index: contentIndex, + logprobs: null, + }, + ], + created: parseInt(new 
Date().getTime() / 1000 + "", 10), + id: chunk.responseId || "", + model: chunk.modelVersion || "", + object: "chat.completion.chunk", + system_fingerprint: "fp_a49d71b8a1", + usage: { + completion_tokens: + chunk.usageMetadata?.candidatesTokenCount || 0, + prompt_tokens: chunk.usageMetadata?.promptTokenCount || 0, + prompt_tokens_details: { + cached_tokens: + chunk.usageMetadata?.cachedContentTokenCount || 0, + }, + total_tokens: chunk.usageMetadata?.totalTokenCount || 0, + output_tokens_details: { + reasoning_tokens: + chunk.usageMetadata?.thoughtsTokenCount || 0, + }, + }, + }; + + if (candidate?.groundingMetadata?.groundingChunks?.length) { + (res.choices[0].delta as any).annotations = + candidate.groundingMetadata.groundingChunks.map( + (groundingChunk: any, index: number) => { + const support = + candidate?.groundingMetadata?.groundingSupports?.filter( + (item: any) => + item.groundingChunkIndices?.includes(index) + ); + return { + type: "url_citation", + url_citation: { + url: groundingChunk?.web?.uri || "", + title: groundingChunk?.web?.title || "", + content: support?.[0]?.segment?.text || "", + start_index: + support?.[0]?.segment?.startIndex || 0, + end_index: support?.[0]?.segment?.endIndex || 0, + }, + }; + } + ); + } + controller.enqueue( + encoder.encode(`data: ${JSON.stringify(res)}\n\n`) + ); + + if (!contentSent && textContent) { + contentSent = true; + } + } + + if (tool_calls.length > 0) { + tool_calls.forEach((tool) => { + contentIndex++; + toolCallIndex++; + const res = { + choices: [ + { + delta: { + role: "assistant", + tool_calls: [ + { + ...tool, + index: toolCallIndex, + }, + ], + }, + finish_reason: + candidate.finishReason?.toLowerCase() || null, + index: contentIndex, + logprobs: null, + }, + ], + created: parseInt(new Date().getTime() / 1000 + "", 10), + id: chunk.responseId || "", + model: chunk.modelVersion || "", + object: "chat.completion.chunk", + system_fingerprint: "fp_a49d71b8a1", + usage: { + completion_tokens: + chunk.usageMetadata?.candidatesTokenCount || 0, + prompt_tokens: + chunk.usageMetadata?.promptTokenCount || 0, + prompt_tokens_details: { + cached_tokens: + chunk.usageMetadata?.cachedContentTokenCount || 0, + }, + total_tokens: chunk.usageMetadata?.totalTokenCount || 0, + output_tokens_details: { + reasoning_tokens: + chunk.usageMetadata?.thoughtsTokenCount || 0, + }, + }, + }; + + if (candidate?.groundingMetadata?.groundingChunks?.length) { + (res.choices[0].delta as any).annotations = + candidate.groundingMetadata.groundingChunks.map( + (groundingChunk: any, index: number) => { + const support = + candidate?.groundingMetadata?.groundingSupports?.filter( + (item: any) => + item.groundingChunkIndices?.includes(index) + ); + return { + type: "url_citation", + url_citation: { + url: groundingChunk?.web?.uri || "", + title: groundingChunk?.web?.title || "", + content: support?.[0]?.segment?.text || "", + start_index: + support?.[0]?.segment?.startIndex || 0, + end_index: support?.[0]?.segment?.endIndex || 0, + }, + }; + } + ); + } + controller.enqueue( + encoder.encode(`data: ${JSON.stringify(res)}\n\n`) + ); + }); + + if (!contentSent && textContent) { + contentSent = true; + } + } + } catch (error: any) { + logger?.error( + `Error parsing ${providerName} stream chunk`, + chunkStr, + error.message + ); + } + } + } + }; + + const reader = response.body!.getReader(); + let buffer = ""; + try { + while (true) { + const { done, value } = await reader.read(); + if (done) { + if (buffer) { + await processLine(buffer, controller); + } + break; + } + 
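+            // Standard SSE line-buffering: decode each network chunk, split on
+            // "\n", handle every complete line, and keep the trailing partial
+            // line in `buffer` until the next read. E.g. if one read ends with
+            // `data: {"cand` and the next begins `idates": ...}`, the halves
+            // are rejoined before JSON.parse ever sees them.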
+            buffer += decoder.decode(value, { stream: true });
+            const lines = buffer.split("\n");
+
+            buffer = lines.pop() || "";
+
+            for (const line of lines) {
+              await processLine(line, controller);
+            }
+          }
+        } catch (error) {
+          controller.error(error);
+        } finally {
+          controller.close();
+        }
+      },
+    });
+
+    return new Response(stream, {
+      status: response.status,
+      statusText: response.statusText,
+      headers: response.headers,
+    });
+  }
+  return response;
+}
diff --git a/packages/core/src/utils/image.ts b/packages/core/src/utils/image.ts
new file mode 100644
index 0000000..ed38c7a
--- /dev/null
+++ b/packages/core/src/utils/image.ts
@@ -0,0 +1,9 @@
+export const formatBase64 = (data: string, media_type: string) => {
+  if (data.includes("base64")) {
+    data = data.split("base64").pop() as string;
+    if (data.startsWith(",")) {
+      data = data.slice(1);
+    }
+  }
+  return `data:${media_type};base64,${data}`;
+};
diff --git a/packages/core/src/utils/request.ts b/packages/core/src/utils/request.ts
new file mode 100644
index 0000000..0206edd
--- /dev/null
+++ b/packages/core/src/utils/request.ts
@@ -0,0 +1,57 @@
+import { ProxyAgent } from "undici";
+import { UnifiedChatRequest } from "../types/llm";
+
+export function sendUnifiedRequest(
+  url: URL | string,
+  request: UnifiedChatRequest,
+  config: any,
+  context: any,
+  logger?: any
+): Promise<Response> {
+  const headers = new Headers({
+    "Content-Type": "application/json",
+  });
+  if (config.headers) {
+    Object.entries(config.headers).forEach(([key, value]) => {
+      if (value) {
+        headers.set(key, value as string);
+      }
+    });
+  }
+  let combinedSignal: AbortSignal;
+  const timeoutSignal = AbortSignal.timeout(config.TIMEOUT ?? 60 * 60 * 1000);
+
+  if (config.signal) {
+    const controller = new AbortController();
+    const abortHandler = () => controller.abort();
+    config.signal.addEventListener("abort", abortHandler);
+    timeoutSignal.addEventListener("abort", abortHandler);
+    combinedSignal = controller.signal;
+  } else {
+    combinedSignal = timeoutSignal;
+  }
+
+  const fetchOptions: RequestInit = {
+    method: "POST",
+    headers: headers,
+    body: JSON.stringify(request),
+    signal: combinedSignal,
+  };
+
+  if (config.httpsProxy) {
+    (fetchOptions as any).dispatcher = new ProxyAgent(
+      new URL(config.httpsProxy).toString()
+    );
+  }
+  logger?.debug(
+    {
+      reqId: context.req.id,
+      request: fetchOptions,
+      headers: Object.fromEntries(headers.entries()),
+      requestUrl: typeof url === "string" ? url : url.toString(),
+      useProxy: config.httpsProxy,
+    },
+    "final request"
+  );
+  return fetch(typeof url === "string" ? url : url.toString(), fetchOptions);
+}
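+// Illustrative call of sendUnifiedRequest (a sketch; the endpoint, config
+// fields, and context shape are assumed, not taken from this diff):
+//
+//   const res = await sendUnifiedRequest(
+//     "https://api.openai.com/v1/chat/completions",
+//     { model: "gpt-4o", messages: [{ role: "user", content: "hi" }] } as any,
+//     { headers: { Authorization: `Bearer ${process.env.OPENAI_API_KEY}` } },
+//     { req: { id: "req-1" } }
+//   );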
diff --git a/packages/core/src/utils/thinking.ts b/packages/core/src/utils/thinking.ts
new file mode 100644
index 0000000..43c9caa
--- /dev/null
+++ b/packages/core/src/utils/thinking.ts
@@ -0,0 +1,8 @@
+import { ThinkLevel } from "@/types/llm";
+
+export const getThinkLevel = (thinking_budget: number): ThinkLevel => {
+  if (thinking_budget <= 0) return "none";
+  if (thinking_budget <= 1024) return "low";
+  if (thinking_budget <= 8192) return "medium";
+  return "high";
+};
diff --git a/packages/core/src/utils/toolArgumentsParser.ts b/packages/core/src/utils/toolArgumentsParser.ts
new file mode 100644
index 0000000..5f9e4a7
--- /dev/null
+++ b/packages/core/src/utils/toolArgumentsParser.ts
@@ -0,0 +1,51 @@
+import JSON5 from "json5";
+import { jsonrepair } from "jsonrepair";
+
+/**
+ * Parses tool-call arguments.
+ * First tries standard JSON parsing, then JSON5 parsing for relaxed syntax,
+ * and finally uses jsonrepair for a safe repair pass.
+ *
+ * @param argsString - Parameter string to parse
+ * @returns The parsed argument string, or a safe empty object
+ */
+export function parseToolArguments(argsString: string, logger?: any): string {
+  // Handle empty or null input
+  if (!argsString || argsString.trim() === "" || argsString === "{}") {
+    return "{}";
+  }
+
+  try {
+    // First attempt: standard JSON parsing
+    JSON.parse(argsString);
+    logger?.debug(`Tool arguments parsed as standard JSON`);
+    return argsString;
+  } catch (jsonError: any) {
+    try {
+      // Second attempt: JSON5 parsing for relaxed syntax
+      const args = JSON5.parse(argsString);
+      logger?.debug(`Tool arguments parsed as JSON5`);
+      return JSON.stringify(args);
+    } catch (json5Error: any) {
+      try {
+        // Third attempt: safe JSON repair without code execution
+        const repairedJson = jsonrepair(argsString);
+        logger?.debug(`Tool arguments safely repaired`);
+        return repairedJson;
+      } catch (repairError: any) {
+        // All parsing attempts failed: log the errors and return a safe fallback
+        logger?.error(
+          `JSON parsing failed: ${jsonError.message}. ` +
+            `JSON5 parsing failed: ${json5Error.message}. ` +
+            `JSON repair failed: ${repairError.message}. ` +
+            `Input data: ${JSON.stringify(argsString)}`
+        );
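+        // Illustrative inputs for the three-stage fallback above (behaviour
+        // of JSON5 and jsonrepair assumed, shown for clarity):
+        //   '{"path": "/tmp/a"}'  → stage 1, returned unchanged
+        //   "{path: '/tmp/a',}"   → stage 2, JSON5 accepts the relaxed syntax
+        //   '{"path": "/tmp/a"'   → stage 3, jsonrepair closes the brace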
+
+        // Return a safe empty object as fallback instead of the malformed input
+        logger?.debug(`Returning safe empty object as fallback`);
+        return "{}";
+      }
+    }
+  }
+}
\ No newline at end of file
diff --git a/packages/core/src/utils/vertex-claude.util.ts b/packages/core/src/utils/vertex-claude.util.ts
new file mode 100644
index 0000000..4b159f9
--- /dev/null
+++ b/packages/core/src/utils/vertex-claude.util.ts
@@ -0,0 +1,542 @@
+import { UnifiedChatRequest, UnifiedMessage, UnifiedTool } from "../types/llm";
+
+// Vertex Claude message interface
+interface ClaudeMessage {
+  role: "user" | "assistant";
+  content: Array<{
+    type: "text" | "image";
+    text?: string;
+    source?: {
+      type: "base64";
+      media_type: string;
+      data: string;
+    };
+  }>;
+}
+
+// Vertex Claude tool interface
+interface ClaudeTool {
+  name: string;
+  description: string;
+  input_schema: {
+    type: string;
+    properties: Record<string, any>;
+    required?: string[];
+    additionalProperties?: boolean;
+    $schema?: string;
+  };
+}
+
+// Vertex Claude request interface
+interface VertexClaudeRequest {
+  anthropic_version: "vertex-2023-10-16";
+  messages: ClaudeMessage[];
+  max_tokens: number;
+  stream?: boolean;
+  temperature?: number;
+  top_p?: number;
+  top_k?: number;
+  tools?: ClaudeTool[];
+  tool_choice?: "auto" | "none" | { type: "tool"; name: string };
+}
+
+// Vertex Claude response interface
+interface VertexClaudeResponse {
+  content: Array<{
+    type: "text";
+    text: string;
+  }>;
+  id: string;
+  model: string;
+  role: "assistant";
+  stop_reason: string;
+  stop_sequence: null;
+  type: "message";
+  usage: {
+    input_tokens: number;
+    output_tokens: number;
+  };
+  tool_use?: Array<{
+    id: string;
+    name: string;
+    input: Record<string, any>;
+  }>;
+}
+
+export function buildRequestBody(
+  request: UnifiedChatRequest
+): VertexClaudeRequest {
+  const messages: ClaudeMessage[] = [];
+
+  for (let i = 0; i < request.messages.length; i++) {
+    const message = request.messages[i];
+    const isLastMessage = i === request.messages.length - 1;
+    const isAssistantMessage = message.role === "assistant";
+
+    const content: ClaudeMessage["content"] = [];
+
+    if (typeof message.content === "string") {
+      // Keep all string content, even empty strings, since it may carry meaning
+      content.push({
+        type: "text",
+        text: message.content,
+      });
+    } else if (Array.isArray(message.content)) {
+      message.content.forEach((item) => {
+        if (item.type === "text") {
+          // Keep all text content, even empty strings
+          content.push({
+            type: "text",
+            text: item.text || "",
+          });
+        } else if (item.type === "image_url") {
+          // Handle image content
+          content.push({
+            type: "image",
+            source: {
+              type: "base64",
+              media_type: item.media_type || "image/jpeg",
+              data: item.image_url.url,
+            },
+          });
+        }
+      });
+    }
+
+    // Only skip completely empty non-final messages (no content and no tool calls)
+    if (
+      !isLastMessage &&
+      content.length === 0 &&
+      !message.tool_calls &&
+      !message.content
+    ) {
+      continue;
+    }
+
+    // For a final assistant message with tool calls but no content, add an empty text block
+    if (
+      isLastMessage &&
+      isAssistantMessage &&
+      content.length === 0 &&
+      message.tool_calls
+    ) {
+      content.push({
+        type: "text",
+        text: "",
+      });
+    }
+
+    messages.push({
+      role: message.role === "assistant" ? "assistant" : "user",
+      content,
+    });
+  }
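+  // The loop above mirrors two Claude API constraints as understood here:
+  // fully empty non-final messages are dropped, while a final assistant
+  // message that only carries tool_calls still gets an empty text block,
+  // e.g. content: [{ type: "text", text: "" }].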
"assistant" : "user", + content, + }); + } + + const requestBody: VertexClaudeRequest = { + anthropic_version: "vertex-2023-10-16", + messages, + max_tokens: request.max_tokens || 1000, + stream: request.stream || false, + ...(request.temperature && { temperature: request.temperature }), + }; + + // 处理工具定义 + if (request.tools && request.tools.length > 0) { + requestBody.tools = request.tools.map((tool: UnifiedTool) => ({ + name: tool.function.name, + description: tool.function.description, + input_schema: tool.function.parameters, + })); + } + + // 处理工具选择 + if (request.tool_choice) { + if (request.tool_choice === "auto" || request.tool_choice === "none") { + requestBody.tool_choice = request.tool_choice; + } else if (typeof request.tool_choice === "string") { + // 如果 tool_choice 是字符串,假设是工具名称 + requestBody.tool_choice = { + type: "tool", + name: request.tool_choice, + }; + } + } + + return requestBody; +} + +export function transformRequestOut( + request: Record +): UnifiedChatRequest { + const vertexRequest = request as VertexClaudeRequest; + + const messages: UnifiedMessage[] = vertexRequest.messages.map((msg) => { + const content = msg.content.map((item) => { + if (item.type === "text") { + return { + type: "text" as const, + text: item.text || "", + }; + } else if (item.type === "image" && item.source) { + return { + type: "image_url" as const, + image_url: { + url: item.source.data, + }, + media_type: item.source.media_type, + }; + } + return { + type: "text" as const, + text: "", + }; + }); + + return { + role: msg.role, + content, + }; + }); + + const result: UnifiedChatRequest = { + messages, + model: request.model || "claude-sonnet-4@20250514", + max_tokens: vertexRequest.max_tokens, + temperature: vertexRequest.temperature, + stream: vertexRequest.stream, + }; + + // 处理工具定义 + if (vertexRequest.tools && vertexRequest.tools.length > 0) { + result.tools = vertexRequest.tools.map((tool) => ({ + type: "function" as const, + function: { + name: tool.name, + description: tool.description, + parameters: { + type: "object" as const, + properties: tool.input_schema.properties, + required: tool.input_schema.required, + additionalProperties: tool.input_schema.additionalProperties, + $schema: tool.input_schema.$schema, + }, + }, + })); + } + + // 处理工具选择 + if (vertexRequest.tool_choice) { + if (typeof vertexRequest.tool_choice === "string") { + result.tool_choice = vertexRequest.tool_choice; + } else if (vertexRequest.tool_choice.type === "tool") { + result.tool_choice = vertexRequest.tool_choice.name; + } + } + + return result; +} + +export async function transformResponseOut( + response: Response, + providerName: string, + logger?: any +): Promise { + if (response.headers.get("Content-Type")?.includes("application/json")) { + const jsonResponse = (await response.json()) as VertexClaudeResponse; + + // 处理工具调用 + let tool_calls = undefined; + if (jsonResponse.tool_use && jsonResponse.tool_use.length > 0) { + tool_calls = jsonResponse.tool_use.map((tool) => ({ + id: tool.id, + type: "function" as const, + function: { + name: tool.name, + arguments: JSON.stringify(tool.input), + }, + })); + } + + // 转换为OpenAI格式的响应 + const res = { + id: jsonResponse.id, + choices: [ + { + finish_reason: jsonResponse.stop_reason || null, + index: 0, + message: { + content: jsonResponse.content[0]?.text || "", + role: "assistant", + ...(tool_calls && { tool_calls }), + }, + }, + ], + created: parseInt(new Date().getTime() / 1000 + "", 10), + model: jsonResponse.model, + object: "chat.completion", + usage: { + 
+
+    return new Response(JSON.stringify(res), {
+      status: response.status,
+      statusText: response.statusText,
+      headers: response.headers,
+    });
+  } else if (response.headers.get("Content-Type")?.includes("stream")) {
+    // Handle streaming responses
+    if (!response.body) {
+      return response;
+    }
+
+    const decoder = new TextDecoder();
+    const encoder = new TextEncoder();
+
+    const processLine = (
+      line: string,
+      controller: ReadableStreamDefaultController
+    ) => {
+      if (line.startsWith("data: ")) {
+        const chunkStr = line.slice(6).trim();
+        if (chunkStr) {
+          logger?.debug({ chunkStr }, `${providerName} chunk:`);
+          try {
+            const chunk = JSON.parse(chunkStr);
+
+            // Handle native Anthropic streaming events
+            if (
+              chunk.type === "content_block_delta" &&
+              chunk.delta?.type === "text_delta"
+            ) {
+              // Native Anthropic format; convert it to OpenAI format
+              const res = {
+                choices: [
+                  {
+                    delta: {
+                      role: "assistant",
+                      content: chunk.delta.text || "",
+                    },
+                    finish_reason: null,
+                    index: 0,
+                    logprobs: null,
+                  },
+                ],
+                created: Math.floor(Date.now() / 1000),
+                id: chunk.id || "",
+                model: chunk.model || "",
+                object: "chat.completion.chunk",
+                system_fingerprint: "fp_a49d71b8a1",
+                usage: {
+                  completion_tokens: chunk.usage?.output_tokens || 0,
+                  prompt_tokens: chunk.usage?.input_tokens || 0,
+                  total_tokens:
+                    (chunk.usage?.input_tokens || 0) +
+                    (chunk.usage?.output_tokens || 0),
+                },
+              };
+              controller.enqueue(
+                encoder.encode(`data: ${JSON.stringify(res)}\n\n`)
+              );
+            } else if (
+              chunk.type === "content_block_delta" &&
+              chunk.delta?.type === "input_json_delta"
+            ) {
+              // Handle incremental tool-call arguments
+              const res = {
+                choices: [
+                  {
+                    delta: {
+                      tool_calls: [
+                        {
+                          index: chunk.index || 0,
+                          function: {
+                            arguments: chunk.delta.partial_json || "",
+                          },
+                        },
+                      ],
+                    },
+                    finish_reason: null,
+                    index: 0,
+                    logprobs: null,
+                  },
+                ],
+                created: Math.floor(Date.now() / 1000),
+                id: chunk.id || "",
+                model: chunk.model || "",
+                object: "chat.completion.chunk",
+                system_fingerprint: "fp_a49d71b8a1",
+                usage: {
+                  completion_tokens: chunk.usage?.output_tokens || 0,
+                  prompt_tokens: chunk.usage?.input_tokens || 0,
+                  total_tokens:
+                    (chunk.usage?.input_tokens || 0) +
+                    (chunk.usage?.output_tokens || 0),
+                },
+              };
+              controller.enqueue(
+                encoder.encode(`data: ${JSON.stringify(res)}\n\n`)
+              );
+            } else if (
+              chunk.type === "content_block_start" &&
+              chunk.content_block?.type === "tool_use"
+            ) {
+              // Handle the start of a tool call
+              const res = {
+                choices: [
+                  {
+                    delta: {
+                      tool_calls: [
+                        {
+                          index: chunk.index || 0,
+                          id: chunk.content_block.id,
+                          type: "function",
+                          function: {
+                            name: chunk.content_block.name,
+                            arguments: "",
+                          },
+                        },
+                      ],
+                    },
+                    finish_reason: null,
+                    index: 0,
+                    logprobs: null,
+                  },
+                ],
+                created: Math.floor(Date.now() / 1000),
+                id: chunk.id || "",
+                model: chunk.model || "",
+                object: "chat.completion.chunk",
+                system_fingerprint: "fp_a49d71b8a1",
+                usage: {
+                  completion_tokens: chunk.usage?.output_tokens || 0,
+                  prompt_tokens: chunk.usage?.input_tokens || 0,
+                  total_tokens:
+                    (chunk.usage?.input_tokens || 0) +
+                    (chunk.usage?.output_tokens || 0),
+                },
+              };
+              controller.enqueue(
+                encoder.encode(`data: ${JSON.stringify(res)}\n\n`)
+              );
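+                // The message_delta branch below maps Anthropic stop reasons
+                // onto OpenAI finish_reason values:
+                //   tool_use      → "tool_calls"
+                //   max_tokens    → "length"
+                //   stop_sequence → "content_filter"
+                //   anything else → "stop"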
"tool_calls" + : chunk.delta?.stop_reason === "max_tokens" + ? "length" + : chunk.delta?.stop_reason === "stop_sequence" + ? "content_filter" + : "stop", + index: 0, + logprobs: null, + }, + ], + created: parseInt(new Date().getTime() / 1000 + "", 10), + id: chunk.id || "", + model: chunk.model || "", + object: "chat.completion.chunk", + system_fingerprint: "fp_a49d71b8a1", + usage: { + completion_tokens: chunk.usage?.output_tokens || 0, + prompt_tokens: chunk.usage?.input_tokens || 0, + total_tokens: + (chunk.usage?.input_tokens || 0) + + (chunk.usage?.output_tokens || 0), + }, + }; + controller.enqueue( + encoder.encode(`data: ${JSON.stringify(res)}\n\n`) + ); + } else if (chunk.type === "message_stop") { + // 发送结束标记 + controller.enqueue(encoder.encode(`data: [DONE]\n\n`)); + } else { + // 处理其他格式的响应(保持原有逻辑作为后备) + const res = { + choices: [ + { + delta: { + role: "assistant", + content: chunk.content?.[0]?.text || "", + }, + finish_reason: chunk.stop_reason?.toLowerCase() || null, + index: 0, + logprobs: null, + }, + ], + created: parseInt(new Date().getTime() / 1000 + "", 10), + id: chunk.id || "", + model: chunk.model || "", + object: "chat.completion.chunk", + system_fingerprint: "fp_a49d71b8a1", + usage: { + completion_tokens: chunk.usage?.output_tokens || 0, + prompt_tokens: chunk.usage?.input_tokens || 0, + total_tokens: + (chunk.usage?.input_tokens || 0) + + (chunk.usage?.output_tokens || 0), + }, + }; + controller.enqueue( + encoder.encode(`data: ${JSON.stringify(res)}\n\n`) + ); + } + } catch (error: any) { + logger?.error( + `Error parsing ${providerName} stream chunk`, + chunkStr, + error.message + ); + } + } + } + }; + + const stream = new ReadableStream({ + async start(controller) { + const reader = response.body!.getReader(); + let buffer = ""; + try { + while (true) { + const { done, value } = await reader.read(); + if (done) { + if (buffer) { + processLine(buffer, controller); + } + break; + } + + buffer += decoder.decode(value, { stream: true }); + const lines = buffer.split("\n"); + + buffer = lines.pop() || ""; + + for (const line of lines) { + processLine(line, controller); + } + } + } catch (error) { + controller.error(error); + } finally { + controller.close(); + } + }, + }); + + return new Response(stream, { + status: response.status, + statusText: response.statusText, + headers: response.headers, + }); + } + return response; +} diff --git a/packages/core/tsconfig.json b/packages/core/tsconfig.json new file mode 100644 index 0000000..e24a21f --- /dev/null +++ b/packages/core/tsconfig.json @@ -0,0 +1,28 @@ +{ + "compilerOptions": { + "target": "ES2022", + "module": "CommonJS", + "lib": ["ES2022"], + "outDir": "./dist", + "rootDir": "./src", + "strict": true, + "esModuleInterop": true, + "skipLibCheck": true, + "forceConsistentCasingInFileNames": true, + "resolveJsonModule": true, + "moduleResolution": "node", + "declaration": true, + "sourceMap": true, + "baseUrl": "./", + "paths": { + "@/*": ["src/*"] + } + }, + "include": [ + "src/**/*" + ], + "exclude": [ + "node_modules", + "dist" + ] +} diff --git a/packages/ui/src/components/ui/checkbox.tsx b/packages/ui/src/components/ui/checkbox.tsx new file mode 100644 index 0000000..35de148 --- /dev/null +++ b/packages/ui/src/components/ui/checkbox.tsx @@ -0,0 +1,30 @@ +"use client" + +import * as React from "react" +import * as CheckboxPrimitives from "@radix-ui/react-checkbox" +import { Check } from "lucide-react" + +import { cn } from "@/lib/utils" + +const Checkbox = React.forwardRef< + React.ElementRef, + 
+  React.ComponentPropsWithoutRef<typeof CheckboxPrimitives.Root>
+>(({ className, ...props }, ref) => (
+  <CheckboxPrimitives.Root
+    ref={ref}
+    className={cn(
+      "peer h-4 w-4 shrink-0 rounded-sm border border-primary ring-offset-background focus-visible:outline-none focus-visible:ring-2 focus-visible:ring-ring focus-visible:ring-offset-2 disabled:cursor-not-allowed disabled:opacity-50 data-[state=checked]:bg-primary data-[state=checked]:text-primary-foreground",
+      className
+    )}
+    {...props}
+  >
+    <CheckboxPrimitives.Indicator
+      className={cn("flex items-center justify-center text-current")}
+    >
+      <Check className="h-4 w-4" />
+    </CheckboxPrimitives.Indicator>
+  </CheckboxPrimitives.Root>
+))
+Checkbox.displayName = CheckboxPrimitives.Root.displayName
+
+export { Checkbox }
diff --git a/packages/ui/src/components/ui/select.tsx b/packages/ui/src/components/ui/select.tsx
new file mode 100644
index 0000000..cbe5a36
--- /dev/null
+++ b/packages/ui/src/components/ui/select.tsx
@@ -0,0 +1,160 @@
+"use client"
+
+import * as React from "react"
+import * as SelectPrimitive from "@radix-ui/react-select"
+import { Check, ChevronDown, ChevronUp } from "lucide-react"
+
+import { cn } from "@/lib/utils"
+
+const Select = SelectPrimitive.Root
+
+const SelectGroup = SelectPrimitive.Group
+
+const SelectValue = SelectPrimitive.Value
+
+const SelectTrigger = React.forwardRef<
+  React.ElementRef<typeof SelectPrimitive.Trigger>,
+  React.ComponentPropsWithoutRef<typeof SelectPrimitive.Trigger>
+>(({ className, children, ...props }, ref) => (
+  <SelectPrimitive.Trigger
+    ref={ref}
+    className={cn(
+      "flex h-10 w-full items-center justify-between rounded-md border border-input bg-background px-3 py-2 text-sm ring-offset-background placeholder:text-muted-foreground focus:outline-none focus:ring-2 focus:ring-ring focus:ring-offset-2 disabled:cursor-not-allowed disabled:opacity-50 [&>span]:line-clamp-1",
+      className
+    )}
+    {...props}
+  >
+    {children}
+    <SelectPrimitive.Icon asChild>
+      <ChevronDown className="h-4 w-4 opacity-50" />
+    </SelectPrimitive.Icon>
+  </SelectPrimitive.Trigger>
+))
+SelectTrigger.displayName = SelectPrimitive.Trigger.displayName
+
+const SelectScrollUpButton = React.forwardRef<
+  React.ElementRef<typeof SelectPrimitive.ScrollUpButton>,
+  React.ComponentPropsWithoutRef<typeof SelectPrimitive.ScrollUpButton>
+>(({ className, ...props }, ref) => (
+  <SelectPrimitive.ScrollUpButton
+    ref={ref}
+    className={cn(
+      "flex cursor-default items-center justify-center py-1",
+      className
+    )}
+    {...props}
+  >
+    <ChevronUp className="h-4 w-4" />
+  </SelectPrimitive.ScrollUpButton>
+))
+SelectScrollUpButton.displayName = SelectPrimitive.ScrollUpButton.displayName
+
+const SelectScrollDownButton = React.forwardRef<
+  React.ElementRef<typeof SelectPrimitive.ScrollDownButton>,
+  React.ComponentPropsWithoutRef<typeof SelectPrimitive.ScrollDownButton>
+>(({ className, ...props }, ref) => (
+  <SelectPrimitive.ScrollDownButton
+    ref={ref}
+    className={cn(
+      "flex cursor-default items-center justify-center py-1",
+      className
+    )}
+    {...props}
+  >
+    <ChevronDown className="h-4 w-4" />
+  </SelectPrimitive.ScrollDownButton>
+))
+SelectScrollDownButton.displayName =
+  SelectPrimitive.ScrollDownButton.displayName
+
+const SelectContent = React.forwardRef<
+  React.ElementRef<typeof SelectPrimitive.Content>,
+  React.ComponentPropsWithoutRef<typeof SelectPrimitive.Content>
+>(({ className, children, position = "popper", ...props }, ref) => (
+  <SelectPrimitive.Portal>
+    <SelectPrimitive.Content
+      ref={ref}
+      className={cn(
+        "relative z-50 max-h-96 min-w-[8rem] overflow-hidden rounded-md border bg-popover text-popover-foreground shadow-md data-[state=open]:animate-in data-[state=closed]:animate-out data-[state=closed]:fade-out-0 data-[state=open]:fade-in-0 data-[state=closed]:zoom-out-95 data-[state=open]:zoom-in-95 data-[side=bottom]:slide-in-from-top-2 data-[side=left]:slide-in-from-right-2 data-[side=right]:slide-in-from-left-2 data-[side=top]:slide-in-from-bottom-2",
+        position === "popper" &&
+          "data-[side=bottom]:translate-y-1 data-[side=left]:-translate-x-1 data-[side=right]:translate-x-1 data-[side=top]:-translate-y-1",
+        className
+      )}
+      position={position}
+      {...props}
+    >
+      <SelectScrollUpButton />
+      <SelectPrimitive.Viewport
+        className={cn(
+          "p-1",
+          position === "popper" &&
+            "h-[var(--radix-select-trigger-height)] w-full min-w-[var(--radix-select-trigger-width)]"
+        )}
+      >
+        {children}
+      </SelectPrimitive.Viewport>
+      <SelectScrollDownButton />
+    </SelectPrimitive.Content>
+  </SelectPrimitive.Portal>
+))
+SelectContent.displayName = SelectPrimitive.Content.displayName
+
+const SelectLabel = React.forwardRef<
+  React.ElementRef<typeof SelectPrimitive.Label>,
+  React.ComponentPropsWithoutRef<typeof SelectPrimitive.Label>
+>(({ className, ...props }, ref) => (
+  <SelectPrimitive.Label
+    ref={ref}
+    className={cn("py-1.5 pl-8 pr-2 text-sm font-semibold", className)}
+    {...props}
+  />
+))
+SelectLabel.displayName = SelectPrimitive.Label.displayName
+
+const SelectItem = React.forwardRef<
+  React.ElementRef<typeof SelectPrimitive.Item>,
+  React.ComponentPropsWithoutRef<typeof SelectPrimitive.Item>
+>(({ className, children, ...props }, ref) => (
+  <SelectPrimitive.Item
+    ref={ref}
+    className={cn(
+      "relative flex w-full cursor-default select-none items-center rounded-sm py-1.5 pl-8 pr-2 text-sm outline-none focus:bg-accent focus:text-accent-foreground data-[disabled]:pointer-events-none data-[disabled]:opacity-50",
+      className
+    )}
+    {...props}
+  >
+    <span className="absolute left-2 flex h-3.5 w-3.5 items-center justify-center">
+      <SelectPrimitive.ItemIndicator>
+        <Check className="h-4 w-4" />
+      </SelectPrimitive.ItemIndicator>
+    </span>
+
+    <SelectPrimitive.ItemText>{children}</SelectPrimitive.ItemText>
+  </SelectPrimitive.Item>
+))
+SelectItem.displayName = SelectPrimitive.Item.displayName
+
+const SelectSeparator = React.forwardRef<
+  React.ElementRef<typeof SelectPrimitive.Separator>,
+  React.ComponentPropsWithoutRef<typeof SelectPrimitive.Separator>
+>(({ className, ...props }, ref) => (
+  <SelectPrimitive.Separator
+    ref={ref}
+    className={cn("-mx-1 my-1 h-px bg-muted", className)}
+    {...props}
+  />
+))
+SelectSeparator.displayName = SelectPrimitive.Separator.displayName
+
+export {
+  Select,
+  SelectGroup,
+  SelectValue,
+  SelectTrigger,
+  SelectContent,
+  SelectLabel,
+  SelectItem,
+  SelectSeparator,
+  SelectScrollUpButton,
+  SelectScrollDownButton,
+}
diff --git a/packages/ui/src/components/ui/textarea.tsx b/packages/ui/src/components/ui/textarea.tsx
new file mode 100644
index 0000000..9f9a6dc
--- /dev/null
+++ b/packages/ui/src/components/ui/textarea.tsx
@@ -0,0 +1,24 @@
+import * as React from "react"
+
+import { cn } from "@/lib/utils"
+
+export interface TextareaProps
+  extends React.TextareaHTMLAttributes<HTMLTextAreaElement> {}
+
+const Textarea = React.forwardRef<HTMLTextAreaElement, TextareaProps>(
+  ({ className, ...props }, ref) => {
+    return (