support deepseek-v3-20250324

This commit is contained in:
jinhui.li
2025-03-25 14:51:28 +08:00
parent ba2da2f4ed
commit 93fcd77544
17 changed files with 2050 additions and 485 deletions

28
src/constants.ts Normal file
View File

@@ -0,0 +1,28 @@
import path from "node:path";
import os from "node:os";

// All claude-code-router state lives under ~/.claude-code-router.
export const HOME_DIR = path.join(os.homedir(), ".claude-code-router");
// Build child paths with path.join (not "/" string templates) so they are
// correct on every platform, consistent with HOME_DIR above.
export const CONFIG_FILE = path.join(HOME_DIR, "config.json");
export const PROMPTS_DIR = path.join(HOME_DIR, "prompts");

// Defaults written to CONFIG_FILE on first run; initConfig copies these keys
// verbatim onto process.env.
// NOTE(review): the `log` key is lowercase here, but src/utils/log.ts checks
// process.env.LOG — confirm the intended key casing.
export const DEFAULT_CONFIG = {
  log: false,
  ENABLE_ROUTER: true,
  OPENAI_API_KEY: "",
  OPENAI_BASE_URL: "https://openrouter.ai/api/v1",
  OPENAI_MODEL: "openai/o3-mini",
  CODER_AGENT_API_KEY: "",
  CODER_AGENT_BASE_URL: "https://api.deepseek.com",
  CODER_AGENT_MODEL: "deepseek-chat",
  THINK_AGENT_API_KEY: "",
  THINK_AGENT_BASE_URL: "https://api.deepseek.com",
  THINK_AGENT_MODEL: "deepseek-reasoner",
  ROUTER_AGENT_API_KEY: "",
  ROUTER_AGENT_BASE_URL: "https://api.deepseek.com",
  ROUTER_AGENT_MODEL: "deepseek-chat",
};

140
src/deepseek.ts Normal file
View File

@@ -0,0 +1,140 @@
import { OpenAI } from "openai";
import { createClient } from "./utils";
import { log } from "./utils/log";
/**
 * A specialised agent the Router can delegate a chat request to.
 * `description` is shown to the routing model so it can decide when to
 * delegate; `run` forwards the (possibly rewritten) request to the agent's
 * own model endpoint.
 */
export interface BaseRouter {
  name: string;
  description: string;
  run: (
    args: OpenAI.Chat.Completions.ChatCompletionCreateParams
  ) => Promise<any>;
}
/**
 * Delegates deep-reasoning requests to the THINK_AGENT_* model
 * (deepseek-reasoner by default). Never exposes tools to that model.
 */
const thinkRouter: BaseRouter = {
  name: "think",
  description: `This agent is used solely for complex reasoning and thinking tasks. It should not be called for information retrieval or repetitive, frequent requests. Only use this agent for tasks that require deep analysis or problem-solving. If there is an existing result from the Thinker agent, do not call this agent again.你只负责深度思考以拆分任务,不需要进行任何的编码和调用工具。最后讲拆分的步骤按照顺序返回。比如\n1. xxx\n2. xxx\n3. xxx`,
  run(args) {
    const client = createClient({
      apiKey: process.env.THINK_AGENT_API_KEY,
      baseURL: process.env.THINK_AGENT_BASE_URL,
    });
    // Deep-copy so the caller's message list is never mutated.
    const messages = JSON.parse(JSON.stringify(args.messages));
    // The reasoner endpoint rejects array-shaped message content; flatten it
    // to a JSON string.
    messages.forEach((msg: any) => {
      if (Array.isArray(msg.content)) {
        msg.content = JSON.stringify(msg.content);
      }
    });
    // deepseek-reasoner requires strictly alternating user/assistant turns
    // after the system prefix, so force the roles into that pattern.
    let startIdx = messages.findIndex((msg: any) => msg.role !== "system");
    if (startIdx === -1) startIdx = messages.length;
    for (let i = startIdx; i < messages.length; i++) {
      const expectedRole = (i - startIdx) % 2 === 0 ? "user" : "assistant";
      messages[i].role = expectedRole;
    }
    // The request must end on a user turn.
    if (
      messages.length > 0 &&
      messages[messages.length - 1].role === "assistant"
    ) {
      messages.push({
        role: "user",
        content:
          "Please follow the instructions provided above to resolve the issue.",
      });
    }
    // Strip tools WITHOUT mutating the caller's request object. The original
    // `delete args.tools` destroyed the tool list for any later routing pass
    // that reused the same args.
    const { tools: _tools, ...rest } = args;
    return client.chat.completions.create({
      ...rest,
      messages,
      model: process.env.THINK_AGENT_MODEL as string,
    });
  },
};
/**
 * Dispatches incoming chat requests: a routing model inspects the
 * conversation and either answers directly or delegates to a specialised
 * agent (currently only "think").
 */
export class Router {
  // Specialised agents this router may delegate to.
  routers: BaseRouter[];
  // Client for the routing model itself (ROUTER_AGENT_* env settings).
  client: OpenAI;
  constructor() {
    this.routers = [thinkRouter];
    this.client = createClient({
      apiKey: process.env.ROUTER_AGENT_API_KEY,
      baseURL: process.env.ROUTER_AGENT_BASE_URL,
    });
  }
  /**
   * Ask the routing model whether to delegate. Returns either the delegated
   * agent's completion, or a single-element array wrapping the router's own
   * non-streamed completion, with a `delta` field patched on so downstream
   * streaming code can treat it like a chunk.
   *
   * NOTE(review): when "think" runs, args.messages is mutated and route()
   * recurses with no depth limit — confirm unbounded recursion is intended.
   */
  async route(
    args: OpenAI.Chat.Completions.ChatCompletionCreateParams
  ): Promise<any> {
    log(`Request Router: ${JSON.stringify(args, null, 2)}`);
    // Append the routing instruction and force stream:false so the decision
    // JSON can be parsed in one piece.
    const res: OpenAI.Chat.Completions.ChatCompletion =
      await this.client.chat.completions.create({
        ...args,
        messages: [
          ...args.messages,
          {
            role: "system",
            content: `## **Guidelines:**
- **Trigger the "think" mode when the user's request involves deep thinking, complex reasoning, or multi-step analysis.**
- **Criteria:**
- Involves multi-layered logical reasoning or causal analysis
- Requires establishing connections or pattern recognition between different pieces of information
- Involves cross-domain knowledge integration or weighing multiple possibilities
- Requires creative thinking or non-direct inference
### **Format requirements:**
- When you need to trigger the "think" mode, return the following JSON format:
\`\`\`json
{
"use": "think"
}
\`\`\`
`,
          },
        ],
        model: process.env.ROUTER_AGENT_MODEL as string,
        stream: false,
      });
    let result;
    try {
      const text = res.choices[0].message.content;
      if (!text) {
        throw new Error("No text");
      }
      // Extract the first {...} span — the model may wrap the JSON in a code
      // fence or surrounding prose.
      result = JSON.parse(
        text.slice(text.indexOf("{"), text.lastIndexOf("}") + 1)
      );
    } catch (e) {
      // No parseable routing decision: treat the router's reply as the final
      // answer and shim it to look like a one-chunk stream.
      (res.choices[0] as any).delta = res.choices[0].message;
      log(`No Router: ${JSON.stringify(res.choices[0].message)}`);
      return [res];
    }
    const router = this.routers.find((item) => item.name === result.use);
    if (!router) {
      // Decision named an unknown agent: fall back to the router's own reply.
      (res.choices[0] as any).delta = res.choices[0].message;
      log(`No Router: ${JSON.stringify(res.choices[0].message)}`);
      return [res];
    }
    log(`Use Router: ${router.name}`);
    if (router.name === "think") {
      // Run the thinker synchronously, feed its answer back into the
      // conversation as a user turn, then re-route.
      const agentResult = await router.run({
        ...args,
        stream: false,
      });
      try {
        args.messages.push({
          role: "user",
          content:
            `${router.name} Agent Result: ` +
            agentResult.choices[0].message.content,
        });
        log(
          `${router.name} Agent Result: ` +
            agentResult.choices[0].message.content
        );
        return await this.route(args);
      } catch (error) {
        // Dump the raw agent payload for debugging before propagating.
        console.log(agentResult);
        throw error;
      }
    }
    return router.run(args);
  }
}

35
src/index.ts Normal file
View File

@@ -0,0 +1,35 @@
import { existsSync } from "fs";
import { writeFile } from "fs/promises";
import { initConfig, initDir } from "./utils";
import { createServer } from "./server";
import { rewriteToolsPrompt } from "./middlewares/rewriteToolsPrompt";
/**
 * Seed ~/.claude.json for the Claude CLI on first run so onboarding is
 * skipped. Leaves an existing file untouched.
 */
async function initializeClaudeConfig() {
  // Fall back to USERPROFILE so this also works on Windows, where HOME is
  // usually unset (the original produced "undefined/.claude.json" there).
  const homeDir = process.env.HOME ?? process.env.USERPROFILE;
  const configPath = `${homeDir}/.claude.json`;
  if (!existsSync(configPath)) {
    // 64 random hex characters. Math.floor(Math.random() * 16) avoids the
    // original `Math.random().toString(16)[2]` trick, which yields undefined
    // (joined as the string "undefined") whenever Math.random() returns 0.
    const userID = Array.from({ length: 64 }, () =>
      Math.floor(Math.random() * 16).toString(16)
    ).join("");
    const configContent = {
      numStartups: 184,
      autoUpdaterStatus: "enabled",
      userID,
      hasCompletedOnboarding: true,
      lastOnboardingVersion: "0.2.9",
      projects: {},
    };
    await writeFile(configPath, JSON.stringify(configContent, null, 2));
  }
}
/**
 * Application entry point: prepare Claude's config and the router's home
 * directory/config, then serve the proxy.
 */
async function run() {
  await initializeClaudeConfig();
  await initDir();
  await initConfig();

  const port = 3456;
  const server = createServer(port);
  server.useMiddleware(rewriteToolsPrompt);
  server.start();
}

run();

View File

@@ -0,0 +1,34 @@
import { Request, Response, NextFunction } from "express";
import { readFile, access } from "node:fs/promises";
import { join } from "node:path";
import { PROMPTS_DIR } from "../constants";
/**
 * Read a prompt override from ~/.claude-code-router/prompts/<name>.md.
 * Returns the file contents, or null when no override exists.
 */
const getPrompt = async (name: string) => {
  try {
    // readFile already rejects for a missing file, so the original separate
    // access() probe was redundant (and a check-then-use race).
    return await readFile(join(PROMPTS_DIR, `${name}.md`), "utf-8");
  } catch {
    // Missing or unreadable prompt file — caller keeps the default prompt.
    return null;
  }
};
/**
 * Express middleware: replace each tool's description with the user's local
 * prompt override (prompts/<tool-name>.md) when one exists. Requests without
 * a tools array pass through untouched.
 */
export const rewriteToolsPrompt = async (
  req: Request,
  res: Response,
  next: NextFunction
) => {
  const { tools } = req.body;
  if (!Array.isArray(tools)) {
    next();
    return;
  }
  // The per-tool lookups are independent, so run them in parallel instead of
  // awaiting one file read at a time.
  await Promise.all(
    tools.map(async (tool) => {
      const prompt = await getPrompt(tool.name);
      if (prompt) {
        tool.description = prompt;
      }
    })
  );
  next();
};

206
src/router copy.ts Normal file
View File

@@ -0,0 +1,206 @@
import { OpenAI } from "openai";
import { createClient } from "./utils";
import { log } from "./utils/log";
/**
 * A specialised agent the Router can delegate a chat request to.
 * `description` is shown to the routing model so it can decide when to
 * delegate; `run` forwards the (possibly rewritten) request to the agent's
 * own model endpoint.
 */
export interface BaseRouter {
  name: string;
  description: string;
  run: (
    args: OpenAI.Chat.Completions.ChatCompletionCreateParams
  ) => Promise<any>;
}
/**
 * Delegates concrete coding requests to the CODER_AGENT_* model. Tools are
 * stripped before forwarding — this agent only writes code.
 */
const coderRouter: BaseRouter = {
  name: "coder",
  description: `This agent is solely responsible for helping users write code. This agent could not call tools. This agent is used for writing and modifying code when the user provides clear and specific coding requirements. For example, tasks like implementing a quicksort algorithm in JavaScript or creating an HTML layout. If the user's request is unclear or cannot be directly translated into code, please route the task to 'think' first for clarification or further processing.`,
  run(args) {
    const client = createClient({
      apiKey: process.env.CODER_AGENT_API_KEY,
      baseURL: process.env.CODER_AGENT_BASE_URL,
    });
    // Work on copies: the original implementation deleted args.tools and
    // rewrote args.messages entries in place, corrupting the request object
    // for any later routing pass that reuses it.
    const { tools: _tools, ...rest } = args;
    // The coder endpoint rejects array-shaped message content; flatten it to
    // a JSON string.
    const messages = args.messages.map((item: any) =>
      Array.isArray(item.content)
        ? { ...item, content: JSON.stringify(item.content) }
        : item
    );
    return client.chat.completions.create({
      ...rest,
      messages: [
        ...messages,
        {
          role: "system",
          content:
            "You are a code writer who helps users write code based on their specific requirements. You create algorithms, implement functionality, and build structures according to the clear instructions provided by the user. Your focus is solely on writing code, ensuring that the task is completed accurately and efficiently.",
        },
      ],
      model: process.env.CODER_AGENT_MODEL as string,
    });
  },
};
/**
 * Delegates tool-using requests to the TOOL_AGENT_* model, which picks and
 * invokes the appropriate tool from the caller-supplied list.
 */
const useToolRouter: BaseRouter = {
  name: "use-tool",
  description: `This agent can call user-specified tools to perform tasks. The user provides a list of tools to be used, and the agent integrates these tools to complete the specified tasks efficiently. The agent follows user instructions and ensures proper tool utilization for each request`,
  run(args) {
    const client = createClient({
      apiKey: process.env.TOOL_AGENT_API_KEY,
      baseURL: process.env.TOOL_AGENT_BASE_URL,
    });
    // Append the tool-selection instruction as a trailing system message.
    const selectionInstruction = {
      role: "system" as const,
      content:
        "You need to select the appropriate tool for the task based on the users request. Review the requirements and choose the tool that fits the task best.",
    };
    return client.chat.completions.create({
      ...args,
      messages: [...args.messages, selectionInstruction],
      model: process.env.TOOL_AGENT_MODEL as string,
    });
  },
};
/**
 * Delegates deep-reasoning requests to the THINK_AGENT_* model
 * (deepseek-reasoner by default). Never exposes tools to that model.
 */
const thinkRouter: BaseRouter = {
  name: "think",
  description: `This agent is used solely for complex reasoning and thinking tasks. It should not be called for information retrieval or repetitive, frequent requests. Only use this agent for tasks that require deep analysis or problem-solving. If there is an existing result from the Thinker agent, do not call this agent again.你只负责深度思考以拆分任务,不需要进行任何的编码和调用工具。最后讲拆分的步骤按照顺序返回。比如\n1. xxx\n2. xxx\n3. xxx`,
  run(args) {
    const client = createClient({
      apiKey: process.env.THINK_AGENT_API_KEY,
      baseURL: process.env.THINK_AGENT_BASE_URL,
    });
    // Deep-copy so the caller's message list is never mutated.
    const messages = JSON.parse(JSON.stringify(args.messages));
    // The reasoner endpoint rejects array-shaped message content; flatten it
    // to a JSON string.
    messages.forEach((msg: any) => {
      if (Array.isArray(msg.content)) {
        msg.content = JSON.stringify(msg.content);
      }
    });
    // deepseek-reasoner requires strictly alternating user/assistant turns
    // after the system prefix, so force the roles into that pattern.
    let startIdx = messages.findIndex((msg: any) => msg.role !== "system");
    if (startIdx === -1) startIdx = messages.length;
    for (let i = startIdx; i < messages.length; i++) {
      const expectedRole = (i - startIdx) % 2 === 0 ? "user" : "assistant";
      messages[i].role = expectedRole;
    }
    // The request must end on a user turn.
    if (
      messages.length > 0 &&
      messages[messages.length - 1].role === "assistant"
    ) {
      messages.push({
        role: "user",
        content:
          "Please follow the instructions provided above to resolve the issue.",
      });
    }
    // Strip tools WITHOUT mutating the caller's request object. The original
    // `delete args.tools` destroyed the tool list for any later routing pass
    // that reused the same args.
    const { tools: _tools, ...rest } = args;
    return client.chat.completions.create({
      ...rest,
      messages,
      model: process.env.THINK_AGENT_MODEL as string,
    });
  },
};
/**
 * Dispatches incoming chat requests: a routing model inspects the
 * conversation and either answers directly or delegates to one of the
 * specialised agents ("coder", "use-tool", "think").
 */
export class Router {
  // Specialised agents this router may delegate to.
  routers: BaseRouter[];
  // Client for the routing model itself (ROUTER_AGENT_* env settings).
  client: OpenAI;
  constructor() {
    this.routers = [coderRouter, useToolRouter, thinkRouter];
    this.client = createClient({
      apiKey: process.env.ROUTER_AGENT_API_KEY,
      baseURL: process.env.ROUTER_AGENT_BASE_URL,
    });
  }
  /**
   * Ask the routing model whether to delegate. Returns either the delegated
   * agent's completion, or a single-element array wrapping the router's own
   * non-streamed completion, with a `delta` field patched on so downstream
   * streaming code can treat it like a chunk.
   *
   * NOTE(review): when "think" or "coder" runs, args.messages is mutated and
   * route() recurses with no depth limit — confirm that is intended.
   */
  async route(
    args: OpenAI.Chat.Completions.ChatCompletionCreateParams
  ): Promise<any> {
    log(`Route: ${JSON.stringify(args, null, 2)}`);
    // Append the routing instruction and force stream:false so the decision
    // JSON can be parsed in one piece.
    const res: OpenAI.Chat.Completions.ChatCompletion =
      await this.client.chat.completions.create({
        ...args,
        messages: [
          ...args.messages,
          {
            role: "system",
            content: `You are an AI task router and executor, responsible for understanding user requests and directing them to the appropriate processing mode or tool based on the task type and requirements. Your main responsibility is to determine the nature of the request, execute the task when possible, and respond appropriately.
### **Guidelines:**
- **If an external tool is required to complete the task (such as searching for information, generating images, or modifying code), route the task to \`use-tool\` rather than handling it directly.**
- If the task requires generating an image, route to \`use-tool\` and specify the image generation tool.
- If the task requires searching for information, route to \`use-tool\` and specify the search tool.
- If the task requires modifying or executing code, route to \`use-tool\` and specify the code handling tool.
- **Do NOT execute the tool action directly; always trigger it through \`use-tool\`.**
- **If the user is chatting casually or having a general conversation, respond naturally and conversationally. Improving the user experience through friendly interactions is one of your main responsibilities.**
- **If the user's request involves deep thinking, complex reasoning, or multi-step analysis, use the "think" mode to break down and solve the problem.**
- **If the user's request involves coding or technical implementation, use the "coder" mode to generate or modify code.**
- **After generating the code, if the task requires applying or integrating the code, route to \`use-tool\` and specify the code execution tool.**
- **Do NOT re-trigger "coder" to apply code — route to \`use-tool\` instead.**
### **Format requirements:**
- When you need to trigger a specific mode (such as "think", "coder", or "use-tool"), return the following JSON format:
### IMPORTANT:
- 你不能也不会调用BatchTool如果你需要使用工具请路由到\`use-tool\`,由\`use-tool\`来调用BatchTool。
\`\`\`json
{
"use": "<mode-name>",
}
\`\`\`
`,
          },
        ],
        model: process.env.ROUTER_AGENT_MODEL as string,
        stream: false,
      });
    let result;
    try {
      const text = res.choices[0].message.content;
      if (!text) {
        throw new Error("No text");
      }
      // Extract the first {...} span — the model may wrap the JSON in a code
      // fence or surrounding prose.
      result = JSON.parse(
        text.slice(text.indexOf("{"), text.lastIndexOf("}") + 1)
      );
    } catch (e) {
      // No parseable routing decision: treat the router's reply as the final
      // answer and shim it to look like a one-chunk stream.
      (res.choices[0] as any).delta = res.choices[0].message;
      return [res];
    }
    const router = this.routers.find((item) => item.name === result.use);
    if (!router) {
      // Decision named an unknown agent: fall back to the router's own reply.
      (res.choices[0] as any).delta = res.choices[0].message;
      log(`No Router: ${JSON.stringify(res.choices[0].message)}`);
      return [res];
    }
    log(`Use Router: ${router.name}`);
    if (router.name === "think" || router.name === "coder") {
      // Run the agent synchronously, feed its answer back into the
      // conversation as a user turn, then re-route.
      const agentResult = await router.run({
        ...args,
        stream: false,
      });
      try {
        args.messages.push({
          role: "user",
          content:
            `${router.name} Agent Result: ` +
            agentResult.choices[0].message.content,
        });
        log(
          `${router.name} Agent Result: ` +
            agentResult.choices[0].message.content
        );
        return await this.route(args);
      } catch (error) {
        // Dump the raw agent payload for debugging before propagating.
        console.log(agentResult);
        throw error;
      }
    }
    return router.run(args);
  }
}

159
src/server.ts Normal file
View File

@@ -0,0 +1,159 @@
import express, { RequestHandler } from "express";
import {
ContentBlockParam,
MessageCreateParamsBase,
} from "@anthropic-ai/sdk/resources/messages";
import { OpenAI } from "openai";
import { Router } from "./deepseek";
import { getOpenAICommonOptions } from "./utils";
import { streamOpenAIResponse } from "./utils/stream";
// Minimal abstraction over "something that can serve a chat completion":
// either the multi-agent Router or a plain OpenAI-compatible client.
interface Client {
  call: (
    data: OpenAI.Chat.Completions.ChatCompletionCreateParams
  ) => Promise<any>;
}
// Handle returned by createServer: the underlying Express app plus helpers
// to register /v1/messages middleware and start listening.
interface Server {
  app: express.Application;
  useMiddleware: (middleware: RequestHandler) => void;
  start: () => void;
}
/**
 * Build the Anthropic-compatible proxy server. Exposes POST /v1/messages,
 * translating Anthropic Messages requests into OpenAI chat-completion calls
 * and streaming the reply back as Anthropic SSE events.
 *
 * @param port TCP port passed to app.listen when start() is called.
 */
export const createServer = (port: number): Server => {
  const app = express();
  // Claude Code can send very large conversation payloads.
  app.use(express.json({ limit: "500mb" }));

  // Pick the backend: the multi-agent Router when ENABLE_ROUTER=true,
  // otherwise a single OpenAI-compatible endpoint.
  let client: Client;
  if (process.env.ENABLE_ROUTER && process.env.ENABLE_ROUTER === "true") {
    const router = new Router();
    client = {
      call: (data) => router.route(data),
    };
  } else {
    const openai = new OpenAI({
      apiKey: process.env.OPENAI_API_KEY,
      baseURL: process.env.OPENAI_BASE_URL,
      ...getOpenAICommonOptions(),
    });
    client = {
      call: (data) => {
        // OPENAI_MODEL, when configured, overrides whatever model the
        // Anthropic client asked for.
        if (process.env.OPENAI_MODEL) {
          data.model = process.env.OPENAI_MODEL;
        }
        return openai.chat.completions.create(data);
      },
    };
  }

  app.post("/v1/messages", async (req, res) => {
    try {
      let {
        model,
        max_tokens,
        messages,
        system = [],
        temperature,
        metadata,
        tools,
      }: MessageCreateParamsBase = req.body;

      // Flatten Anthropic content-block arrays into plain strings, which is
      // all the OpenAI chat API accepts here.
      const openAIMessages: OpenAI.Chat.Completions.ChatCompletionMessageParam[] =
        messages.map((item) => {
          if (item.content instanceof Array) {
            return {
              role: item.role,
              content: item.content
                .map((it: ContentBlockParam) => {
                  if (it.type === "text") {
                    return typeof it.text === "string"
                      ? it.text
                      : JSON.stringify(it);
                  }
                  return JSON.stringify(it);
                })
                .join(""),
            } as OpenAI.Chat.Completions.ChatCompletionMessageParam;
          }
          return {
            role: item.role,
            content:
              typeof item.content === "string"
                ? item.content
                : JSON.stringify(item.content),
          };
        });

      // Anthropic allows `system` as a single string or a list of text blocks.
      const systemMessages: OpenAI.Chat.Completions.ChatCompletionMessageParam[] =
        Array.isArray(system)
          ? system.map((item) => ({
              role: "system",
              content: item.text,
            }))
          : [{ role: "system", content: system }];

      const data: OpenAI.Chat.Completions.ChatCompletionCreateParams = {
        model,
        messages: [...systemMessages, ...openAIMessages],
        temperature,
        stream: true,
      };
      if (tools) {
        // Convert Anthropic tool definitions into OpenAI function tools.
        // StickerRequest has no useful OpenAI equivalent, so it is dropped.
        data.tools = tools
          .filter((tool) => !["StickerRequest"].includes(tool.name))
          .map((item: any) => ({
            type: "function",
            function: {
              name: item.name,
              description: item.description,
              parameters: item.input_schema,
            },
          }));
      }

      res.setHeader("Content-Type", "text/event-stream");
      res.setHeader("Cache-Control", "no-cache");
      res.setHeader("Connection", "keep-alive");

      try {
        const completion = await client.call(data);
        await streamOpenAIResponse(res, completion, model);
      } catch (e) {
        console.error("Error in OpenAI API call:", e);
        // Re-throw so the outer handler streams an error message back. The
        // original swallowed the error here, leaving the client's request
        // hanging with no response.
        throw e;
      }
    } catch (error) {
      console.error("Error in request processing:", error);
      // Fabricate a one-chunk "stream" carrying the error text so the client
      // still receives a well-formed SSE response.
      const errorCompletion: AsyncIterable<OpenAI.Chat.Completions.ChatCompletionChunk> =
        {
          async *[Symbol.asyncIterator]() {
            yield {
              id: `error_${Date.now()}`,
              created: Math.floor(Date.now() / 1000),
              model: "gpt-3.5-turbo",
              object: "chat.completion.chunk",
              choices: [
                {
                  index: 0,
                  delta: {
                    content: `Error: ${(error as Error).message}`,
                  },
                  finish_reason: "stop",
                },
              ],
            };
          },
        };
      await streamOpenAIResponse(res, errorCompletion, "gpt-3.5-turbo");
    }
  });

  return {
    app,
    useMiddleware: (middleware: RequestHandler) => {
      app.use("/v1/messages", middleware);
    },
    start: () => {
      app.listen(port, () => {
        console.log(`Server is running on port ${port}`);
      });
    },
  };
};

57
src/utils/index.ts Normal file
View File

@@ -0,0 +1,57 @@
import { HttpsProxyAgent } from "https-proxy-agent";
import OpenAI, { ClientOptions } from "openai";
import fs from "node:fs/promises";
import {
CONFIG_FILE,
DEFAULT_CONFIG,
HOME_DIR,
PROMPTS_DIR,
} from "../constants";
/**
 * Shared OpenAI client options: honors an outbound HTTPS proxy when
 * PROXY_URL is set, otherwise returns plain defaults.
 */
export function getOpenAICommonOptions(): ClientOptions {
  const proxyUrl = process.env.PROXY_URL;
  if (!proxyUrl) {
    return {};
  }
  return { httpAgent: new HttpsProxyAgent(proxyUrl) };
}
/**
 * Create a directory (and any missing parents) if it does not already exist.
 * mkdir with recursive:true is idempotent, so the original access()-then-
 * mkdir probe (a check-then-use race) is unnecessary.
 */
const ensureDir = async (dir_path: string) => {
  await fs.mkdir(dir_path, { recursive: true });
};
/**
 * Make sure the router's home directory and its prompts subdirectory exist.
 */
export const initDir = async () => {
  for (const dir of [HOME_DIR, PROMPTS_DIR]) {
    await ensureDir(dir);
  }
};
/**
 * Load the user's JSON config. On first run (or an unreadable/corrupt file)
 * persist and return DEFAULT_CONFIG instead.
 */
export const readConfigFile = async () => {
  try {
    const raw = await fs.readFile(CONFIG_FILE, "utf-8");
    return JSON.parse(raw);
  } catch {
    await writeConfigFile(DEFAULT_CONFIG);
    return DEFAULT_CONFIG;
  }
};
/**
 * Persist the config to CONFIG_FILE, pretty-printed so it stays
 * hand-editable.
 */
export const writeConfigFile = async (config: any) => {
  const serialized = JSON.stringify(config, null, 2);
  await fs.writeFile(CONFIG_FILE, serialized);
};
/**
 * Load the config file and copy every key onto process.env so the rest of
 * the app reads configuration uniformly via environment variables.
 *
 * NOTE(review): process.env coerces assigned values to strings (false ->
 * "false"), and the default `log` key never sets the `LOG` variable that
 * utils/log.ts checks — confirm the intended key casing.
 */
export const initConfig = async () => {
  const config = await readConfigFile();
  Object.assign(process.env, config);
};
/**
 * Build an OpenAI-compatible client, merging caller options with the shared
 * defaults (proxy agent, etc.).
 */
export const createClient = (options: ClientOptions) =>
  new OpenAI({
    ...options,
    ...getOpenAICommonOptions(),
  });

27
src/utils/log.ts Normal file
View File

@@ -0,0 +1,27 @@
import fs from 'node:fs';
import path from 'node:path';
import { HOME_DIR } from '../constants';

const LOG_FILE = path.join(HOME_DIR, 'claude-code-router.log');

// Ensure log directory exists before the first append.
if (!fs.existsSync(HOME_DIR)) {
  fs.mkdirSync(HOME_DIR, { recursive: true });
}

/**
 * Append one timestamped line to the router log file. Logging is opt-in via
 * the LOG=true environment variable; otherwise this is a no-op.
 * Objects are JSON-serialised; anything unserialisable (e.g. a circular
 * structure, which would make the original JSON.stringify call throw) falls
 * back to String().
 */
export function log(...args: any[]) {
  // Check if logging is enabled via environment variable
  const isLogEnabled = process.env.LOG === 'true';
  if (!isLogEnabled) {
    return;
  }
  const timestamp = new Date().toISOString();
  const parts = args.map((arg) => {
    if (typeof arg !== 'object' || arg === null) {
      return String(arg);
    }
    try {
      return JSON.stringify(arg);
    } catch {
      return String(arg);
    }
  });
  // Append to log file
  fs.appendFileSync(LOG_FILE, `[${timestamp}] ${parts.join(' ')}\n`, 'utf8');
}

268
src/utils/stream.ts Normal file
View File

@@ -0,0 +1,268 @@
import { Response } from "express";
import { OpenAI } from "openai";
// One Anthropic-style content block being assembled from the stream: either
// plain text ("text") or a tool invocation ("tool_use") whose arguments
// arrive as JSON fragments.
interface ContentBlock {
  type: string;
  id?: string; // tool_use only: synthetic tool-call id
  name?: string; // tool_use only: tool name
  input?: any; // tool_use only: parsed arguments, once the JSON is complete
  text?: string; // text only: accumulated text so far
}
// Loose envelope for every Anthropic SSE event this module emits
// (message_start, content_block_start/delta/stop, message_delta,
// message_stop). Fields are optional because each event type populates a
// different subset.
interface MessageEvent {
  type: string;
  // message_start: skeleton of the assistant message being streamed.
  message?: {
    id: string;
    type: string;
    role: string;
    content: any[];
    model: string;
    stop_reason: string | null;
    stop_sequence: string | null;
    usage: {
      input_tokens: number;
      output_tokens: number;
    };
  };
  // content_block_delta / message_delta payload.
  delta?: {
    stop_reason?: string;
    stop_sequence?: string | null;
    content?: ContentBlock[];
    type?: string;
    text?: string; // text_delta
    partial_json?: string; // input_json_delta (tool-call arguments)
  };
  // Index of the content block the event refers to.
  index?: number;
  // content_block_start: the new block's initial shape.
  content_block?: ContentBlock;
  // message_delta: token usage totals.
  usage?: {
    input_tokens: number;
    output_tokens: number;
  };
}
/**
 * Bridge an OpenAI streaming chat completion onto an Express response as
 * Anthropic-style server-sent events: message_start, then one or more
 * content blocks (text and/or tool_use) framed by content_block_start /
 * content_block_delta / content_block_stop, then message_delta and
 * message_stop, finally ending the response.
 *
 * @param res        Express response; SSE headers are expected to be set by the caller.
 * @param completion Async iterable of OpenAI chat-completion chunks.
 * @param model      Model name echoed in the message_start event.
 *
 * NOTE(review): only delta.tool_calls[0] is inspected per chunk, so two
 * parallel tool calls in one chunk would be merged into a single block —
 * confirm upstream models never emit more than one. The usage token counts
 * below are hard-coded placeholders, not real totals.
 */
export async function streamOpenAIResponse(
  res: Response,
  completion: AsyncIterable<OpenAI.Chat.Completions.ChatCompletionChunk>,
  model: string
) {
  const messageId = "msg_" + Date.now();
  let contentBlockIndex = 0; // index of the block currently being streamed
  let currentContentBlocks: ContentBlock[] = []; // accumulated blocks, by index
  // Send message_start event
  const messageStart: MessageEvent = {
    type: "message_start",
    message: {
      id: messageId,
      type: "message",
      role: "assistant",
      content: [],
      model,
      stop_reason: null,
      stop_sequence: null,
      usage: { input_tokens: 1, output_tokens: 1 }, // placeholder counts
    },
  };
  res.write(`event: message_start\ndata: ${JSON.stringify(messageStart)}\n\n`);
  let isToolUse = false; // currently inside a tool_use block?
  let toolUseJson = ""; // accumulated tool-call argument JSON
  let hasStartedTextBlock = false; // text content_block_start already emitted?
  try {
    for await (const chunk of completion) {
      const delta = chunk.choices[0].delta;
      if (delta.tool_calls && delta.tool_calls.length > 0) {
        const toolCall = delta.tool_calls[0];
        if (!isToolUse) {
          // Start new tool call block
          isToolUse = true;
          const toolBlock: ContentBlock = {
            type: "tool_use",
            id: `toolu_${Date.now()}`, // synthetic id in Anthropic format
            name: toolCall.function?.name,
            input: {},
          };
          const toolBlockStart: MessageEvent = {
            type: "content_block_start",
            index: contentBlockIndex,
            content_block: toolBlock,
          };
          currentContentBlocks.push(toolBlock);
          res.write(
            `event: content_block_start\ndata: ${JSON.stringify(
              toolBlockStart
            )}\n\n`
          );
          toolUseJson = "";
        }
        // Stream tool call JSON
        if (toolCall.function?.arguments) {
          const jsonDelta: MessageEvent = {
            type: "content_block_delta",
            index: contentBlockIndex,
            delta: {
              type: "input_json_delta",
              partial_json: toolCall.function?.arguments,
            },
          };
          toolUseJson += toolCall.function.arguments;
          // Opportunistically parse the accumulated fragments so the block's
          // `input` reflects the arguments as soon as they form valid JSON.
          try {
            const parsedJson = JSON.parse(toolUseJson);
            currentContentBlocks[contentBlockIndex].input = parsedJson;
          } catch (e) {
            // JSON not yet complete, continue accumulating
          }
          res.write(
            `event: content_block_delta\ndata: ${JSON.stringify(jsonDelta)}\n\n`
          );
        }
      } else if (delta.content) {
        // Handle regular text content
        if (isToolUse) {
          // End previous tool call block
          const contentBlockStop: MessageEvent = {
            type: "content_block_stop",
            index: contentBlockIndex,
          };
          res.write(
            `event: content_block_stop\ndata: ${JSON.stringify(
              contentBlockStop
            )}\n\n`
          );
          contentBlockIndex++;
          isToolUse = false;
        }
        // NOTE(review): this guard is redundant — the enclosing else-if
        // already established delta.content is truthy.
        if (!delta.content) continue;
        // If text block not yet started, send content_block_start
        if (!hasStartedTextBlock) {
          const textBlock: ContentBlock = {
            type: "text",
            text: "",
          };
          const textBlockStart: MessageEvent = {
            type: "content_block_start",
            index: contentBlockIndex,
            content_block: textBlock,
          };
          currentContentBlocks.push(textBlock);
          res.write(
            `event: content_block_start\ndata: ${JSON.stringify(
              textBlockStart
            )}\n\n`
          );
          hasStartedTextBlock = true;
        }
        // Send regular text content
        const contentDelta: MessageEvent = {
          type: "content_block_delta",
          index: contentBlockIndex,
          delta: {
            type: "text_delta",
            text: delta.content,
          },
        };
        // Update content block text
        if (currentContentBlocks[contentBlockIndex]) {
          currentContentBlocks[contentBlockIndex].text += delta.content;
        }
        res.write(
          `event: content_block_delta\ndata: ${JSON.stringify(
            contentDelta
          )}\n\n`
        );
      }
    }
  } catch (e: any) {
    // Upstream stream failed mid-flight: surface the error as a text block so
    // the client receives something rather than a silently dead stream.
    // If text block not yet started, send content_block_start
    if (!hasStartedTextBlock) {
      const textBlock: ContentBlock = {
        type: "text",
        text: "",
      };
      const textBlockStart: MessageEvent = {
        type: "content_block_start",
        index: contentBlockIndex,
        content_block: textBlock,
      };
      currentContentBlocks.push(textBlock);
      res.write(
        `event: content_block_start\ndata: ${JSON.stringify(
          textBlockStart
        )}\n\n`
      );
      hasStartedTextBlock = true;
    }
    // Send regular text content
    const contentDelta: MessageEvent = {
      type: "content_block_delta",
      index: contentBlockIndex,
      delta: {
        type: "text_delta",
        text: JSON.stringify(e),
      },
    };
    // Update content block text
    if (currentContentBlocks[contentBlockIndex]) {
      currentContentBlocks[contentBlockIndex].text += JSON.stringify(e);
    }
    res.write(
      `event: content_block_delta\ndata: ${JSON.stringify(contentDelta)}\n\n`
    );
  }
  // Close last content block
  const contentBlockStop: MessageEvent = {
    type: "content_block_stop",
    index: contentBlockIndex,
  };
  res.write(
    `event: content_block_stop\ndata: ${JSON.stringify(contentBlockStop)}\n\n`
  );
  // Send message_delta event with appropriate stop_reason
  const messageDelta: MessageEvent = {
    type: "message_delta",
    delta: {
      stop_reason: isToolUse ? "tool_use" : "end_turn",
      stop_sequence: null,
      content: currentContentBlocks,
    },
    usage: { input_tokens: 100, output_tokens: 150 }, // placeholder counts
  };
  res.write(`event: message_delta\ndata: ${JSON.stringify(messageDelta)}\n\n`);
  // Send message_stop event
  const messageStop: MessageEvent = {
    type: "message_stop",
  };
  res.write(`event: message_stop\ndata: ${JSON.stringify(messageStop)}\n\n`);
  res.end();
}