Compare commits: dev/custom...feature/mo

16 commits (SHA1; author and date columns did not survive extraction):

5e70bc70c0
9a89250d79
7a5d712444
84e76f24b0
c9059f146d
9cffebf081
111492b908
edc8ecbcba
ea68b2ea55
3b0d7bac0c
6912572fbb
2cc91ada5c
aa3f72f390
6e4022b6f1
30bf711a2a
2ade113c2a
.dockerignore (new file, 2 lines)

@@ -0,0 +1,2 @@
+node_modules
+npm-debug.log
.env.example (deleted, 31 lines)

@@ -1,31 +0,0 @@
-## If you don't want to use multi-model routing
-## set ENABLE_ROUTER to false, and define the following variables
-## the model needs to support function calling
-ENABLE_ROUTER=false
-OPENAI_API_KEY=""
-OPENAI_BASE_URL=""
-OPENAI_MODEL=""
-
-
-## If you want to use multi-model routing, set ENABLE_ROUTER to true
-# ENABLE_ROUTER=true
-
-## Define the model for the tool agent, the model needs to support function calling
-# TOOL_AGENT_API_KEY=""
-# TOOL_AGENT_BASE_URL=""
-# TOOL_AGENT_MODEL=""
-
-## Define the model for the coder agent
-# CODER_AGENT_API_KEY=""
-# CODER_AGENT_BASE_URL=""
-# CODER_AGENT_MODEL=""
-
-## Define the model for the thinker agent, using a model that supports reasoning will yield better results
-# THINK_AGENT_API_KEY=""
-# THINK_AGENT_BASE_URL=""
-# THINK_AGENT_MODEL=""
-
-## Define the model for the router agent, this model is the entry point for each request, it will consume a lot of tokens, please choose a small model to reduce costs
-# ROUTER_AGENT_API_KEY=""
-# ROUTER_AGENT_BASE_URL=""
-# ROUTER_AGENT_MODEL=""
.npmignore (new file, 9 lines)

@@ -0,0 +1,9 @@
+src
+node_modules
+.claude
+CLAUDE.md
+screenshoots
+.DS_Store
+.vscode
+.idea
+.env
CLAUDE.md (new file, 12 lines)

@@ -0,0 +1,12 @@
+# CLAUDE.md
+
+This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository. You need to use English to write text.
+
+## Key Development Commands
+- Build: `npm run build`
+- Start: `npm start`
+
+## Architecture
+- Uses `express` for routing (see `src/server.ts`)
+- Bundles with `esbuild` for CLI distribution
+- Plugins are loaded from `$HOME/.claude-code-router/plugins`
LICENSE (new file, 21 lines)

@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2025 musistudio
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
README.md (modified)

@@ -1,47 +1,32 @@
 # Claude Code Router
 
-> This is a repository for testing routing Claude Code requests to different models.
+> This is a tool for routing Claude Code requests to different models, and you can customize any request.
 
+
+
-## Implemented
-
-- [x] Support writing custom plugins for rewriting prompts.
-
-- [x] Support writing custom plugins for implementing routers.
-
 ## Usage
 
-0. Install Claude Code
+1. Install Claude Code
 
 ```shell
 npm install -g @anthropic-ai/claude-code
 ```
 
-1. Clone this repo and install dependencies
+2. Install Claude Code Router
 
 ```shell
-git clone https://github.com/musistudio/claude-code-router
-cd claude-code-router && pnpm i
-npm run build
+npm install -g @musistudio/claude-code-router
 ```
 
-2. Start claude-code-router server
+3. Start Claude Code by claude-code-router
 
 ```shell
-node dist/cli.js
+ccr code
 ```
 
-3. Set environment variable to start claude code
-
-```shell
-export DISABLE_PROMPT_CACHING=1
-export ANTHROPIC_BASE_URL="http://127.0.0.1:3456"
-export API_TIMEOUT_MS=600000
-claude
-```
-
-## Plugin
+## Plugin[Beta]
 
 The plugin allows users to rewrite Claude Code prompt and custom router. The plugin path is in `$HOME/.claude-code-router/plugins`. Currently, there are two demos available:
 1. [custom router](https://github.com/musistudio/claude-code-router/blob/dev/custom-prompt/plugins/deepseek.js)

@@ -58,3 +43,23 @@ You need to move them to the `$HOME/.claude-code-router/plugins` directory and c
 "OPENAI_MODEL": ""
 }
 ```
+
+## Features
+- [x] Plugins
+- [ ] Support change models
+- [ ] Support scheduled tasks
+
+
+## Some tips:
+If you’re using the DeepSeek API provided by the official website, you might encounter an “exceeding context” error after several rounds of conversation (since the official API only supports a 64K context window). In this case, you’ll need to discard the previous context and start fresh. Alternatively, you can use ByteDance’s DeepSeek API, which offers a 128K context window and supports KV cache.
+
+
+
+Note: claude code consumes a huge amount of tokens, but thanks to DeepSeek’s low cost, you can use claude code at a fraction of Claude’s price, and you don’t need to subscribe to the Claude Max plan.
+
+Some interesting points: Based on my testing, including a lot of context information can help narrow the performance gap between these LLM models. For instance, when I used Claude-4 in VSCode Copilot to handle a Flutter issue, it messed up the files in three rounds of conversation, and I had to roll everything back. However, when I used claude code with DeepSeek, after three or four rounds of conversation, I finally managed to complete my task—and the cost was less than 1 RMB!
+
+
+## Buy me a coffee
+If you find this project helpful, you can choose to sponsor the author with a cup of coffee.
+[Buy me a coffee](http://paypal.me/musistudio1999)
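A note on the plugin shape: as the deleted demo plugins further down this diff and the reworked `rewriteBody` middleware show, a plugin is one file in `$HOME/.claude-code-router/plugins` exporting a single async handler, and `rewriteBody` now awaits `rewritePlugin(req, res)` for every name listed in `config.usePlugins`. A minimal sketch; the filename and the rewrite logic are illustrative, not from this diff:

```javascript
// $HOME/.claude-code-router/plugins/example.js (hypothetical name)
// In the new middleware order, rewriteBody runs before formatRequest,
// so req.body is still in Anthropic Messages format at this point.
module.exports = async function handle(req, res) {
  if (Array.isArray(req.body.messages)) {
    // Illustrative rewrite: prepend a reminder to every request.
    req.body.messages.unshift({
      role: "user",
      content: "Answer concisely.",
    });
  }
};
```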
config.json (new file, 8 lines)

@@ -0,0 +1,8 @@
+{
+  "usePlugin": "",
+  "LOG": true,
+  "OPENAI_API_KEY": "",
+  "OPENAI_BASE_URL": "",
+  "OPENAI_MODEL": "",
+  "modelProviders": {}
+}
docker-compose.yml (new file, 13 lines)

@@ -0,0 +1,13 @@
+version: "3.8"
+
+services:
+  claude-code-reverse:
+    build: .
+    ports:
+      - "3456:3456"
+    environment:
+      - ENABLE_ROUTER=${ENABLE_ROUTER}
+      - OPENAI_API_KEY=${OPENAI_API_KEY}
+      - OPENAI_BASE_URL=${OPENAI_BASE_URL}
+      - OPENAI_MODEL=${OPENAI_MODEL}
+    restart: unless-stopped
dockerfile (new file, 12 lines)

@@ -0,0 +1,12 @@
+FROM node:20-alpine
+
+WORKDIR /app
+
+COPY package*.json ./
+RUN npm i
+
+COPY . .
+
+EXPOSE 3456
+
+CMD ["node", "index.mjs"]
package-lock.json (generated, 1013 lines changed)

File diff suppressed because it is too large.
package.json (modified)

@@ -1,24 +1,32 @@
 {
-  "name": "claude-code-router",
-  "version": "1.0.0",
+  "name": "@musistudio/claude-code-router",
+  "version": "1.0.3",
   "description": "Use Claude Code without an Anthropics account and route it to another LLM provider",
   "bin": {
-    "claude-code-router": "./dist/cli.js"
+    "ccr": "./dist/cli.js"
   },
   "scripts": {
     "start": "node dist/cli.js",
-    "build": "tsc && esbuild src/index.ts --bundle --platform=node --outfile=dist/cli.js"
+    "build": "esbuild src/cli.ts --bundle --platform=node --outfile=dist/cli.js",
+    "buildserver": "esbuild src/index.ts --bundle --platform=node --outfile=dist/index.js"
   },
-  "keywords": ["claude", "code", "router", "llm", "anthropic"],
+  "keywords": [
+    "claude",
+    "code",
+    "router",
+    "llm",
+    "anthropic"
+  ],
   "author": "musistudio",
   "license": "MIT",
   "dependencies": {
-    "@anthropic-ai/claude-code": "^0.2.53",
     "@anthropic-ai/sdk": "^0.39.0",
     "dotenv": "^16.4.7",
     "express": "^4.21.2",
     "https-proxy-agent": "^7.0.6",
-    "openai": "^4.85.4"
+    "lru-cache": "^11.1.0",
+    "openai": "^4.85.4",
+    "tiktoken": "^1.0.21",
+    "uuid": "^11.1.0"
   },
   "devDependencies": {
     "@types/express": "^5.0.0",
plugins/deepseek.js (deleted, 139 lines; the custom-router demo linked from the README)

@@ -1,139 +0,0 @@
-const {
-  log,
-  streamOpenAIResponse,
-  createClient,
-} = require("claude-code-router");
-
-const thinkRouter = {
-  name: "think",
-  description: `This agent is used solely for complex reasoning and thinking tasks. It should not be called for information retrieval or repetitive, frequent requests. Only use this agent for tasks that require deep analysis or problem-solving. If there is an existing result from the Thinker agent, do not call this agent again. You are only responsible for deep thinking to break the task into steps; do not do any coding or call any tools. Finally, return the steps in order, for example:\n1. xxx\n2. xxx\n3. xxx`,
-  run(args) {
-    const client = createClient({
-      apiKey: process.env.THINK_AGENT_API_KEY,
-      baseURL: process.env.THINK_AGENT_BASE_URL,
-    });
-    const messages = JSON.parse(JSON.stringify(args.messages));
-    messages.forEach((msg) => {
-      if (Array.isArray(msg.content)) {
-        msg.content = JSON.stringify(msg.content);
-      }
-    });
-
-    let startIdx = messages.findIndex((msg) => msg.role !== "system");
-    if (startIdx === -1) startIdx = messages.length;
-
-    for (let i = startIdx; i < messages.length; i++) {
-      const expectedRole = (i - startIdx) % 2 === 0 ? "user" : "assistant";
-      messages[i].role = expectedRole;
-    }
-
-    if (
-      messages.length > 0 &&
-      messages[messages.length - 1].role === "assistant"
-    ) {
-      messages.push({
-        role: "user",
-        content:
-          "Please follow the instructions provided above to resolve the issue.",
-      });
-    }
-    delete args.tools;
-    return client.chat.completions.create({
-      ...args,
-      messages,
-      model: process.env.THINK_AGENT_MODEL,
-    });
-  },
-};
-
-class Router {
-  constructor() {
-    this.routers = [thinkRouter];
-    this.client = createClient({
-      apiKey: process.env.ROUTER_AGENT_API_KEY,
-      baseURL: process.env.ROUTER_AGENT_BASE_URL,
-    });
-  }
-  async route(args) {
-    log(`Request Router: ${JSON.stringify(args, null, 2)}`);
-    const res = await this.client.chat.completions.create({
-      ...args,
-      messages: [
-        ...args.messages,
-        {
-          role: "system",
-          content: `## **Guidelines:**
-- **Trigger the "think" mode when the user's request involves deep thinking, complex reasoning, or multi-step analysis.**
-- **Criteria:**
-  - Involves multi-layered logical reasoning or causal analysis
-  - Requires establishing connections or pattern recognition between different pieces of information
-  - Involves cross-domain knowledge integration or weighing multiple possibilities
-  - Requires creative thinking or non-direct inference
-### **Special Case:**
-- **When the user sends "test", respond with "success" only.**
-
-### **Format requirements:**
-- When you need to trigger the "think" mode, return the following JSON format:
-\`\`\`json
-{
-  "use": "think"
-}
-\`\`\`
-`,
-        },
-      ],
-      model: process.env.ROUTER_AGENT_MODEL,
-      stream: false,
-    });
-    let result;
-    try {
-      const text = res.choices[0].message.content;
-      if (!text) {
-        throw new Error("No text");
-      }
-      result = JSON.parse(
-        text.slice(text.indexOf("{"), text.lastIndexOf("}") + 1)
-      );
-    } catch (e) {
-      res.choices[0].delta = res.choices[0].message;
-      log(`No Router: ${JSON.stringify(res.choices[0].message)}`);
-      return [res];
-    }
-    const router = this.routers.find((item) => item.name === result.use);
-    if (!router) {
-      res.choices[0].delta = res.choices[0].message;
-      log(`No Router: ${JSON.stringify(res.choices[0].message)}`);
-      return [res];
-    }
-    log(`Use Router: ${router.name}`);
-    if (router.name === "think") {
-      const agentResult = await router.run({
-        ...args,
-        stream: false,
-      });
-      try {
-        args.messages.push({
-          role: "user",
-          content:
-            `${router.name} Agent Result: ` +
-            agentResult.choices[0].message.content,
-        });
-        log(
-          `${router.name} Agent Result: ` +
-            agentResult.choices[0].message.content
-        );
-        return await this.route(args);
-      } catch (error) {
-        console.log(agentResult);
-        throw error;
-      }
-    }
-    return router.run(args);
-  }
-}
-
-const router = new Router();
-module.exports = async function handle(req, res, next) {
-  const completions = await router.route(req.body);
-  streamOpenAIResponse(res, completions, req.body.model);
-};
(deleted plugin, 23 lines; Gemini tool-schema rewrite)

@@ -1,23 +0,0 @@
-module.exports = async function handle(req, res, next) {
-  if (Array.isArray(req.body.tools)) {
-    // rewrite tools definition
-    req.body.tools.forEach((tool) => {
-      if (tool.function.name === "BatchTool") {
-        // HACK: Gemini does not support objects with empty properties
-        tool.function.parameters.properties.invocations.items.properties.input.type =
-          "number";
-        return;
-      }
-      Object.keys(tool.function.parameters.properties).forEach((key) => {
-        const prop = tool.function.parameters.properties[key];
-        if (
-          prop.type === "string" &&
-          !["enum", "date-time"].includes(prop.format)
-        ) {
-          delete prop.format;
-        }
-      });
-    });
-  }
-  next();
-};
pnpm-lock.yaml (generated, modified)

@@ -5,9 +5,6 @@ settings:
   excludeLinksFromLockfile: false
 
 dependencies:
-  '@anthropic-ai/claude-code':
-    specifier: ^0.2.53
-    version: 0.2.53
   '@anthropic-ai/sdk':
     specifier: ^0.39.0
     version: 0.39.0

@@ -20,9 +17,18 @@ dependencies:
   https-proxy-agent:
     specifier: ^7.0.6
     version: 7.0.6
+  lru-cache:
+    specifier: ^11.1.0
+    version: 11.1.0
   openai:
     specifier: ^4.85.4
     version: 4.86.1
+  tiktoken:
+    specifier: ^1.0.21
+    version: 1.0.21
+  uuid:
+    specifier: ^11.1.0
+    version: 11.1.0
 
 devDependencies:
   '@types/express':

@@ -37,18 +43,6 @@ devDependencies:
 
 packages:
 
-  /@anthropic-ai/claude-code@0.2.53:
-    resolution: {integrity: sha512-DKXGjSsu2+rc1GaAdOjRqD7fMLvyQgwi/sqf6lLHWQAarwYxR/ahbSheu7h1Ub0wm0htnuIqgNnmNZUM43w/3Q==}
-    engines: {node: '>=18.0.0'}
-    hasBin: true
-    requiresBuild: true
-    optionalDependencies:
-      '@img/sharp-darwin-arm64': 0.33.5
-      '@img/sharp-linux-arm': 0.33.5
-      '@img/sharp-linux-x64': 0.33.5
-      '@img/sharp-win32-x64': 0.33.5
-    dev: false
-
   /@anthropic-ai/sdk@0.39.0:
     resolution: {integrity: sha512-eMyDIPRZbt1CCLErRCi3exlAvNkBtRe+kW5vvJyef93PmNr/clstYgHhtvmkxN82nlKgzyGPCyGxrm0JQ1ZIdg==}
     dependencies:

@@ -288,72 +282,6 @@
     dev: true
     optional: true
 
-  /@img/sharp-darwin-arm64@0.33.5:
-    resolution: {integrity: sha512-UT4p+iz/2H4twwAoLCqfA9UH5pI6DggwKEGuaPy7nCVQ8ZsiY5PIcrRvD1DzuY3qYL07NtIQcWnBSY/heikIFQ==}
-    engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0}
-    cpu: [arm64]
-    os: [darwin]
-    requiresBuild: true
-    optionalDependencies:
-      '@img/sharp-libvips-darwin-arm64': 1.0.4
-    dev: false
-    optional: true
-
-  /@img/sharp-libvips-darwin-arm64@1.0.4:
-    resolution: {integrity: sha512-XblONe153h0O2zuFfTAbQYAX2JhYmDHeWikp1LM9Hul9gVPjFY427k6dFEcOL72O01QxQsWi761svJ/ev9xEDg==}
-    cpu: [arm64]
-    os: [darwin]
-    requiresBuild: true
-    dev: false
-    optional: true
-
-  /@img/sharp-libvips-linux-arm@1.0.5:
-    resolution: {integrity: sha512-gvcC4ACAOPRNATg/ov8/MnbxFDJqf/pDePbBnuBDcjsI8PssmjoKMAz4LtLaVi+OnSb5FK/yIOamqDwGmXW32g==}
-    cpu: [arm]
-    os: [linux]
-    requiresBuild: true
-    dev: false
-    optional: true
-
-  /@img/sharp-libvips-linux-x64@1.0.4:
-    resolution: {integrity: sha512-MmWmQ3iPFZr0Iev+BAgVMb3ZyC4KeFc3jFxnNbEPas60e1cIfevbtuyf9nDGIzOaW9PdnDciJm+wFFaTlj5xYw==}
-    cpu: [x64]
-    os: [linux]
-    requiresBuild: true
-    dev: false
-    optional: true
-
-  /@img/sharp-linux-arm@0.33.5:
-    resolution: {integrity: sha512-JTS1eldqZbJxjvKaAkxhZmBqPRGmxgu+qFKSInv8moZ2AmT5Yib3EQ1c6gp493HvrvV8QgdOXdyaIBrhvFhBMQ==}
-    engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0}
-    cpu: [arm]
-    os: [linux]
-    requiresBuild: true
-    optionalDependencies:
-      '@img/sharp-libvips-linux-arm': 1.0.5
-    dev: false
-    optional: true
-
-  /@img/sharp-linux-x64@0.33.5:
-    resolution: {integrity: sha512-opC+Ok5pRNAzuvq1AG0ar+1owsu842/Ab+4qvU879ippJBHvyY5n2mxF1izXqkPYlGuP/M556uh53jRLJmzTWA==}
-    engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0}
-    cpu: [x64]
-    os: [linux]
-    requiresBuild: true
-    optionalDependencies:
-      '@img/sharp-libvips-linux-x64': 1.0.4
-    dev: false
-    optional: true
-
-  /@img/sharp-win32-x64@0.33.5:
-    resolution: {integrity: sha512-MpY/o8/8kj+EcnxwvrP4aTJSWw/aZ7JIGR4aBeZkZw5B7/Jn+tY9/VNwtcoGmdT7GfggGIU4kygOMSbYnOrAbg==}
-    engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0}
-    cpu: [x64]
-    os: [win32]
-    requiresBuild: true
-    dev: false
-    optional: true
-
   /@types/body-parser@1.19.5:
     resolution: {integrity: sha512-fB3Zu92ucau0iQ0JMCFQE7b/dv8Ot07NI3KaZIkIUNXq82k4eBAqUaneXfleGY9JWskeS9y+u0nXMyspcuQrCg==}
     dependencies:

@@ -853,6 +781,11 @@
     engines: {node: '>= 0.10'}
     dev: false
 
+  /lru-cache@11.1.0:
+    resolution: {integrity: sha512-QIXZUBJUx+2zHUdQujWejBkcD9+cs94tLn0+YL8UrCh+D5sCXZ4c7LaEH48pNwRY3MLDgqUFyhlCyjJPf1WP0A==}
+    engines: {node: 20 || >=22}
+    dev: false
+
   /math-intrinsics@1.1.0:
     resolution: {integrity: sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==}
     engines: {node: '>= 0.4'}

@@ -1084,6 +1017,10 @@
     engines: {node: '>= 0.8'}
     dev: false
 
+  /tiktoken@1.0.21:
+    resolution: {integrity: sha512-/kqtlepLMptX0OgbYD9aMYbM7EFrMZCL7EoHM8Psmg2FuhXoo/bH64KqOiZGGwa6oS9TPdSEDKBnV2LuB8+5vQ==}
+    dev: false
+
   /toidentifier@1.0.1:
     resolution: {integrity: sha512-o5sSPKEkg/DIQNmH43V0/uerLrpzVedkUh8tGNvaeXpfpuwjKenlSox/2O/BTlZUtEe+JG7s5YhEz608PlAHRA==}
     engines: {node: '>=0.6'}

@@ -1120,6 +1057,11 @@
     engines: {node: '>= 0.4.0'}
     dev: false
 
+  /uuid@11.1.0:
+    resolution: {integrity: sha512-0/A9rDy9P7cJ+8w1c9WD9V//9Wj15Ce2MPz8Ri6032usz+NfePxx5AcN3bN+r6ZL6jEo066/yNYB3tn4pQEx+A==}
+    hasBin: true
+    dev: false
+
   /vary@1.1.2:
     resolution: {integrity: sha512-BNGbWLfd0eUPabhkXUVm0j8uuvREyTh5ovRa/dyow/BqAbZJyC+5fU+IzQOzmAKzYqYRAISoRhdQr3eIZ/PXqg==}
     engines: {node: '>= 0.8'}
screenshoots/claude-code.png (new binary file, not shown; 353 KiB)
screenshoots/contexterror.jpg (new binary file, not shown; 48 KiB)
src/cli.ts (new file, 113 lines)

@@ -0,0 +1,113 @@
+#!/usr/bin/env node
+import { run } from "./index";
+import { closeService } from "./utils/close";
+import { showStatus } from "./utils/status";
+import { executeCodeCommand } from "./utils/codeCommand";
+import { cleanupPidFile, isServiceRunning } from "./utils/processCheck";
+import { version } from "../package.json";
+
+const command = process.argv[2];
+
+const HELP_TEXT = `
+Usage: claude-code [command]
+
+Commands:
+  start         Start service
+  stop          Stop service
+  status        Show service status
+  code          Execute code command
+  -v, version   Show version information
+  -h, help      Show help information
+
+Example:
+  claude-code start
+  claude-code code "Write a Hello World"
+`;
+
+async function waitForService(
+  timeout = 10000,
+  initialDelay = 1000
+): Promise<boolean> {
+  // Wait for an initial period to let the service initialize
+  await new Promise((resolve) => setTimeout(resolve, initialDelay));
+
+  const startTime = Date.now();
+  while (Date.now() - startTime < timeout) {
+    if (isServiceRunning()) {
+      // Wait for an additional short period to ensure service is fully ready
+      await new Promise((resolve) => setTimeout(resolve, 500));
+      return true;
+    }
+    await new Promise((resolve) => setTimeout(resolve, 100));
+  }
+  return false;
+}
+
+import { spawn } from "child_process";
+import { PID_FILE, REFERENCE_COUNT_FILE } from "./constants";
+import { existsSync, readFileSync } from "fs";
+
+async function main() {
+  switch (command) {
+    case "start":
+      run();
+      break;
+    case "stop":
+      try {
+        const pid = parseInt(readFileSync(PID_FILE, "utf-8"));
+        process.kill(pid);
+        cleanupPidFile();
+        if (existsSync(REFERENCE_COUNT_FILE)) {
+          try {
+            require("fs").unlinkSync(REFERENCE_COUNT_FILE);
+          } catch (e) {
+            // Ignore cleanup errors
+          }
+        }
+        console.log(
+          "claude code router service has been successfully stopped."
+        );
+      } catch (e) {
+        console.log(
+          "Failed to stop the service. It may have already been stopped."
+        );
+        cleanupPidFile();
+      }
+      break;
+    case "status":
+      showStatus();
+      break;
+    case "code":
+      if (!isServiceRunning()) {
+        console.log("Service not running, starting service...");
+        spawn("ccr", ["start"], {
+          detached: true,
+          stdio: "ignore",
+        }).unref();
+        if (await waitForService()) {
+          executeCodeCommand(process.argv.slice(3));
+        } else {
+          console.error(
+            "Service startup timeout, please manually run claude-code start to start the service"
+          );
+          process.exit(1);
+        }
+      } else {
+        executeCodeCommand(process.argv.slice(3));
+      }
+      break;
+    case "-v":
+    case "version":
+      console.log(`claude-code version: ${version}`);
+      break;
+    case "-h":
+    case "help":
+      console.log(HELP_TEXT);
+      break;
+    default:
+      console.log(HELP_TEXT);
+      process.exit(1);
+  }
+}
+
+main().catch(console.error);
src/constants.ts (modified)

@@ -7,9 +7,14 @@ export const CONFIG_FILE = `${HOME_DIR}/config.json`;
 
 export const PLUGINS_DIR = `${HOME_DIR}/plugins`;
 
+export const PID_FILE = path.join(HOME_DIR, '.claude-code-router.pid');
+
+export const REFERENCE_COUNT_FILE = '/tmp/claude-code-reference-count.txt';
+
+
 export const DEFAULT_CONFIG = {
   log: false,
   OPENAI_API_KEY: "",
-  OPENAI_BASE_URL: "https://openrouter.ai/api/v1",
-  OPENAI_MODEL: "openai/o3-mini",
+  OPENAI_BASE_URL: "",
+  OPENAI_MODEL: "",
 };
src/index.ts (modified)

@@ -4,8 +4,16 @@ import { getOpenAICommonOptions, initConfig, initDir } from "./utils";
 import { createServer } from "./server";
 import { formatRequest } from "./middlewares/formatRequest";
 import { rewriteBody } from "./middlewares/rewriteBody";
+import { router } from "./middlewares/router";
 import OpenAI from "openai";
 import { streamOpenAIResponse } from "./utils/stream";
+import {
+  cleanupPidFile,
+  isServiceRunning,
+  savePid,
+} from "./utils/processCheck";
+import { LRUCache } from "lru-cache";
+import { log } from "./utils/log";
 
 async function initializeClaudeConfig() {
   const homeDir = process.env.HOME;

@@ -20,37 +28,131 @@ async function initializeClaudeConfig() {
       autoUpdaterStatus: "enabled",
       userID,
       hasCompletedOnboarding: true,
-      lastOnboardingVersion: "0.2.9",
+      lastOnboardingVersion: "1.0.17",
       projects: {},
     };
     await writeFile(configPath, JSON.stringify(configContent, null, 2));
   }
 }
 
-async function run() {
+interface RunOptions {
+  port?: number;
+}
+
+interface ModelProvider {
+  name: string;
+  api_base_url: string;
+  api_key: string;
+  models: string[];
+}
+
+async function run(options: RunOptions = {}) {
+  // Check if service is already running
+  if (isServiceRunning()) {
+    console.log("✅ Service is already running in the background.");
+    return;
+  }
+
   await initializeClaudeConfig();
   await initDir();
-  await initConfig();
-  const server = createServer(3456);
-  server.useMiddleware(formatRequest);
-  server.useMiddleware(rewriteBody);
+  const config = await initConfig();
 
-  const openai = new OpenAI({
-    apiKey: process.env.OPENAI_API_KEY,
-    baseURL: process.env.OPENAI_BASE_URL,
-    ...getOpenAICommonOptions(),
+  const Providers = new Map<string, ModelProvider>();
+  const providerCache = new LRUCache<string, OpenAI>({
+    max: 10,
+    ttl: 2 * 60 * 60 * 1000,
   });
+
+  function getProviderInstance(providerName: string): OpenAI {
+    const provider: ModelProvider | undefined = Providers.get(providerName);
+    if (provider === undefined) {
+      throw new Error(`Provider ${providerName} not found`);
+    }
+    let openai = providerCache.get(provider.name);
+    if (!openai) {
+      openai = new OpenAI({
+        baseURL: provider.api_base_url,
+        apiKey: provider.api_key,
+        ...getOpenAICommonOptions(),
+      });
+      providerCache.set(provider.name, openai);
+    }
+    return openai;
+  }
+
+  if (Array.isArray(config.Providers)) {
+    config.Providers.forEach((provider) => {
+      try {
+        Providers.set(provider.name, provider);
+      } catch (error) {
+        console.error("Failed to parse model provider:", error);
+      }
+    });
+  }
+
+  if (config.OPENAI_API_KEY && config.OPENAI_BASE_URL && config.OPENAI_MODEL) {
+    const defaultProvider = {
+      name: "default",
+      api_base_url: config.OPENAI_BASE_URL,
+      api_key: config.OPENAI_API_KEY,
+      models: [config.OPENAI_MODEL],
+    };
+    Providers.set("default", defaultProvider);
+  } else if (Providers.size > 0) {
+    const defaultProvider = Providers.values().next().value!;
+    Providers.set("default", defaultProvider);
+  }
+  const port = options.port || 3456;
+
+  // Save the PID of the background process
+  savePid(process.pid);
+
+  // Handle SIGINT (Ctrl+C) to clean up PID file
+  process.on("SIGINT", () => {
+    console.log("Received SIGINT, cleaning up...");
+    cleanupPidFile();
+    process.exit(0);
+  });
+
+  // Handle SIGTERM to clean up PID file
+  process.on("SIGTERM", () => {
+    cleanupPidFile();
+    process.exit(0);
+  });
+
+  // Use port from environment variable if set (for background process)
+  const servicePort = process.env.SERVICE_PORT
+    ? parseInt(process.env.SERVICE_PORT)
+    : port;
+
+  const server = await createServer(servicePort);
+  server.useMiddleware((req, res, next) => {
+    console.log("Middleware triggered for request:", req.body.model);
+    req.config = config;
+    next();
+  });
+  server.useMiddleware(rewriteBody);
+  if (
+    config.Router?.background &&
+    config.Router?.think &&
+    config?.Router?.longContext
+  ) {
+    server.useMiddleware(router);
+  }
+  server.useMiddleware(formatRequest);
 
   server.app.post("/v1/messages", async (req, res) => {
     try {
-      if (process.env.OPENAI_MODEL) {
-        req.body.model = process.env.OPENAI_MODEL;
-      }
-      const completion: any = await openai.chat.completions.create(req.body);
-      await streamOpenAIResponse(res, completion, req.body.model);
+      const provider = getProviderInstance(req.provider || "default");
+      const completion: any = await provider.chat.completions.create(req.body);
+      await streamOpenAIResponse(res, completion, req.body.model, req.body);
     } catch (e) {
      console.error("Error in OpenAI API call:", e);
     }
   });
   server.start();
+  console.log(`🚀 Claude Code Router is running on port ${servicePort}`);
 }
-run();
+
+export { run };
+// run();
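The `ModelProvider` interface and the `config.Providers` loop above imply a `Providers` array in `$HOME/.claude-code-router/config.json`; a sketch with placeholder values (the URL, key, and model name are illustrative, not from this diff):

```json
{
  "Providers": [
    {
      "name": "default",
      "api_base_url": "https://api.example.com/v1",
      "api_key": "sk-xxx",
      "models": ["some-model"]
    }
  ]
}
```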
src/middlewares/formatRequest.ts (modified)

@@ -1,8 +1,8 @@
 import { Request, Response, NextFunction } from "express";
-import { ContentBlockParam } from "@anthropic-ai/sdk/resources";
 import { MessageCreateParamsBase } from "@anthropic-ai/sdk/resources/messages";
 import OpenAI from "openai";
 import { streamOpenAIResponse } from "../utils/stream";
+import { log } from "../utils/log";
 
 export const formatRequest = async (
   req: Request,

@@ -17,33 +17,138 @@ export const formatRequest = async (
     temperature,
     metadata,
     tools,
+    stream,
   }: MessageCreateParamsBase = req.body;
+  log("formatRequest: ", req.body);
   try {
-    const openAIMessages: OpenAI.Chat.Completions.ChatCompletionMessageParam[] =
-      messages.map((item) => {
-        if (item.content instanceof Array) {
-          return {
-            role: item.role,
-            content: item.content
-              .map((it: ContentBlockParam) => {
-                if (it.type === "text") {
-                  return typeof it.text === "string"
-                    ? it.text
-                    : JSON.stringify(it);
-                }
-                return JSON.stringify(it);
-              })
-              .join(""),
-          } as OpenAI.Chat.Completions.ChatCompletionMessageParam;
-        }
-        return {
-          role: item.role,
-          content:
-            typeof item.content === "string"
-              ? item.content
-              : JSON.stringify(item.content),
-        };
-      });
+    // @ts-ignore
+    const openAIMessages = Array.isArray(messages)
+      ? messages.flatMap((anthropicMessage) => {
+          const openAiMessagesFromThisAnthropicMessage = [];
+
+          if (!Array.isArray(anthropicMessage.content)) {
+            // Handle simple string content
+            if (typeof anthropicMessage.content === "string") {
+              openAiMessagesFromThisAnthropicMessage.push({
+                role: anthropicMessage.role,
+                content: anthropicMessage.content,
+              });
+            }
+            // If content is not string and not array (e.g. null/undefined), it will result in an empty array, effectively skipping this message.
+            return openAiMessagesFromThisAnthropicMessage;
+          }
+
+          // Handle array content
+          if (anthropicMessage.role === "assistant") {
+            const assistantMessage = {
+              role: "assistant",
+              content: null, // Will be populated if text parts exist
+            };
+            let textContent = "";
+            // @ts-ignore
+            const toolCalls = []; // Corrected type here
+
+            anthropicMessage.content.forEach((contentPart) => {
+              if (contentPart.type === "text") {
+                textContent +=
+                  (typeof contentPart.text === "string"
+                    ? contentPart.text
+                    : JSON.stringify(contentPart.text)) + "\\n";
+              } else if (contentPart.type === "tool_use") {
+                toolCalls.push({
+                  id: contentPart.id,
+                  type: "function",
+                  function: {
+                    name: contentPart.name,
+                    arguments: JSON.stringify(contentPart.input),
+                  },
+                });
+              }
+            });
+
+            const trimmedTextContent = textContent.trim();
+            if (trimmedTextContent.length > 0) {
+              // @ts-ignore
+              assistantMessage.content = trimmedTextContent;
+            }
+            if (toolCalls.length > 0) {
+              // @ts-ignore
+              assistantMessage.tool_calls = toolCalls;
+            }
+            // @ts-ignore
+            if (
+              assistantMessage.content ||
+              // @ts-ignore
+              (assistantMessage.tool_calls &&
+                // @ts-ignore
+                assistantMessage.tool_calls.length > 0)
+            ) {
+              openAiMessagesFromThisAnthropicMessage.push(assistantMessage);
+            }
+          } else if (anthropicMessage.role === "user") {
+            // For user messages, text parts are combined into one message.
+            // Tool results are transformed into subsequent, separate 'tool' role messages.
+            let userTextMessageContent = "";
+            // @ts-ignore
+            const subsequentToolMessages = [];
+
+            anthropicMessage.content.forEach((contentPart) => {
+              if (contentPart.type === "text") {
+                userTextMessageContent +=
+                  (typeof contentPart.text === "string"
+                    ? contentPart.text
+                    : JSON.stringify(contentPart.text)) + "\\n";
+              } else if (contentPart.type === "tool_result") {
+                // Each tool_result becomes a separate 'tool' message
+                subsequentToolMessages.push({
+                  role: "tool",
+                  tool_call_id: contentPart.tool_use_id,
+                  content:
+                    typeof contentPart.content === "string"
+                      ? contentPart.content
+                      : JSON.stringify(contentPart.content),
+                });
+              }
+            });
+
+            const trimmedUserText = userTextMessageContent.trim();
+            if (trimmedUserText.length > 0) {
+              openAiMessagesFromThisAnthropicMessage.push({
+                role: "user",
+                content: trimmedUserText,
+              });
+            }
+            // @ts-ignore
+            openAiMessagesFromThisAnthropicMessage.push(
+              // @ts-ignore
+              ...subsequentToolMessages
+            );
+          } else {
+            // Fallback for other roles (e.g. system, or custom roles if they were to appear here with array content)
+            // This will combine all text parts into a single message for that role.
+            let combinedContent = "";
+            anthropicMessage.content.forEach((contentPart) => {
+              if (contentPart.type === "text") {
+                combinedContent +=
+                  (typeof contentPart.text === "string"
+                    ? contentPart.text
+                    : JSON.stringify(contentPart.text)) + "\\n";
+              } else {
+                // For non-text parts in other roles, stringify them or handle as appropriate
+                combinedContent += JSON.stringify(contentPart) + "\\n";
+              }
+            });
+            const trimmedCombinedContent = combinedContent.trim();
+            if (trimmedCombinedContent.length > 0) {
+              openAiMessagesFromThisAnthropicMessage.push({
+                role: anthropicMessage.role, // Cast needed as role could be other than 'user'/'assistant'
+                content: trimmedCombinedContent,
+              });
+            }
+          }
+          return openAiMessagesFromThisAnthropicMessage;
+        })
+      : [];
   const systemMessages: OpenAI.Chat.Completions.ChatCompletionMessageParam[] =
     Array.isArray(system)
       ? system.map((item) => ({

@@ -51,11 +156,11 @@ export const formatRequest = async (
         content: item.text,
       }))
       : [{ role: "system", content: system }];
-  const data: OpenAI.Chat.Completions.ChatCompletionCreateParams = {
+  const data: any = {
     model,
     messages: [...systemMessages, ...openAIMessages],
     temperature,
-    stream: true,
+    stream,
   };
   if (tools) {
     data.tools = tools

@@ -69,10 +174,13 @@ export const formatRequest = async (
       },
     }));
   }
-  res.setHeader("Content-Type", "text/event-stream");
+  if (stream) {
+    res.setHeader("Content-Type", "text/event-stream");
+  }
   res.setHeader("Cache-Control", "no-cache");
   res.setHeader("Connection", "keep-alive");
   req.body = data;
+  console.log(JSON.stringify(data.messages, null, 2));
   } catch (error) {
     console.error("Error in request processing:", error);
     const errorCompletion: AsyncIterable<OpenAI.Chat.Completions.ChatCompletionChunk> =

@@ -81,7 +189,7 @@ export const formatRequest = async (
         yield {
           id: `error_${Date.now()}`,
           created: Math.floor(Date.now() / 1000),
-          model: "gpt-3.5-turbo",
+          model,
           object: "chat.completion.chunk",
           choices: [
             {

@@ -95,7 +203,7 @@ export const formatRequest = async (
           };
         },
       };
-    await streamOpenAIResponse(res, errorCompletion, model);
+    await streamOpenAIResponse(res, errorCompletion, model, req.body);
   }
   next();
 };
src/middlewares/rewriteBody.ts (modified)

@@ -28,16 +28,18 @@ export const rewriteBody = async (
   res: Response,
   next: NextFunction
 ) => {
-  if (!process.env.usePlugin) {
+  if (!req.config.usePlugins) {
     return next();
   }
-  const pluginPath = path.join(PLUGINS_DIR, `${process.env.usePlugin}.js`);
-  try {
-    await access(pluginPath);
-    const rewritePlugin = require(pluginPath);
-    rewritePlugin(req, res, next);
-  } catch (e) {
-    console.error(e);
-    next();
+  for (const plugin of req.config.usePlugins) {
+    const pluginPath = path.join(PLUGINS_DIR, `${plugin.trim()}.js`);
+    try {
+      await access(pluginPath);
+      const rewritePlugin = require(pluginPath);
+      await rewritePlugin(req, res);
+    } catch (e) {
+      console.error(e);
+    }
   }
+  next();
 };
src/middlewares/router.ts (new file, 110 lines)

@@ -0,0 +1,110 @@
+import { MessageCreateParamsBase } from "@anthropic-ai/sdk/resources/messages";
+import { Request, Response, NextFunction } from "express";
+import { get_encoding } from "tiktoken";
+import { log } from "../utils/log";
+
+const enc = get_encoding("cl100k_base");
+
+const getUseModel = (req: Request, tokenCount: number) => {
+  // if tokenCount is greater than 32K, use the long context model
+  if (tokenCount > 1000 * 32) {
+    log("Using long context model due to token count:", tokenCount);
+    const [provider, model] = req.config.Router!.longContext.split(",");
+    return {
+      provider,
+      model,
+    };
+  }
+  // If the model is claude-3-5-haiku, use the background model
+  if (req.body.model?.startsWith("claude-3-5-haiku")) {
+    log("Using background model for ", req.body.model);
+    const [provider, model] = req.config.Router!.background.split(",");
+    return {
+      provider,
+      model,
+    };
+  }
+  // if thinking exists, use the think model
+  if (req.body.thinking) {
+    log("Using think model for ", req.body.thinking);
+    const [provider, model] = req.config.Router!.think.split(",");
+    return {
+      provider,
+      model,
+    };
+  }
+  const [provider, model] = req.body.model.split(",");
+  if (provider && model) {
+    return {
+      provider,
+      model,
+    };
+  }
+  return {
+    provider: "default",
+    model: req.config.OPENAI_MODEL,
+  };
+};
+
+export const router = async (
+  req: Request,
+  res: Response,
+  next: NextFunction
+) => {
+  const { messages, system = [], tools }: MessageCreateParamsBase = req.body;
+  try {
+    let tokenCount = 0;
+    if (Array.isArray(messages)) {
+      messages.forEach((message) => {
+        if (typeof message.content === "string") {
+          tokenCount += enc.encode(message.content).length;
+        } else if (Array.isArray(message.content)) {
+          message.content.forEach((contentPart) => {
+            if (contentPart.type === "text") {
+              tokenCount += enc.encode(contentPart.text).length;
+            } else if (contentPart.type === "tool_use") {
+              tokenCount += enc.encode(
+                JSON.stringify(contentPart.input)
+              ).length;
+            } else if (contentPart.type === "tool_result") {
+              tokenCount += enc.encode(contentPart.content || "").length;
+            }
+          });
+        }
+      });
+    }
+    if (typeof system === "string") {
+      tokenCount += enc.encode(system).length;
+    } else if (Array.isArray(system)) {
+      system.forEach((item) => {
+        if (item.type !== "text") return;
+        if (typeof item.text === "string") {
+          tokenCount += enc.encode(item.text).length;
+        } else if (Array.isArray(item.text)) {
+          item.text.forEach((textPart) => {
+            tokenCount += enc.encode(textPart || "").length;
+          });
+        }
+      });
+    }
+    if (tools) {
+      tools.forEach((tool) => {
+        if (tool.description) {
+          tokenCount += enc.encode(tool.name + tool.description).length;
+        }
+        if (tool.input_schema) {
+          tokenCount += enc.encode(JSON.stringify(tool.input_schema)).length;
+        }
+      });
+    }
+    const { provider, model } = getUseModel(req, tokenCount);
+    req.provider = provider;
+    req.body.model = model;
+  } catch (error) {
+    log("Error in router middleware:", error.message);
+    req.provider = "default";
+    req.body.model = req.config.OPENAI_MODEL;
+  } finally {
+    next();
+  }
+};
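`getUseModel` reads `config.Router.longContext`, `config.Router.background`, and `config.Router.think`, each a single `"provider,model"` string that `split(",")` breaks into a provider name and a model name; `src/index.ts` only installs this middleware when all three are present. A sketch of the matching config block (the model names are placeholders, not from this diff):

```json
{
  "Router": {
    "background": "default,a-small-cheap-model",
    "think": "default,a-reasoning-model",
    "longContext": "default,a-long-context-model"
  }
}
```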
src/server.ts (modified)

@@ -6,7 +6,7 @@ interface Server {
   start: () => void;
 }
 
-export const createServer = (port: number): Server => {
+export const createServer = async (port: number): Promise<Server> => {
   const app = express();
   app.use(express.json({ limit: "500mb" }));
   return {
src/utils/close.ts (new file, 27 lines)

@@ -0,0 +1,27 @@
+import { isServiceRunning, cleanupPidFile, getReferenceCount } from './processCheck';
+import { readFileSync } from 'fs';
+import { HOME_DIR } from '../constants';
+import { join } from 'path';
+
+export async function closeService() {
+  const PID_FILE = join(HOME_DIR, '.claude-code-router.pid');
+
+  if (!isServiceRunning()) {
+    console.log("No service is currently running.");
+    return;
+  }
+
+  if (getReferenceCount() > 0) {
+    return;
+  }
+
+  try {
+    const pid = parseInt(readFileSync(PID_FILE, 'utf-8'));
+    process.kill(pid);
+    cleanupPidFile();
+    console.log("claude code router service has been successfully stopped.");
+  } catch (e) {
+    console.log("Failed to stop the service. It may have already been stopped.");
+    cleanupPidFile();
+  }
+}
src/utils/codeCommand.ts (new file, 42 lines)

@@ -0,0 +1,42 @@
+import { spawn } from "child_process";
+import {
+  incrementReferenceCount,
+  decrementReferenceCount,
+} from "./processCheck";
+import { closeService } from "./close";
+
+export async function executeCodeCommand(args: string[] = []) {
+  // Set environment variables
+  const env = {
+    ...process.env,
+    DISABLE_PROMPT_CACHING: "1",
+    ANTHROPIC_AUTH_TOKEN: "test",
+    ANTHROPIC_BASE_URL: `http://127.0.0.1:3456`,
+    API_TIMEOUT_MS: "600000",
+  };
+
+  // Increment reference count when command starts
+  incrementReferenceCount();
+
+  // Execute claude command
+  const claudeProcess = spawn("claude", args, {
+    env,
+    stdio: "inherit",
+    shell: true,
+  });
+
+  claudeProcess.on("error", (error) => {
+    console.error("Failed to start claude command:", error.message);
+    console.log(
+      "Make sure Claude Code is installed: npm install -g @anthropic-ai/claude-code"
+    );
+    decrementReferenceCount();
+    process.exit(1);
+  });
+
+  claudeProcess.on("close", (code) => {
+    decrementReferenceCount();
+    closeService();
+    process.exit(code || 0);
+  });
+}
src/utils/index.ts (modified)

@@ -13,6 +13,8 @@ export function getOpenAICommonOptions(): ClientOptions {
   const options: ClientOptions = {};
   if (process.env.PROXY_URL) {
     options.httpAgent = new HttpsProxyAgent(process.env.PROXY_URL);
+  } else if (process.env.HTTPS_PROXY) {
+    options.httpAgent = new HttpsProxyAgent(process.env.HTTPS_PROXY);
   }
   return options;
 }

@@ -57,34 +59,28 @@ export const readConfigFile = async () => {
     const config = await fs.readFile(CONFIG_FILE, "utf-8");
     return JSON.parse(config);
   } catch {
-    const useRouter = await confirm(
-      "No config file found. Enable router mode? (Y/n)"
-    );
-    if (!useRouter) {
-      const apiKey = await question("Enter OPENAI_API_KEY: ");
-      const baseUrl = await question("Enter OPENAI_BASE_URL: ");
-      const model = await question("Enter OPENAI_MODEL: ");
-      const config = Object.assign({}, DEFAULT_CONFIG, {
-        OPENAI_API_KEY: apiKey,
-        OPENAI_BASE_URL: baseUrl,
-        OPENAI_MODEL: model,
-      });
-      await writeConfigFile(config);
-      return config;
-    } else {
-      const router = await question("Enter OPENAI_API_KEY: ");
-      return DEFAULT_CONFIG;
-    }
+    const apiKey = await question("Enter OPENAI_API_KEY: ");
+    const baseUrl = await question("Enter OPENAI_BASE_URL: ");
+    const model = await question("Enter OPENAI_MODEL: ");
+    const config = Object.assign({}, DEFAULT_CONFIG, {
+      OPENAI_API_KEY: apiKey,
+      OPENAI_BASE_URL: baseUrl,
+      OPENAI_MODEL: model,
+    });
+    await writeConfigFile(config);
+    return config;
   }
 };
 
 export const writeConfigFile = async (config: any) => {
   await ensureDir(HOME_DIR);
   await fs.writeFile(CONFIG_FILE, JSON.stringify(config, null, 2));
 };
 
 export const initConfig = async () => {
   const config = await readConfigFile();
   Object.assign(process.env, config);
   return config;
 };
 
 export const createClient = (options: ClientOptions) => {
src/utils/log.ts (modified)

@@ -1,8 +1,8 @@
-import fs from 'node:fs';
-import path from 'node:path';
-import { HOME_DIR } from '../constants';
+import fs from "node:fs";
+import path from "node:path";
+import { HOME_DIR } from "../constants";
 
-const LOG_FILE = path.join(HOME_DIR, 'claude-code-router.log');
+const LOG_FILE = path.join(HOME_DIR, "claude-code-router.log");
 
 // Ensure log directory exists
 if (!fs.existsSync(HOME_DIR)) {

@@ -11,17 +11,23 @@ if (!fs.existsSync(HOME_DIR)) {
 
 export function log(...args: any[]) {
   // Check if logging is enabled via environment variable
-  const isLogEnabled = process.env.LOG === 'true';
+  const isLogEnabled = process.env.LOG === "true";
 
   if (!isLogEnabled) {
     return;
   }
 
   const timestamp = new Date().toISOString();
-  const logMessage = `[${timestamp}] ${args.map(arg =>
-    typeof arg === 'object' ? JSON.stringify(arg) : String(arg)
-  ).join(' ')}\n`;
+  const logMessage = `[${timestamp}] ${
+    Array.isArray(args)
+      ? args
+          .map((arg) =>
+            typeof arg === "object" ? JSON.stringify(arg) : String(arg)
+          )
+          .join(" ")
+      : ""
+  }\n`;
 
   // Append to log file
-  fs.appendFileSync(LOG_FILE, logMessage, 'utf8');
+  fs.appendFileSync(LOG_FILE, logMessage, "utf8");
 }
src/utils/processCheck.ts (new file, 85 lines)

@@ -0,0 +1,85 @@
+import { existsSync, readFileSync, writeFileSync } from 'fs';
+import { PID_FILE, REFERENCE_COUNT_FILE } from '../constants';
+
+export function incrementReferenceCount() {
+  let count = 0;
+  if (existsSync(REFERENCE_COUNT_FILE)) {
+    count = parseInt(readFileSync(REFERENCE_COUNT_FILE, 'utf-8')) || 0;
+  }
+  count++;
+  writeFileSync(REFERENCE_COUNT_FILE, count.toString());
+}
+
+export function decrementReferenceCount() {
+  let count = 0;
+  if (existsSync(REFERENCE_COUNT_FILE)) {
+    count = parseInt(readFileSync(REFERENCE_COUNT_FILE, 'utf-8')) || 0;
+  }
+  count = Math.max(0, count - 1);
+  writeFileSync(REFERENCE_COUNT_FILE, count.toString());
+}
+
+export function getReferenceCount(): number {
+  if (!existsSync(REFERENCE_COUNT_FILE)) {
+    return 0;
+  }
+  return parseInt(readFileSync(REFERENCE_COUNT_FILE, 'utf-8')) || 0;
+}
+
+export function isServiceRunning(): boolean {
+  if (!existsSync(PID_FILE)) {
+    return false;
+  }
+
+  try {
+    const pid = parseInt(readFileSync(PID_FILE, 'utf-8'));
+    process.kill(pid, 0);
+    return true;
+  } catch (e) {
+    // Process not running, clean up pid file
+    cleanupPidFile();
+    return false;
+  }
+}
+
+export function savePid(pid: number) {
+  writeFileSync(PID_FILE, pid.toString());
+}
+
+export function cleanupPidFile() {
+  if (existsSync(PID_FILE)) {
+    try {
+      const fs = require('fs');
+      fs.unlinkSync(PID_FILE);
+    } catch (e) {
+      // Ignore cleanup errors
+    }
+  }
+}
+
+export function getServicePid(): number | null {
+  if (!existsSync(PID_FILE)) {
+    return null;
+  }
+
+  try {
+    const pid = parseInt(readFileSync(PID_FILE, 'utf-8'));
+    return isNaN(pid) ? null : pid;
+  } catch (e) {
+    return null;
+  }
+}
+
+export function getServiceInfo() {
+  const pid = getServicePid();
+  const running = isServiceRunning();
+
+  return {
+    running,
+    pid,
+    port: 3456,
+    endpoint: 'http://127.0.0.1:3456',
+    pidFile: PID_FILE,
+    referenceCount: getReferenceCount()
+  };
+}
src/utils/status.ts (new file, 27 lines)

@@ -0,0 +1,27 @@
+import { getServiceInfo } from './processCheck';
+
+export function showStatus() {
+  const info = getServiceInfo();
+
+  console.log('\n📊 Claude Code Router Status');
+  console.log('═'.repeat(40));
+
+  if (info.running) {
+    console.log('✅ Status: Running');
+    console.log(`🆔 Process ID: ${info.pid}`);
+    console.log(`🌐 Port: ${info.port}`);
+    console.log(`📡 API Endpoint: ${info.endpoint}`);
+    console.log(`📄 PID File: ${info.pidFile}`);
+    console.log('');
+    console.log('🚀 Ready to use! Run the following commands:');
+    console.log('   claude-code-router code    # Start coding with Claude');
+    console.log('   claude-code-router close   # Stop the service');
+  } else {
+    console.log('❌ Status: Not Running');
+    console.log('');
+    console.log('💡 To start the service:');
+    console.log('   claude-code-router start');
+  }
+
+  console.log('');
+}
src/utils/stream.ts (modified)

@@ -1,5 +1,6 @@
 import { Response } from "express";
 import { OpenAI } from "openai";
+import { log } from "./log";
 
 interface ContentBlock {
   type: string;

@@ -42,10 +43,40 @@ interface MessageEvent {
 
 export async function streamOpenAIResponse(
   res: Response,
-  completion: AsyncIterable<OpenAI.Chat.Completions.ChatCompletionChunk>,
-  model: string
+  completion: any,
+  model: string,
+  body: any
 ) {
+  const write = (data: string) => {
+    log("response: ", data);
+    res.write(data);
+  };
   const messageId = "msg_" + Date.now();
+  if (!body.stream) {
+    res.json({
+      id: messageId,
+      type: "message",
+      role: "assistant",
+      // @ts-ignore
+      content: completion.choices[0].message.content || completion.choices[0].message.tool_calls?.map((item) => {
+        return {
+          type: 'tool_use',
+          id: item.id,
+          name: item.function?.name,
+          input: item.function?.arguments ? JSON.parse(item.function.arguments) : {},
+        };
+      }) || '',
+      stop_reason: completion.choices[0].finish_reason === 'tool_calls' ? "tool_use" : "end_turn",
+      stop_sequence: null,
+      usage: {
+        input_tokens: 100,
+        output_tokens: 50,
+      },
+    });
+    res.end();
+    return;
+  }
 
   let contentBlockIndex = 0;
   let currentContentBlocks: ContentBlock[] = [];

@@ -63,7 +94,7 @@ export async function streamOpenAIResponse(
       usage: { input_tokens: 1, output_tokens: 1 },
     },
   };
-  res.write(`event: message_start\ndata: ${JSON.stringify(messageStart)}\n\n`);
+  write(`event: message_start\ndata: ${JSON.stringify(messageStart)}\n\n`);
 
   let isToolUse = false;
   let toolUseJson = "";

@@ -71,6 +102,7 @@ export async function streamOpenAIResponse(
 
   try {
     for await (const chunk of completion) {
+      log("Processing chunk:", chunk);
       const delta = chunk.choices[0].delta;
 
       if (delta.tool_calls && delta.tool_calls.length > 0) {

@@ -94,7 +126,7 @@ export async function streamOpenAIResponse(
 
         currentContentBlocks.push(toolBlock);
 
-        res.write(
+        write(
           `event: content_block_start\ndata: ${JSON.stringify(
             toolBlockStart
           )}\n\n`

@@ -119,23 +151,25 @@ export async function streamOpenAIResponse(
           const parsedJson = JSON.parse(toolUseJson);
          currentContentBlocks[contentBlockIndex].input = parsedJson;
         } catch (e) {
+          log(e);
           // JSON not yet complete, continue accumulating
         }
 
-        res.write(
+        write(
           `event: content_block_delta\ndata: ${JSON.stringify(jsonDelta)}\n\n`
         );
       }
     } else if (delta.content) {
       // Handle regular text content
       if (isToolUse) {
+        log("Tool call ended here:", delta);
         // End previous tool call block
         const contentBlockStop: MessageEvent = {
           type: "content_block_stop",
           index: contentBlockIndex,
         };
 
-        res.write(
+        write(
           `event: content_block_stop\ndata: ${JSON.stringify(
             contentBlockStop
           )}\n\n`

@@ -161,7 +195,7 @@ export async function streamOpenAIResponse(
 
         currentContentBlocks.push(textBlock);
 
-        res.write(
+        write(
           `event: content_block_start\ndata: ${JSON.stringify(
             textBlockStart
           )}\n\n`

@@ -184,7 +218,7 @@ export async function streamOpenAIResponse(
         currentContentBlocks[contentBlockIndex].text += delta.content;
       }
 
-      res.write(
+      write(
        `event: content_block_delta\ndata: ${JSON.stringify(
           contentDelta
         )}\n\n`

@@ -207,7 +241,7 @@ export async function streamOpenAIResponse(
 
       currentContentBlocks.push(textBlock);
 
-      res.write(
+      write(
         `event: content_block_start\ndata: ${JSON.stringify(
           textBlockStart
         )}\n\n`

@@ -230,7 +264,7 @@ export async function streamOpenAIResponse(
       currentContentBlocks[contentBlockIndex].text += JSON.stringify(e);
     }
 
-    res.write(
+    write(
       `event: content_block_delta\ndata: ${JSON.stringify(contentDelta)}\n\n`
     );
   }

@@ -241,7 +275,7 @@ export async function streamOpenAIResponse(
     index: contentBlockIndex,
   };
 
-  res.write(
+  write(
     `event: content_block_stop\ndata: ${JSON.stringify(contentBlockStop)}\n\n`
   );

@@ -255,14 +289,17 @@ export async function streamOpenAIResponse(
     },
     usage: { input_tokens: 100, output_tokens: 150 },
   };
+  if (!isToolUse) {
+    log("body: ", body, "messageDelta: ", messageDelta);
+  }
 
-  res.write(`event: message_delta\ndata: ${JSON.stringify(messageDelta)}\n\n`);
+  write(`event: message_delta\ndata: ${JSON.stringify(messageDelta)}\n\n`);
 
   // Send message_stop event
   const messageStop: MessageEvent = {
     type: "message_stop",
   };
 
-  res.write(`event: message_stop\ndata: ${JSON.stringify(messageStop)}\n\n`);
+  write(`event: message_stop\ndata: ${JSON.stringify(messageStop)}\n\n`);
   res.end();
 }