Implements Claude Code as a new AI provider that uses the Claude Code CLI without requiring API keys. This enables users to leverage Claude models through their local Claude Code installation.

Key changes:

- Add complete AI SDK v1 implementation for the Claude Code provider
  - Custom SDK with streaming/non-streaming support
  - Session management for conversation continuity
  - JSON extraction for object generation mode (sketched below)
  - Support for advanced settings (maxTurns, allowedTools, etc.)
- Integrate Claude Code into Task Master's provider system
  - Update ai-services-unified.js to handle keyless authentication (sketched below)
  - Add provider to supported-models.json with opus/sonnet models
  - Ensure correct maxTokens values are applied (opus: 32000, sonnet: 64000)
- Fix maxTokens configuration issue
  - Add max_tokens property to getAvailableModels() output
  - Update setModel() to properly handle claude-code models
  - Create update-config-tokens.js utility for the init process (a sketch follows the model list below)
- Add comprehensive documentation
  - User guide with configuration examples
  - Advanced settings explanation and future integration options

The implementation maintains full backward compatibility with existing providers while adding seamless Claude Code support to all Task Master commands.
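To make the keyless flow concrete, here is a minimal sketch of how a unified service layer can skip API-key resolution for the claude-code provider. The names (`KEYLESS_PROVIDERS`, `resolveApiKey`, the env-var map) are illustrative assumptions, not the actual identifiers used in ai-services-unified.js.

```js
// Hypothetical helper: providers that authenticate through a local CLI
// rather than an API key. Only claude-code is known from this PR; other
// keyless providers could be added the same way.
const KEYLESS_PROVIDERS = new Set(['claude-code']);

// Illustrative mapping from provider name to the env var holding its key.
const ENV_VAR_BY_PROVIDER = {
	anthropic: 'ANTHROPIC_API_KEY',
	openai: 'OPENAI_API_KEY',
	perplexity: 'PERPLEXITY_API_KEY'
};

function resolveApiKey(providerName, env = process.env) {
	// Keyless providers (the Claude Code CLI) need no credential at all.
	if (KEYLESS_PROVIDERS.has(providerName)) {
		return null;
	}
	const key = env[ENV_VAR_BY_PROVIDER[providerName]];
	if (!key) {
		throw new Error(`Missing API key for provider "${providerName}"`);
	}
	return key;
}
```

Downstream, a null key simply means the provider client is constructed without credentials and authentication is delegated to the local Claude Code installation.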
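The "JSON extraction for object generation mode" item also benefits from a sketch: object generation has to recover a JSON payload from whatever free-form text the CLI returns. `extractJson` below is a hypothetical helper name; the custom SDK's actual implementation may differ.

```js
// Hypothetical helper for object generation mode: pull a JSON object out of
// free-form model output. Tries a direct parse first, then falls back to the
// outermost {...} span.
function extractJson(text) {
	try {
		return JSON.parse(text); // fast path: the response is already pure JSON
	} catch {
		// fall through to span extraction
	}
	const start = text.indexOf('{');
	const end = text.lastIndexOf('}');
	if (start === -1 || end <= start) {
		throw new Error('No JSON object found in model output');
	}
	return JSON.parse(text.slice(start, end + 1));
}

// extractJson('Sure, here is the result: {"tasks": []} Let me know...')
// -> { tasks: [] }
```

The full supported-models.json, with the new claude-code entries alongside the existing providers, follows.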
{
	"bedrock": [
		{
			"id": "us.anthropic.claude-3-7-sonnet-20250219-v1:0",
			"swe_score": 0.623,
			"cost_per_1m_tokens": { "input": 3, "output": 15 },
			"allowed_roles": ["main", "fallback"],
			"max_tokens": 65536
		},
		{
			"id": "us.deepseek.r1-v1:0",
			"swe_score": 0,
			"cost_per_1m_tokens": { "input": 1.35, "output": 5.4 },
			"allowed_roles": ["research"],
			"max_tokens": 65536
		}
	],
	"anthropic": [
		{
			"id": "claude-sonnet-4-20250514",
			"swe_score": 0.727,
			"cost_per_1m_tokens": {
				"input": 3.0,
				"output": 15.0
			},
			"allowed_roles": ["main", "fallback"],
			"max_tokens": 64000
		},
		{
			"id": "claude-opus-4-20250514",
			"swe_score": 0.725,
			"cost_per_1m_tokens": {
				"input": 15.0,
				"output": 75.0
			},
			"allowed_roles": ["main", "fallback"],
			"max_tokens": 32000
		},
		{
			"id": "claude-3-7-sonnet-20250219",
			"swe_score": 0.623,
			"cost_per_1m_tokens": {
				"input": 3.0,
				"output": 15.0
			},
			"allowed_roles": ["main", "fallback"],
			"max_tokens": 120000
		},
		{
			"id": "claude-3-5-sonnet-20241022",
			"swe_score": 0.49,
			"cost_per_1m_tokens": {
				"input": 3.0,
				"output": 15.0
			},
			"allowed_roles": ["main", "fallback"],
			"max_tokens": 64000
		}
	],
	"openai": [
		{
			"id": "gpt-4o",
			"swe_score": 0.332,
			"cost_per_1m_tokens": {
				"input": 2.5,
				"output": 10.0
			},
			"allowed_roles": ["main", "fallback"],
			"max_tokens": 16384
		},
		{
			"id": "o1",
			"swe_score": 0.489,
			"cost_per_1m_tokens": {
				"input": 15.0,
				"output": 60.0
			},
			"allowed_roles": ["main"]
		},
		{
			"id": "o3",
			"swe_score": 0.5,
			"cost_per_1m_tokens": {
				"input": 2.0,
				"output": 8.0
			},
			"allowed_roles": ["main", "fallback"]
		},
		{
			"id": "o3-mini",
			"swe_score": 0.493,
			"cost_per_1m_tokens": {
				"input": 1.1,
				"output": 4.4
			},
			"allowed_roles": ["main"],
			"max_tokens": 100000
		},
		{
			"id": "o4-mini",
			"swe_score": 0.45,
			"cost_per_1m_tokens": {
				"input": 1.1,
				"output": 4.4
			},
			"allowed_roles": ["main", "fallback"]
		},
		{
			"id": "o1-mini",
			"swe_score": 0.4,
			"cost_per_1m_tokens": {
				"input": 1.1,
				"output": 4.4
			},
			"allowed_roles": ["main"]
		},
		{
			"id": "o1-pro",
			"swe_score": 0,
			"cost_per_1m_tokens": {
				"input": 150.0,
				"output": 600.0
			},
			"allowed_roles": ["main"]
		},
		{
			"id": "gpt-4-5-preview",
			"swe_score": 0.38,
			"cost_per_1m_tokens": {
				"input": 75.0,
				"output": 150.0
			},
			"allowed_roles": ["main"]
		},
		{
			"id": "gpt-4-1-mini",
			"swe_score": 0,
			"cost_per_1m_tokens": {
				"input": 0.4,
				"output": 1.6
			},
			"allowed_roles": ["main"]
		},
		{
			"id": "gpt-4-1-nano",
			"swe_score": 0,
			"cost_per_1m_tokens": {
				"input": 0.1,
				"output": 0.4
			},
			"allowed_roles": ["main"]
		},
		{
			"id": "gpt-4o-mini",
			"swe_score": 0.3,
			"cost_per_1m_tokens": {
				"input": 0.15,
				"output": 0.6
			},
			"allowed_roles": ["main"]
		},
		{
			"id": "gpt-4o-search-preview",
			"swe_score": 0.33,
			"cost_per_1m_tokens": {
				"input": 2.5,
				"output": 10.0
			},
			"allowed_roles": ["research"]
		},
		{
			"id": "gpt-4o-mini-search-preview",
			"swe_score": 0.3,
			"cost_per_1m_tokens": {
				"input": 0.15,
				"output": 0.6
			},
			"allowed_roles": ["research"]
		}
	],
	"google": [
		{
			"id": "gemini-2.5-pro-preview-05-06",
			"swe_score": 0.638,
			"cost_per_1m_tokens": null,
			"allowed_roles": ["main", "fallback"],
			"max_tokens": 1048000
		},
		{
			"id": "gemini-2.5-pro-preview-03-25",
			"swe_score": 0.638,
			"cost_per_1m_tokens": null,
			"allowed_roles": ["main", "fallback"],
			"max_tokens": 1048000
		},
		{
			"id": "gemini-2.5-flash-preview-04-17",
			"swe_score": 0.604,
			"cost_per_1m_tokens": null,
			"allowed_roles": ["main", "fallback"],
			"max_tokens": 1048000
		},
		{
			"id": "gemini-2.0-flash",
			"swe_score": 0.518,
			"cost_per_1m_tokens": {
				"input": 0.15,
				"output": 0.6
			},
			"allowed_roles": ["main", "fallback"],
			"max_tokens": 1048000
		},
		{
			"id": "gemini-2.0-flash-lite",
			"swe_score": 0,
			"cost_per_1m_tokens": null,
			"allowed_roles": ["main", "fallback"],
			"max_tokens": 1048000
		}
	],
	"perplexity": [
		{
			"id": "sonar-pro",
			"swe_score": 0,
			"cost_per_1m_tokens": {
				"input": 3,
				"output": 15
			},
			"allowed_roles": ["main", "research"],
			"max_tokens": 8700
		},
		{
			"id": "sonar",
			"swe_score": 0,
			"cost_per_1m_tokens": {
				"input": 1,
				"output": 1
			},
			"allowed_roles": ["research"],
			"max_tokens": 8700
		},
		{
			"id": "deep-research",
			"swe_score": 0.211,
			"cost_per_1m_tokens": {
				"input": 2,
				"output": 8
			},
			"allowed_roles": ["research"],
			"max_tokens": 8700
		},
		{
			"id": "sonar-reasoning-pro",
			"swe_score": 0.211,
			"cost_per_1m_tokens": {
				"input": 2,
				"output": 8
			},
			"allowed_roles": ["main", "research", "fallback"],
			"max_tokens": 8700
		},
		{
			"id": "sonar-reasoning",
			"swe_score": 0.211,
			"cost_per_1m_tokens": {
				"input": 1,
				"output": 5
			},
			"allowed_roles": ["main", "research", "fallback"],
			"max_tokens": 8700
		}
	],
	"xai": [
		{
			"id": "grok-3",
			"name": "Grok 3",
			"swe_score": null,
			"cost_per_1m_tokens": {
				"input": 3,
				"output": 15
			},
			"allowed_roles": ["main", "fallback", "research"],
			"max_tokens": 131072
		},
		{
			"id": "grok-3-fast",
			"name": "Grok 3 Fast",
			"swe_score": 0,
			"cost_per_1m_tokens": {
				"input": 5,
				"output": 25
			},
			"allowed_roles": ["main", "fallback", "research"],
			"max_tokens": 131072
		}
	],
	"ollama": [
		{
			"id": "devstral:latest",
			"swe_score": 0,
			"cost_per_1m_tokens": {
				"input": 0,
				"output": 0
			},
			"allowed_roles": ["main", "fallback"]
		},
		{
			"id": "qwen3:latest",
			"swe_score": 0,
			"cost_per_1m_tokens": {
				"input": 0,
				"output": 0
			},
			"allowed_roles": ["main", "fallback"]
		},
		{
			"id": "qwen3:14b",
			"swe_score": 0,
			"cost_per_1m_tokens": {
				"input": 0,
				"output": 0
			},
			"allowed_roles": ["main", "fallback"]
		},
		{
			"id": "qwen3:32b",
			"swe_score": 0,
			"cost_per_1m_tokens": {
				"input": 0,
				"output": 0
			},
			"allowed_roles": ["main", "fallback"]
		},
		{
			"id": "mistral-small3.1:latest",
			"swe_score": 0,
			"cost_per_1m_tokens": {
				"input": 0,
				"output": 0
			},
			"allowed_roles": ["main", "fallback"]
		},
		{
			"id": "llama3.3:latest",
			"swe_score": 0,
			"cost_per_1m_tokens": {
				"input": 0,
				"output": 0
			},
			"allowed_roles": ["main", "fallback"]
		},
		{
			"id": "phi4:latest",
			"swe_score": 0,
			"cost_per_1m_tokens": {
				"input": 0,
				"output": 0
			},
			"allowed_roles": ["main", "fallback"]
		}
	],
	"openrouter": [
		{
			"id": "google/gemini-2.5-flash-preview-05-20",
			"swe_score": 0,
			"cost_per_1m_tokens": {
				"input": 0.15,
				"output": 0.6
			},
			"allowed_roles": ["main", "fallback"],
			"max_tokens": 1048576
		},
		{
			"id": "google/gemini-2.5-flash-preview-05-20:thinking",
			"swe_score": 0,
			"cost_per_1m_tokens": {
				"input": 0.15,
				"output": 3.5
			},
			"allowed_roles": ["main", "fallback"],
			"max_tokens": 1048576
		},
		{
			"id": "google/gemini-2.5-pro-exp-03-25",
			"swe_score": 0,
			"cost_per_1m_tokens": {
				"input": 0,
				"output": 0
			},
			"allowed_roles": ["main", "fallback"],
			"max_tokens": 1000000
		},
		{
			"id": "deepseek/deepseek-chat-v3-0324:free",
			"swe_score": 0,
			"cost_per_1m_tokens": {
				"input": 0,
				"output": 0
			},
			"allowed_roles": ["main", "fallback"],
			"max_tokens": 163840
		},
		{
			"id": "deepseek/deepseek-chat-v3-0324",
			"swe_score": 0,
			"cost_per_1m_tokens": {
				"input": 0.27,
				"output": 1.1
			},
			"allowed_roles": ["main"],
			"max_tokens": 64000
		},
		{
			"id": "openai/gpt-4.1",
			"swe_score": 0,
			"cost_per_1m_tokens": {
				"input": 2,
				"output": 8
			},
			"allowed_roles": ["main", "fallback"],
			"max_tokens": 1000000
		},
		{
			"id": "openai/gpt-4.1-mini",
			"swe_score": 0,
			"cost_per_1m_tokens": {
				"input": 0.4,
				"output": 1.6
			},
			"allowed_roles": ["main", "fallback"],
			"max_tokens": 1000000
		},
		{
			"id": "openai/gpt-4.1-nano",
			"swe_score": 0,
			"cost_per_1m_tokens": {
				"input": 0.1,
				"output": 0.4
			},
			"allowed_roles": ["main", "fallback"],
			"max_tokens": 1000000
		},
		{
			"id": "openai/o3",
			"swe_score": 0,
			"cost_per_1m_tokens": {
				"input": 10,
				"output": 40
			},
			"allowed_roles": ["main", "fallback"],
			"max_tokens": 200000
		},
		{
			"id": "openai/codex-mini",
			"swe_score": 0,
			"cost_per_1m_tokens": {
				"input": 1.5,
				"output": 6
			},
			"allowed_roles": ["main", "fallback"],
			"max_tokens": 100000
		},
		{
			"id": "openai/gpt-4o-mini",
			"swe_score": 0,
			"cost_per_1m_tokens": {
				"input": 0.15,
				"output": 0.6
			},
			"allowed_roles": ["main", "fallback"],
			"max_tokens": 100000
		},
		{
			"id": "openai/o4-mini",
			"swe_score": 0.45,
			"cost_per_1m_tokens": {
				"input": 1.1,
				"output": 4.4
			},
			"allowed_roles": ["main", "fallback"],
			"max_tokens": 100000
		},
		{
			"id": "openai/o4-mini-high",
			"swe_score": 0,
			"cost_per_1m_tokens": {
				"input": 1.1,
				"output": 4.4
			},
			"allowed_roles": ["main", "fallback"],
			"max_tokens": 100000
		},
		{
			"id": "openai/o1-pro",
			"swe_score": 0,
			"cost_per_1m_tokens": {
				"input": 150,
				"output": 600
			},
			"allowed_roles": ["main", "fallback"],
			"max_tokens": 100000
		},
		{
			"id": "meta-llama/llama-3.3-70b-instruct",
			"swe_score": 0,
			"cost_per_1m_tokens": {
				"input": 120,
				"output": 600
			},
			"allowed_roles": ["main", "fallback"],
			"max_tokens": 1048576
		},
		{
			"id": "meta-llama/llama-4-maverick",
			"swe_score": 0,
			"cost_per_1m_tokens": {
				"input": 0.18,
				"output": 0.6
			},
			"allowed_roles": ["main", "fallback"],
			"max_tokens": 1000000
		},
		{
			"id": "meta-llama/llama-4-scout",
			"swe_score": 0,
			"cost_per_1m_tokens": {
				"input": 0.08,
				"output": 0.3
			},
			"allowed_roles": ["main", "fallback"],
			"max_tokens": 1000000
		},
		{
			"id": "qwen/qwen-max",
			"swe_score": 0,
			"cost_per_1m_tokens": {
				"input": 1.6,
				"output": 6.4
			},
			"allowed_roles": ["main", "fallback"],
			"max_tokens": 32768
		},
		{
			"id": "qwen/qwen-turbo",
			"swe_score": 0,
			"cost_per_1m_tokens": {
				"input": 0.05,
				"output": 0.2
			},
			"allowed_roles": ["main", "fallback"],
			"max_tokens": 1000000
		},
		{
			"id": "qwen/qwen3-235b-a22b",
			"swe_score": 0,
			"cost_per_1m_tokens": {
				"input": 0.14,
				"output": 2
			},
			"allowed_roles": ["main", "fallback"],
			"max_tokens": 24000
		},
		{
			"id": "mistralai/mistral-small-3.1-24b-instruct:free",
			"swe_score": 0,
			"cost_per_1m_tokens": {
				"input": 0,
				"output": 0
			},
			"allowed_roles": ["main", "fallback"],
			"max_tokens": 96000
		},
		{
			"id": "mistralai/mistral-small-3.1-24b-instruct",
			"swe_score": 0,
			"cost_per_1m_tokens": {
				"input": 0.1,
				"output": 0.3
			},
			"allowed_roles": ["main", "fallback"],
			"max_tokens": 128000
		},
		{
			"id": "mistralai/devstral-small",
			"swe_score": 0,
			"cost_per_1m_tokens": {
				"input": 0.1,
				"output": 0.3
			},
			"allowed_roles": ["main"],
			"max_tokens": 110000
		},
		{
			"id": "mistralai/mistral-nemo",
			"swe_score": 0,
			"cost_per_1m_tokens": {
				"input": 0.03,
				"output": 0.07
			},
			"allowed_roles": ["main", "fallback"],
			"max_tokens": 100000
		},
		{
			"id": "thudm/glm-4-32b:free",
			"swe_score": 0,
			"cost_per_1m_tokens": {
				"input": 0,
				"output": 0
			},
			"allowed_roles": ["main", "fallback"],
			"max_tokens": 32768
		}
	],
	"claude-code": [
		{
			"id": "opus",
			"swe_score": 0.725,
			"cost_per_1m_tokens": { "input": 0, "output": 0 },
			"allowed_roles": ["main", "fallback", "research"],
			"max_tokens": 32000
		},
		{
			"id": "sonnet",
			"swe_score": 0.727,
			"cost_per_1m_tokens": { "input": 0, "output": 0 },
			"allowed_roles": ["main", "fallback", "research"],
			"max_tokens": 64000
		}
	]
}
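As a rough illustration of how the max_tokens values above feed the maxTokens fix, here is a sketch in the spirit of the update-config-tokens.js init utility: look up the configured model in supported-models.json and copy its limit into the project config. The file locations and config shape are assumptions for illustration only, not the real utility's code.

```js
// Sketch: copy max_tokens from supported-models.json into an existing config
// so claude-code models get the right limits (opus: 32000, sonnet: 64000).
// Paths and the config layout are assumed, not taken from the real utility.
import { readFileSync, writeFileSync } from 'node:fs';

const models = JSON.parse(readFileSync('supported-models.json', 'utf8'));
const config = JSON.parse(readFileSync('.taskmasterconfig', 'utf8'));

for (const role of ['main', 'research', 'fallback']) {
	const roleConfig = config.models?.[role];
	if (!roleConfig) continue;

	// Find the configured model in its provider's list and copy max_tokens.
	const entry = (models[roleConfig.provider] ?? []).find(
		(m) => m.id === roleConfig.modelId
	);
	if (entry?.max_tokens) {
		roleConfig.maxTokens = entry.max_tokens;
	}
}

writeFileSync('.taskmasterconfig', JSON.stringify(config, null, 2) + '\n');
```

The same lookup is what lets getAvailableModels() surface a max_tokens property and setModel() apply the correct limit when a claude-code model is selected.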