This commit introduces several improvements and refactorings across MCP tools, core logic, and configuration.
**Major Changes:**
1. **Refactor updateSubtaskById:**
- Switched from generateTextService to generateObjectService for structured AI responses, using a Zod schema (subtaskSchema) for validation.
- Revised prompts to have the AI generate relevant content based on user request and context (parent/sibling tasks), while explicitly preventing AI from handling timestamp/tag formatting.
- Implemented **local timestamp generation (new Date().toISOString()) and formatting** (using <info added on ...> tags) within the function *after* receiving the AI response. This ensures reliable and correctly formatted details are appended.
- Corrected logic to append only the locally formatted, AI-generated content block to the existing subtask.details.
2. **Consolidate MCP Utilities:**
- Moved/consolidated the withNormalizedProjectRoot HOF into mcp-server/src/tools/utils.js.
- Updated MCP tools (like update-subtask.js) to import withNormalizedProjectRoot from the new location.
3. **Refactor Project Initialization:**
- Deleted the redundant mcp-server/src/core/direct-functions/initialize-project-direct.js file.
- Updated mcp-server/src/core/task-master-core.js to import initializeProjectDirect from its correct location (./direct-functions/initialize-project.js).
**Other Changes:**
- Updated .taskmasterconfig fallback model to claude-3-7-sonnet-20250219.
- Clarified model cost representation in the models tool description (taskmaster.mdc and mcp-server/src/tools/models.js).
410 lines
9.9 KiB
JSON
{
  "anthropic": [
    {
      "id": "claude-3-7-sonnet-20250219",
      "swe_score": 0.623,
      "cost_per_1m_tokens": { "input": 3.0, "output": 15.0 },
      "allowed_roles": ["main", "fallback"],
      "max_tokens": 120000
    },
    {
      "id": "claude-3-5-sonnet-20241022",
      "swe_score": 0.49,
      "cost_per_1m_tokens": { "input": 3.0, "output": 15.0 },
      "allowed_roles": ["main", "fallback"],
      "max_tokens": 64000
    }
  ],
  "openai": [
    {
      "id": "gpt-4o",
      "swe_score": 0.332,
      "cost_per_1m_tokens": { "input": 2.5, "output": 10.0 },
      "allowed_roles": ["main", "fallback"],
      "max_tokens": 16384
    },
    {
      "id": "o1",
      "swe_score": 0.489,
      "cost_per_1m_tokens": { "input": 15.0, "output": 60.0 },
      "allowed_roles": ["main"]
    },
    {
      "id": "o3",
      "swe_score": 0.5,
      "cost_per_1m_tokens": { "input": 10.0, "output": 40.0 },
      "allowed_roles": ["main", "fallback"]
    },
    {
      "id": "o3-mini",
      "swe_score": 0.493,
      "cost_per_1m_tokens": { "input": 1.1, "output": 4.4 },
      "allowed_roles": ["main"],
      "max_tokens": 100000
    },
    {
      "id": "o4-mini",
      "swe_score": 0.45,
      "cost_per_1m_tokens": { "input": 1.1, "output": 4.4 },
      "allowed_roles": ["main", "fallback"]
    },
    {
      "id": "o1-mini",
      "swe_score": 0.4,
      "cost_per_1m_tokens": { "input": 1.1, "output": 4.4 },
      "allowed_roles": ["main"]
    },
    {
      "id": "o1-pro",
      "swe_score": 0,
      "cost_per_1m_tokens": { "input": 150.0, "output": 600.0 },
      "allowed_roles": ["main"]
    },
    {
      "id": "gpt-4-5-preview",
      "swe_score": 0.38,
      "cost_per_1m_tokens": { "input": 75.0, "output": 150.0 },
      "allowed_roles": ["main"]
    },
    {
      "id": "gpt-4-1-mini",
      "swe_score": 0,
      "cost_per_1m_tokens": { "input": 0.4, "output": 1.6 },
      "allowed_roles": ["main"]
    },
    {
      "id": "gpt-4-1-nano",
      "swe_score": 0,
      "cost_per_1m_tokens": { "input": 0.1, "output": 0.4 },
      "allowed_roles": ["main"]
    },
    {
      "id": "gpt-4o-mini",
      "swe_score": 0.3,
      "cost_per_1m_tokens": { "input": 0.15, "output": 0.6 },
      "allowed_roles": ["main"]
    },
    {
      "id": "gpt-4o-search-preview",
      "swe_score": 0.33,
      "cost_per_1m_tokens": { "input": 2.5, "output": 10.0 },
      "allowed_roles": ["research"]
    },
    {
      "id": "gpt-4o-mini-search-preview",
      "swe_score": 0.3,
      "cost_per_1m_tokens": { "input": 0.15, "output": 0.6 },
      "allowed_roles": ["research"]
    }
  ],
  "google": [
    {
      "id": "gemini-2.5-pro-exp-03-25",
      "swe_score": 0.638,
      "cost_per_1m_tokens": null,
      "allowed_roles": ["main", "fallback"]
    },
    {
      "id": "gemini-2.5-flash-preview-04-17",
      "swe_score": 0,
      "cost_per_1m_tokens": null,
      "allowed_roles": ["main", "fallback"]
    },
    {
      "id": "gemini-2.0-flash",
      "swe_score": 0.754,
      "cost_per_1m_tokens": { "input": 0.15, "output": 0.6 },
      "allowed_roles": ["main", "fallback"]
    },
    {
      "id": "gemini-2.0-flash-thinking-experimental",
      "swe_score": 0.754,
      "cost_per_1m_tokens": { "input": 0.15, "output": 0.6 },
      "allowed_roles": ["main", "fallback"]
    },
    {
      "id": "gemini-2.0-pro",
      "swe_score": 0,
      "cost_per_1m_tokens": null,
      "allowed_roles": ["main", "fallback"]
    }
  ],
  "perplexity": [
    {
      "id": "sonar-pro",
      "swe_score": 0,
      "cost_per_1m_tokens": { "input": 3, "output": 15 },
      "allowed_roles": ["research"],
      "max_tokens": 8700
    },
    {
      "id": "sonar",
      "swe_score": 0,
      "cost_per_1m_tokens": { "input": 1, "output": 1 },
      "allowed_roles": ["research"],
      "max_tokens": 8700
    },
    {
      "id": "deep-research",
      "swe_score": 0.211,
      "cost_per_1m_tokens": { "input": 2, "output": 8 },
      "allowed_roles": ["research"],
      "max_tokens": 8700
    },
    {
      "id": "sonar-reasoning-pro",
      "swe_score": 0.211,
      "cost_per_1m_tokens": { "input": 2, "output": 8 },
      "allowed_roles": ["main", "fallback"],
      "max_tokens": 8700
    },
    {
      "id": "sonar-reasoning",
      "swe_score": 0.211,
      "cost_per_1m_tokens": { "input": 1, "output": 5 },
      "allowed_roles": ["main", "fallback"],
      "max_tokens": 8700
    }
  ],
  "xai": [
    {
      "id": "grok-3",
      "name": "Grok 3",
      "swe_score": null,
      "cost_per_1m_tokens": { "input": 3, "output": 15 },
      "allowed_roles": ["main", "fallback", "research"],
      "max_tokens": 131072
    },
    {
      "id": "grok-3-fast",
      "name": "Grok 3 Fast",
      "swe_score": 0,
      "cost_per_1m_tokens": { "input": 5, "output": 25 },
      "allowed_roles": ["main", "fallback", "research"],
      "max_tokens": 131072
    }
  ],
  "ollama": [
    {
      "id": "gemma3:27b",
      "swe_score": 0,
      "cost_per_1m_tokens": { "input": 0, "output": 0 },
      "allowed_roles": ["main", "fallback"]
    },
    {
      "id": "gemma3:12b",
      "swe_score": 0,
      "cost_per_1m_tokens": { "input": 0, "output": 0 },
      "allowed_roles": ["main", "fallback"]
    },
    {
      "id": "qwq",
      "swe_score": 0,
      "cost_per_1m_tokens": { "input": 0, "output": 0 },
      "allowed_roles": ["main", "fallback"]
    },
    {
      "id": "deepseek-r1",
      "swe_score": 0,
      "cost_per_1m_tokens": { "input": 0, "output": 0 },
      "allowed_roles": ["main", "fallback"]
    },
    {
      "id": "mistral-small3.1",
      "swe_score": 0,
      "cost_per_1m_tokens": { "input": 0, "output": 0 },
      "allowed_roles": ["main", "fallback"]
    },
    {
      "id": "llama3.3",
      "swe_score": 0,
      "cost_per_1m_tokens": { "input": 0, "output": 0 },
      "allowed_roles": ["main", "fallback"]
    },
    {
      "id": "phi4",
      "swe_score": 0,
      "cost_per_1m_tokens": { "input": 0, "output": 0 },
      "allowed_roles": ["main", "fallback"]
    }
  ],
  "openrouter": [
    {
      "id": "google/gemini-2.0-flash-001",
      "swe_score": 0,
      "cost_per_1m_tokens": { "input": 0.1, "output": 0.4 },
      "allowed_roles": ["main", "fallback"],
      "max_tokens": 1048576
    },
    {
      "id": "google/gemini-2.5-pro-exp-03-25",
      "swe_score": 0,
      "cost_per_1m_tokens": { "input": 0, "output": 0 },
      "allowed_roles": ["main", "fallback"],
      "max_tokens": 1000000
    },
    {
      "id": "deepseek/deepseek-chat-v3-0324:free",
      "swe_score": 0,
      "cost_per_1m_tokens": { "input": 0, "output": 0 },
      "allowed_roles": ["main", "fallback"],
      "max_tokens": 163840
    },
    {
      "id": "deepseek/deepseek-chat-v3-0324",
      "swe_score": 0,
      "cost_per_1m_tokens": { "input": 0.27, "output": 1.1 },
      "allowed_roles": ["main"],
      "max_tokens": 64000
    },
    {
      "id": "deepseek/deepseek-r1:free",
      "swe_score": 0,
      "cost_per_1m_tokens": { "input": 0, "output": 0 },
      "allowed_roles": ["main", "fallback"],
      "max_tokens": 163840
    },
    {
      "id": "microsoft/mai-ds-r1:free",
      "swe_score": 0,
      "cost_per_1m_tokens": { "input": 0, "output": 0 },
      "allowed_roles": ["main", "fallback"],
      "max_tokens": 163840
    },
    {
      "id": "google/gemini-2.5-pro-preview-03-25",
      "swe_score": 0,
      "cost_per_1m_tokens": { "input": 1.25, "output": 10 },
      "allowed_roles": ["main", "fallback"],
      "max_tokens": 65535
    },
    {
      "id": "google/gemini-2.5-flash-preview",
      "swe_score": 0,
      "cost_per_1m_tokens": { "input": 0.15, "output": 0.6 },
      "allowed_roles": ["main"],
      "max_tokens": 65535
    },
    {
      "id": "google/gemini-2.5-flash-preview:thinking",
      "swe_score": 0,
      "cost_per_1m_tokens": { "input": 0.15, "output": 3.5 },
      "allowed_roles": ["main"],
      "max_tokens": 65535
    },
    {
      "id": "openai/o3",
      "swe_score": 0,
      "cost_per_1m_tokens": { "input": 10, "output": 40 },
      "allowed_roles": ["main", "fallback"],
      "max_tokens": 200000
    },
    {
      "id": "openai/o4-mini",
      "swe_score": 0.45,
      "cost_per_1m_tokens": { "input": 1.1, "output": 4.4 },
      "allowed_roles": ["main", "fallback"],
      "max_tokens": 100000
    },
    {
      "id": "openai/o4-mini-high",
      "swe_score": 0,
      "cost_per_1m_tokens": { "input": 1.1, "output": 4.4 },
      "allowed_roles": ["main", "fallback"],
      "max_tokens": 100000
    },
    {
      "id": "openai/o1-pro",
      "swe_score": 0,
      "cost_per_1m_tokens": { "input": 150, "output": 600 },
      "allowed_roles": ["main", "fallback"],
      "max_tokens": 100000
    },
    {
      "id": "meta-llama/llama-3.3-70b-instruct",
      "swe_score": 0,
      "cost_per_1m_tokens": { "input": 120, "output": 600 },
      "allowed_roles": ["main", "fallback"],
      "max_tokens": 1048576
    },
    {
      "id": "google/gemma-3-12b-it:free",
      "swe_score": 0,
      "cost_per_1m_tokens": { "input": 0, "output": 0 },
      "allowed_roles": ["main", "fallback"],
      "max_tokens": 131072
    },
    {
      "id": "google/gemma-3-12b-it",
      "swe_score": 0,
      "cost_per_1m_tokens": { "input": 50, "output": 100 },
      "allowed_roles": ["main", "fallback"],
      "max_tokens": 131072
    },
    {
      "id": "google/gemma-3-27b-it:free",
      "swe_score": 0,
      "cost_per_1m_tokens": { "input": 0, "output": 0 },
      "allowed_roles": ["main", "fallback"],
      "max_tokens": 96000
    },
    {
      "id": "google/gemma-3-27b-it",
      "swe_score": 0,
      "cost_per_1m_tokens": { "input": 100, "output": 200 },
      "allowed_roles": ["main", "fallback"],
      "max_tokens": 131072
    },
    {
      "id": "qwen/qwq-32b:free",
      "swe_score": 0,
      "cost_per_1m_tokens": { "input": 0, "output": 0 },
      "allowed_roles": ["main", "fallback"],
      "max_tokens": 40000
    },
    {
      "id": "qwen/qwq-32b",
      "swe_score": 0,
      "cost_per_1m_tokens": { "input": 150, "output": 200 },
      "allowed_roles": ["main", "fallback"],
      "max_tokens": 131072
    },
    {
      "id": "qwen/qwen-max",
      "swe_score": 0,
      "cost_per_1m_tokens": { "input": 1.6, "output": 6.4 },
      "allowed_roles": ["main", "fallback"],
      "max_tokens": 32768
    },
    {
      "id": "qwen/qwen-turbo",
      "swe_score": 0,
      "cost_per_1m_tokens": { "input": 0.05, "output": 0.2 },
      "allowed_roles": ["main", "fallback"],
      "max_tokens": 1000000
    },
    {
      "id": "mistralai/mistral-small-3.1-24b-instruct:free",
      "swe_score": 0,
      "cost_per_1m_tokens": { "input": 0, "output": 0 },
      "allowed_roles": ["main", "fallback"],
      "max_tokens": 96000
    },
    {
      "id": "mistralai/mistral-small-3.1-24b-instruct",
      "swe_score": 0,
      "cost_per_1m_tokens": { "input": 0.1, "output": 0.3 },
      "allowed_roles": ["main", "fallback"],
      "max_tokens": 128000
    },
    {
      "id": "thudm/glm-4-32b:free",
      "swe_score": 0,
      "cost_per_1m_tokens": { "input": 0, "output": 0 },
      "allowed_roles": ["main", "fallback"],
      "max_tokens": 32768
    }
  ]
}