Compare commits

27 Commits (bedrock_fi...feature/co), SHA1:

- 181012b5ba
- 409a97195a
- ee37e4bbbd
- 6816882c5b
- 1c8b1b405a
- cd197ba1b5
- 5359a33dca
- 3e1b8b957e
- 91b9f11c03
- 030694bb96
- 3e0f696c49
- 4b0c9d9af6
- 3fa91f56e5
- e69ac5d5cf
- c60c9354a4
- 30b895be2c
- 9995075093
- b62cb1bbe7
- 7defcba465
- 3e838ed34b
- 1b8c320c57
- 5da5b59bde
- 04f44a2d3d
- 36fe838fd5
- 415b1835d4
- 78112277b3
- 2bb4260966
.changeset/bright-llamas-enter.md (new file, 12 lines)
@@ -0,0 +1,12 @@
---
"task-master-ai": patch
---

Fix expand command preserving tagged task structure and preventing data corruption

- Enhance E2E tests with comprehensive tag-aware expand testing to verify tag corruption fix
- Add new test section for feature-expand tag creation and testing during expand operations
- Verify tag preservation during expand, force expand, and expand --all operations
- Test that master tag remains intact while feature-expand tag receives subtasks correctly
- Fix file path references to use correct .taskmaster/config.json and .taskmaster/tasks/tasks.json locations
- All tag corruption verification tests pass successfully, confirming the expand command tag corruption bug fix works as expected
.changeset/huge-moose-prove.md (new file, 8 lines)
@@ -0,0 +1,8 @@
---
"task-master-ai": minor
---

Can now configure baseURL of provider with `<PROVIDER>_BASE_URL`

- For example:
- `OPENAI_BASE_URL`
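As a quick illustration of this change, pointing a provider at an OpenAI-compatible endpoint only requires exporting the matching variable before running Task Master; the URL below is a placeholder, not a real endpoint:

```bash
# Placeholder endpoint; substitute your own OpenAI-compatible base URL
export OPENAI_BASE_URL="https://api.third-party.com/v1"
# Subsequent commands that call this provider now use the overridden base URL
task-master parse-prd --input=prd.txt
```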
.changeset/modern-cats-pick.md (new file, 5 lines)
@@ -0,0 +1,5 @@
---
"task-master-ai": patch
---

Improves Amazon Bedrock support

@@ -1,5 +0,0 @@
---
"task-master-ai": patch
---

Fix contextGatherer bug when adding a task `Cannot read properties of undefined (reading 'forEach')`
.changeset/shy-groups-fly.md (new file, 5 lines)
@@ -0,0 +1,5 @@
---
"task-master-ai": minor
---

Add better support for python projects by adding `pyproject.toml` as a projectRoot marker
.changeset/tiny-dogs-change.md (new file, 5 lines)
@@ -0,0 +1,5 @@
---
"task-master-ai": patch
---

Improve mcp keys check in cursor
.changeset/wet-berries-dress.md (new file, 22 lines)
@@ -0,0 +1,22 @@
---
"task-master-ai": minor
---

Add Claude Code provider support

Introduces a new provider that enables using Claude models (Opus and Sonnet) through the Claude Code CLI without requiring an API key.

Key features:
- New claude-code provider with support for opus and sonnet models
- No API key required - uses local Claude Code CLI installation
- Optional dependency - won't affect users who don't need Claude Code
- Lazy loading ensures the provider only loads when requested
- Full integration with existing Task Master commands and workflows
- Comprehensive test coverage for reliability
- New --claude-code flag for the models command

Users can now configure Claude Code models with:
task-master models --set-main sonnet --claude-code
task-master models --set-research opus --claude-code

The @anthropic-ai/claude-code package is optional and won't be installed unless explicitly needed.
@@ -1,5 +1,11 @@
# task-master-ai

## 0.17.1

### Patch Changes

- [#789](https://github.com/eyaltoledano/claude-task-master/pull/789) [`8cde6c2`](https://github.com/eyaltoledano/claude-task-master/commit/8cde6c27087f401d085fe267091ae75334309d96) Thanks [@Crunchyman-ralph](https://github.com/Crunchyman-ralph)! - Fix contextGatherer bug when adding a task `Cannot read properties of undefined (reading 'forEach')`

## 0.17.0

### Minor Changes

README.md (22 lines changed)
@@ -47,8 +47,9 @@ At least one (1) of the following is required:
- Perplexity API key (for research model)
- xAI API Key (for research or main model)
- OpenRouter API Key (for research or main model)
- Claude Code (no API key required - requires Claude Code CLI)

Using the research model is optional but highly recommended. You will need at least ONE API key. Adding all API keys enables you to seamlessly switch between model providers at will.
Using the research model is optional but highly recommended. You will need at least ONE API key (unless using Claude Code). Adding all API keys enables you to seamlessly switch between model providers at will.

## Quick Start

@@ -93,6 +94,8 @@ MCP (Model Control Protocol) lets you run Task Master directly from your editor.

> 🔑 Replace `YOUR_…_KEY_HERE` with your real API keys. You can remove keys you don't use.

> **Note**: If you see `0 tools enabled` in the MCP settings, try removing the `--package=task-master-ai` flag from `args`.

###### VS Code (`servers` + `type`)

```json
@@ -131,7 +134,12 @@ In your editor's AI chat pane, say:
Change the main, research and fallback models to <model_name>, <model_name> and <model_name> respectively.
```

[Table of available models](docs/models.md)
For example, to use Claude Code (no API key required):
```txt
Change the main model to claude-code/sonnet
```

[Table of available models](docs/models.md) | [Claude Code setup](docs/examples/claude-code-usage.md)

#### 4. Initialize Task Master

@@ -224,6 +232,16 @@ task-master generate
task-master rules add windsurf,roo,vscode
```

## Claude Code Support

Task Master now supports Claude models through the Claude Code CLI, which requires no API key:

- **Models**: `claude-code/opus` and `claude-code/sonnet`
- **Requirements**: Claude Code CLI installed
- **Benefits**: No API key needed, uses your local Claude instance

[Learn more about Claude Code setup](docs/examples/claude-code-usage.md)
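For example, once the Claude Code CLI is installed, the main role can be switched over from the command line with the new `--claude-code` flag added in this PR:

```bash
task-master models --set-main sonnet --claude-code
```
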
## Troubleshooting

### If `task-master init` doesn't respond

@@ -72,6 +72,7 @@ Taskmaster uses two primary methods for configuration:
- `XAI_API_KEY`: Your X-AI API key.
- **Optional Endpoint Overrides:**
  - **Per-role `baseURL` in `.taskmasterconfig`:** You can add a `baseURL` property to any model role (`main`, `research`, `fallback`) to override the default API endpoint for that provider. If omitted, the provider's standard endpoint is used.
  - **Environment Variable Overrides (`<PROVIDER>_BASE_URL`):** For greater flexibility, especially with third-party services, you can set an environment variable like `OPENAI_BASE_URL` or `MISTRAL_BASE_URL`. This will override any `baseURL` set in the configuration file for that provider. This is the recommended way to connect to OpenAI-compatible APIs.
  - `AZURE_OPENAI_ENDPOINT`: Required if using Azure OpenAI key (can also be set as `baseURL` for the Azure model role).
  - `OLLAMA_BASE_URL`: Override the default Ollama API URL (Default: `http://localhost:11434/api`).
- `VERTEX_PROJECT_ID`: Your Google Cloud project ID for Vertex AI. Required when using the 'vertex' provider.
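For the per-role override, a minimal sketch of what such an entry could look like is shown below; the `provider`/`modelId` fields follow the config example in `docs/examples/claude-code-usage.md`, the URL is a placeholder for an OpenAI-compatible endpoint, and the exact config file name may be `.taskmasterconfig` or `.taskmaster/config.json` depending on the version:

```json
{
  "models": {
    "main": {
      "provider": "openai",
      "modelId": "gpt-4o",
      "baseURL": "https://api.third-party.com/v1"
    }
  }
}
```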
@@ -131,13 +132,14 @@ PERPLEXITY_API_KEY=pplx-your-key-here
# etc.

# Optional Endpoint Overrides
# Use a specific provider's base URL, e.g., for an OpenAI-compatible API
# OPENAI_BASE_URL=https://api.third-party.com/v1
#
# AZURE_OPENAI_ENDPOINT=https://your-azure-endpoint.openai.azure.com/
# OLLAMA_BASE_URL=http://custom-ollama-host:11434/api

# Google Vertex AI Configuration (Required if using 'vertex' provider)
# VERTEX_PROJECT_ID=your-gcp-project-id
# VERTEX_LOCATION=us-central1
# GOOGLE_APPLICATION_CREDENTIALS=/path/to/service-account-credentials.json
```

## Troubleshooting

docs/examples/claude-code-usage.md (new file, 169 lines)
@@ -0,0 +1,169 @@
# Claude Code Provider Usage Example

The Claude Code provider allows you to use Claude models through the Claude Code CLI without requiring an API key.

## Configuration

To use the Claude Code provider, update your `.taskmaster/config.json`:

```json
{
  "models": {
    "main": {
      "provider": "claude-code",
      "modelId": "sonnet",
      "maxTokens": 64000,
      "temperature": 0.2
    },
    "research": {
      "provider": "claude-code",
      "modelId": "opus",
      "maxTokens": 32000,
      "temperature": 0.1
    },
    "fallback": {
      "provider": "claude-code",
      "modelId": "sonnet",
      "maxTokens": 64000,
      "temperature": 0.2
    }
  }
}
```

## Available Models

- `opus` - Claude Opus model (SWE score: 0.725)
- `sonnet` - Claude Sonnet model (SWE score: 0.727)

## Usage

Once configured, you can use Claude Code with all Task Master commands:

```bash
# Generate tasks from a PRD
task-master parse-prd --input=prd.txt

# Analyze project complexity
task-master analyze-complexity

# Show the next task to work on
task-master next

# View a specific task
task-master show task-001

# Update task status
task-master set-status --id=task-001 --status=in-progress
```

## Requirements

1. Claude Code CLI must be installed and authenticated on your system
2. Install the optional `@anthropic-ai/claude-code` package if you enable this provider:
   ```bash
   npm install @anthropic-ai/claude-code
   ```
3. No API key is required in your environment variables or MCP configuration

## Advanced Settings

The Claude Code SDK supports additional settings that provide fine-grained control over Claude's behavior. While these settings are implemented in the underlying SDK (`src/ai-providers/custom-sdk/claude-code/`), they are not currently exposed through Task Master's standard API due to architectural constraints.

### Supported Settings

```javascript
const settings = {
  // Maximum conversation turns Claude can make in a single request
  maxTurns: 5,

  // Custom system prompt to override Claude Code's default behavior
  customSystemPrompt: "You are a helpful assistant focused on code quality",

  // Permission mode for file system operations
  permissionMode: 'default', // Options: 'default', 'restricted', 'permissive'

  // Explicitly allow only certain tools
  allowedTools: ['Read', 'LS'], // Claude can only read files and list directories

  // Explicitly disallow certain tools
  disallowedTools: ['Write', 'Edit'], // Prevent Claude from modifying files

  // MCP servers for additional tool integrations
  mcpServers: []
};
```

### Current Limitations

Task Master uses a standardized `BaseAIProvider` interface that only passes through common parameters (modelId, messages, maxTokens, temperature) to maintain consistency across all providers. The Claude Code advanced settings are implemented in the SDK but not accessible through Task Master's high-level commands.

### Future Integration Options

For developers who need to use these advanced settings, there are three potential approaches:

#### Option 1: Extend BaseAIProvider
Modify the core Task Master architecture to support provider-specific settings:

```javascript
// In BaseAIProvider
const result = await generateText({
  model: client(params.modelId),
  messages: params.messages,
  maxTokens: params.maxTokens,
  temperature: params.temperature,
  ...params.providerSettings // New: pass through provider-specific settings
});
```

#### Option 2: Override Methods in ClaudeCodeProvider
Create custom implementations that extract and use Claude-specific settings:

```javascript
// In ClaudeCodeProvider
async generateText(params) {
  const { maxTurns, allowedTools, disallowedTools, ...baseParams } = params;

  const client = this.getClient({
    ...baseParams,
    settings: { maxTurns, allowedTools, disallowedTools }
  });

  // Continue with generation...
}
```

#### Option 3: Direct SDK Usage
For immediate access to advanced features, developers can use the Claude Code SDK directly:

```javascript
import { createClaudeCode } from 'task-master-ai/ai-providers/custom-sdk/claude-code';

const claude = createClaudeCode({
  defaultSettings: {
    maxTurns: 5,
    allowedTools: ['Read', 'LS'],
    disallowedTools: ['Write', 'Edit']
  }
});

const model = claude('sonnet');
const result = await generateText({
  model,
  messages: [{ role: 'user', content: 'Analyze this code...' }]
});
```

### Why These Settings Matter

- **maxTurns**: Useful for complex refactoring tasks that require multiple iterations
- **customSystemPrompt**: Allows specializing Claude for specific domains or coding standards
- **permissionMode**: Critical for security in production environments
- **allowedTools/disallowedTools**: Enable read-only analysis modes or restrict access to sensitive operations
- **mcpServers**: Future extensibility for custom tool integrations

## Notes

- The Claude Code provider doesn't track usage costs (shown as 0 in telemetry)
- Session management is handled automatically for conversation continuity
- Some AI SDK parameters (temperature, maxTokens) are not supported by Claude Code CLI and will be ignored
docs/models.md (245 lines changed)
@@ -2,127 +2,136 @@

## Main Models

| Provider | Model Name | SWE Score | Input Cost | Output Cost |
|
||||
| ---------- | ---------------------------------------------- | --------- | ---------- | ----------- |
|
||||
| anthropic | claude-sonnet-4-20250514 | 0.727 | 3 | 15 |
|
||||
| anthropic | claude-opus-4-20250514 | 0.725 | 15 | 75 |
|
||||
| anthropic | claude-3-7-sonnet-20250219 | 0.623 | 3 | 15 |
|
||||
| anthropic | claude-3-5-sonnet-20241022 | 0.49 | 3 | 15 |
|
||||
| openai | gpt-4o | 0.332 | 2.5 | 10 |
|
||||
| openai | o1 | 0.489 | 15 | 60 |
|
||||
| openai | o3 | 0.5 | 2 | 8 |
|
||||
| openai | o3-mini | 0.493 | 1.1 | 4.4 |
|
||||
| openai | o4-mini | 0.45 | 1.1 | 4.4 |
|
||||
| openai | o1-mini | 0.4 | 1.1 | 4.4 |
|
||||
| openai | o1-pro | — | 150 | 600 |
|
||||
| openai | gpt-4-5-preview | 0.38 | 75 | 150 |
|
||||
| openai | gpt-4-1-mini | — | 0.4 | 1.6 |
|
||||
| openai | gpt-4-1-nano | — | 0.1 | 0.4 |
|
||||
| openai | gpt-4o-mini | 0.3 | 0.15 | 0.6 |
|
||||
| google | gemini-2.5-pro-preview-05-06 | 0.638 | — | — |
|
||||
| google | gemini-2.5-pro-preview-03-25 | 0.638 | — | — |
|
||||
| google | gemini-2.5-flash-preview-04-17 | 0.604 | — | — |
|
||||
| google | gemini-2.0-flash | 0.518 | 0.15 | 0.6 |
|
||||
| google | gemini-2.0-flash-lite | — | — | — |
|
||||
| perplexity | sonar-pro | — | 3 | 15 |
|
||||
| perplexity | sonar-reasoning-pro | 0.211 | 2 | 8 |
|
||||
| perplexity | sonar-reasoning | 0.211 | 1 | 5 |
|
||||
| xai | grok-3 | — | 3 | 15 |
|
||||
| xai | grok-3-fast | — | 5 | 25 |
|
||||
| ollama | devstral:latest | — | 0 | 0 |
|
||||
| ollama | qwen3:latest | — | 0 | 0 |
|
||||
| ollama | qwen3:14b | — | 0 | 0 |
|
||||
| ollama | qwen3:32b | — | 0 | 0 |
|
||||
| ollama | mistral-small3.1:latest | — | 0 | 0 |
|
||||
| ollama | llama3.3:latest | — | 0 | 0 |
|
||||
| ollama | phi4:latest | — | 0 | 0 |
|
||||
| openrouter | google/gemini-2.5-flash-preview-05-20 | — | 0.15 | 0.6 |
|
||||
| openrouter | google/gemini-2.5-flash-preview-05-20:thinking | — | 0.15 | 3.5 |
|
||||
| openrouter | google/gemini-2.5-pro-exp-03-25 | — | 0 | 0 |
|
||||
| openrouter | deepseek/deepseek-chat-v3-0324:free | — | 0 | 0 |
|
||||
| openrouter | deepseek/deepseek-chat-v3-0324 | — | 0.27 | 1.1 |
|
||||
| openrouter | openai/gpt-4.1 | — | 2 | 8 |
|
||||
| openrouter | openai/gpt-4.1-mini | — | 0.4 | 1.6 |
|
||||
| openrouter | openai/gpt-4.1-nano | — | 0.1 | 0.4 |
|
||||
| openrouter | openai/o3 | — | 10 | 40 |
|
||||
| openrouter | openai/codex-mini | — | 1.5 | 6 |
|
||||
| openrouter | openai/gpt-4o-mini | — | 0.15 | 0.6 |
|
||||
| openrouter | openai/o4-mini | 0.45 | 1.1 | 4.4 |
|
||||
| openrouter | openai/o4-mini-high | — | 1.1 | 4.4 |
|
||||
| openrouter | openai/o1-pro | — | 150 | 600 |
|
||||
| openrouter | meta-llama/llama-3.3-70b-instruct | — | 120 | 600 |
|
||||
| openrouter | meta-llama/llama-4-maverick | — | 0.18 | 0.6 |
|
||||
| openrouter | meta-llama/llama-4-scout | — | 0.08 | 0.3 |
|
||||
| openrouter | qwen/qwen-max | — | 1.6 | 6.4 |
|
||||
| openrouter | qwen/qwen-turbo | — | 0.05 | 0.2 |
|
||||
| openrouter | qwen/qwen3-235b-a22b | — | 0.14 | 2 |
|
||||
| openrouter | mistralai/mistral-small-3.1-24b-instruct:free | — | 0 | 0 |
|
||||
| openrouter | mistralai/mistral-small-3.1-24b-instruct | — | 0.1 | 0.3 |
|
||||
| openrouter | mistralai/devstral-small | — | 0.1 | 0.3 |
|
||||
| openrouter | mistralai/mistral-nemo | — | 0.03 | 0.07 |
|
||||
| openrouter | thudm/glm-4-32b:free | — | 0 | 0 |
|
||||
| Provider | Model Name | SWE Score | Input Cost | Output Cost |
|
||||
| ----------- | ---------------------------------------------- | --------- | ---------- | ----------- |
|
||||
| bedrock | us.anthropic.claude-3-7-sonnet-20250219-v1:0 | 0.623 | 3 | 15 |
|
||||
| anthropic | claude-sonnet-4-20250514 | 0.727 | 3 | 15 |
|
||||
| anthropic | claude-opus-4-20250514 | 0.725 | 15 | 75 |
|
||||
| anthropic | claude-3-7-sonnet-20250219 | 0.623 | 3 | 15 |
|
||||
| anthropic | claude-3-5-sonnet-20241022 | 0.49 | 3 | 15 |
|
||||
| openai | gpt-4o | 0.332 | 2.5 | 10 |
|
||||
| openai | o1 | 0.489 | 15 | 60 |
|
||||
| openai | o3 | 0.5 | 2 | 8 |
|
||||
| openai | o3-mini | 0.493 | 1.1 | 4.4 |
|
||||
| openai | o4-mini | 0.45 | 1.1 | 4.4 |
|
||||
| openai | o1-mini | 0.4 | 1.1 | 4.4 |
|
||||
| openai | o1-pro | — | 150 | 600 |
|
||||
| openai | gpt-4-5-preview | 0.38 | 75 | 150 |
|
||||
| openai | gpt-4-1-mini | — | 0.4 | 1.6 |
|
||||
| openai | gpt-4-1-nano | — | 0.1 | 0.4 |
|
||||
| openai | gpt-4o-mini | 0.3 | 0.15 | 0.6 |
|
||||
| google | gemini-2.5-pro-preview-05-06 | 0.638 | — | — |
|
||||
| google | gemini-2.5-pro-preview-03-25 | 0.638 | — | — |
|
||||
| google | gemini-2.5-flash-preview-04-17 | 0.604 | — | — |
|
||||
| google | gemini-2.0-flash | 0.518 | 0.15 | 0.6 |
|
||||
| google | gemini-2.0-flash-lite | — | — | — |
|
||||
| perplexity | sonar-pro | — | 3 | 15 |
|
||||
| perplexity | sonar-reasoning-pro | 0.211 | 2 | 8 |
|
||||
| perplexity | sonar-reasoning | 0.211 | 1 | 5 |
|
||||
| xai | grok-3 | — | 3 | 15 |
|
||||
| xai | grok-3-fast | — | 5 | 25 |
|
||||
| ollama | devstral:latest | — | 0 | 0 |
|
||||
| ollama | qwen3:latest | — | 0 | 0 |
|
||||
| ollama | qwen3:14b | — | 0 | 0 |
|
||||
| ollama | qwen3:32b | — | 0 | 0 |
|
||||
| ollama | mistral-small3.1:latest | — | 0 | 0 |
|
||||
| ollama | llama3.3:latest | — | 0 | 0 |
|
||||
| ollama | phi4:latest | — | 0 | 0 |
|
||||
| openrouter | google/gemini-2.5-flash-preview-05-20 | — | 0.15 | 0.6 |
|
||||
| openrouter | google/gemini-2.5-flash-preview-05-20:thinking | — | 0.15 | 3.5 |
|
||||
| openrouter | google/gemini-2.5-pro-exp-03-25 | — | 0 | 0 |
|
||||
| openrouter | deepseek/deepseek-chat-v3-0324:free | — | 0 | 0 |
|
||||
| openrouter | deepseek/deepseek-chat-v3-0324 | — | 0.27 | 1.1 |
|
||||
| openrouter | openai/gpt-4.1 | — | 2 | 8 |
|
||||
| openrouter | openai/gpt-4.1-mini | — | 0.4 | 1.6 |
|
||||
| openrouter | openai/gpt-4.1-nano | — | 0.1 | 0.4 |
|
||||
| openrouter | openai/o3 | — | 10 | 40 |
|
||||
| openrouter | openai/codex-mini | — | 1.5 | 6 |
|
||||
| openrouter | openai/gpt-4o-mini | — | 0.15 | 0.6 |
|
||||
| openrouter | openai/o4-mini | 0.45 | 1.1 | 4.4 |
|
||||
| openrouter | openai/o4-mini-high | — | 1.1 | 4.4 |
|
||||
| openrouter | openai/o1-pro | — | 150 | 600 |
|
||||
| openrouter | meta-llama/llama-3.3-70b-instruct | — | 120 | 600 |
|
||||
| openrouter | meta-llama/llama-4-maverick | — | 0.18 | 0.6 |
|
||||
| openrouter | meta-llama/llama-4-scout | — | 0.08 | 0.3 |
|
||||
| openrouter | qwen/qwen-max | — | 1.6 | 6.4 |
|
||||
| openrouter | qwen/qwen-turbo | — | 0.05 | 0.2 |
|
||||
| openrouter | qwen/qwen3-235b-a22b | — | 0.14 | 2 |
|
||||
| openrouter | mistralai/mistral-small-3.1-24b-instruct:free | — | 0 | 0 |
|
||||
| openrouter | mistralai/mistral-small-3.1-24b-instruct | — | 0.1 | 0.3 |
|
||||
| openrouter | mistralai/devstral-small | — | 0.1 | 0.3 |
|
||||
| openrouter | mistralai/mistral-nemo | — | 0.03 | 0.07 |
|
||||
| openrouter | thudm/glm-4-32b:free | — | 0 | 0 |
|
||||
| claude-code | opus | 0.725 | 0 | 0 |
|
||||
| claude-code | sonnet | 0.727 | 0 | 0 |
|
||||
|
||||
## Research Models
|
||||
|
||||
| Provider | Model Name | SWE Score | Input Cost | Output Cost |
|
||||
| ---------- | -------------------------- | --------- | ---------- | ----------- |
|
||||
| openai | gpt-4o-search-preview | 0.33 | 2.5 | 10 |
|
||||
| openai | gpt-4o-mini-search-preview | 0.3 | 0.15 | 0.6 |
|
||||
| perplexity | sonar-pro | — | 3 | 15 |
|
||||
| perplexity | sonar | — | 1 | 1 |
|
||||
| perplexity | deep-research | 0.211 | 2 | 8 |
|
||||
| perplexity | sonar-reasoning-pro | 0.211 | 2 | 8 |
|
||||
| perplexity | sonar-reasoning | 0.211 | 1 | 5 |
|
||||
| xai | grok-3 | — | 3 | 15 |
|
||||
| xai | grok-3-fast | — | 5 | 25 |
|
||||
| Provider | Model Name | SWE Score | Input Cost | Output Cost |
|
||||
| ----------- | -------------------------- | --------- | ---------- | ----------- |
|
||||
| bedrock | us.deepseek.r1-v1:0 | — | 1.35 | 5.4 |
|
||||
| openai | gpt-4o-search-preview | 0.33 | 2.5 | 10 |
|
||||
| openai | gpt-4o-mini-search-preview | 0.3 | 0.15 | 0.6 |
|
||||
| perplexity | sonar-pro | — | 3 | 15 |
|
||||
| perplexity | sonar | — | 1 | 1 |
|
||||
| perplexity | deep-research | 0.211 | 2 | 8 |
|
||||
| perplexity | sonar-reasoning-pro | 0.211 | 2 | 8 |
|
||||
| perplexity | sonar-reasoning | 0.211 | 1 | 5 |
|
||||
| xai | grok-3 | — | 3 | 15 |
|
||||
| xai | grok-3-fast | — | 5 | 25 |
|
||||
| claude-code | opus | 0.725 | 0 | 0 |
|
||||
| claude-code | sonnet | 0.727 | 0 | 0 |
|
||||
|
||||
## Fallback Models
|
||||
|
||||
| Provider | Model Name | SWE Score | Input Cost | Output Cost |
|
||||
| ---------- | ---------------------------------------------- | --------- | ---------- | ----------- |
|
||||
| anthropic | claude-sonnet-4-20250514 | 0.727 | 3 | 15 |
|
||||
| anthropic | claude-opus-4-20250514 | 0.725 | 15 | 75 |
|
||||
| anthropic | claude-3-7-sonnet-20250219 | 0.623 | 3 | 15 |
|
||||
| anthropic | claude-3-5-sonnet-20241022 | 0.49 | 3 | 15 |
|
||||
| openai | gpt-4o | 0.332 | 2.5 | 10 |
|
||||
| openai | o3 | 0.5 | 2 | 8 |
|
||||
| openai | o4-mini | 0.45 | 1.1 | 4.4 |
|
||||
| google | gemini-2.5-pro-preview-05-06 | 0.638 | — | — |
|
||||
| google | gemini-2.5-pro-preview-03-25 | 0.638 | — | — |
|
||||
| google | gemini-2.5-flash-preview-04-17 | 0.604 | — | — |
|
||||
| google | gemini-2.0-flash | 0.518 | 0.15 | 0.6 |
|
||||
| google | gemini-2.0-flash-lite | — | — | — |
|
||||
| perplexity | sonar-reasoning-pro | 0.211 | 2 | 8 |
|
||||
| perplexity | sonar-reasoning | 0.211 | 1 | 5 |
|
||||
| xai | grok-3 | — | 3 | 15 |
|
||||
| xai | grok-3-fast | — | 5 | 25 |
|
||||
| ollama | devstral:latest | — | 0 | 0 |
|
||||
| ollama | qwen3:latest | — | 0 | 0 |
|
||||
| ollama | qwen3:14b | — | 0 | 0 |
|
||||
| ollama | qwen3:32b | — | 0 | 0 |
|
||||
| ollama | mistral-small3.1:latest | — | 0 | 0 |
|
||||
| ollama | llama3.3:latest | — | 0 | 0 |
|
||||
| ollama | phi4:latest | — | 0 | 0 |
|
||||
| openrouter | google/gemini-2.5-flash-preview-05-20 | — | 0.15 | 0.6 |
|
||||
| openrouter | google/gemini-2.5-flash-preview-05-20:thinking | — | 0.15 | 3.5 |
|
||||
| openrouter | google/gemini-2.5-pro-exp-03-25 | — | 0 | 0 |
|
||||
| openrouter | deepseek/deepseek-chat-v3-0324:free | — | 0 | 0 |
|
||||
| openrouter | openai/gpt-4.1 | — | 2 | 8 |
|
||||
| openrouter | openai/gpt-4.1-mini | — | 0.4 | 1.6 |
|
||||
| openrouter | openai/gpt-4.1-nano | — | 0.1 | 0.4 |
|
||||
| openrouter | openai/o3 | — | 10 | 40 |
|
||||
| openrouter | openai/codex-mini | — | 1.5 | 6 |
|
||||
| openrouter | openai/gpt-4o-mini | — | 0.15 | 0.6 |
|
||||
| openrouter | openai/o4-mini | 0.45 | 1.1 | 4.4 |
|
||||
| openrouter | openai/o4-mini-high | — | 1.1 | 4.4 |
|
||||
| openrouter | openai/o1-pro | — | 150 | 600 |
|
||||
| openrouter | meta-llama/llama-3.3-70b-instruct | — | 120 | 600 |
|
||||
| openrouter | meta-llama/llama-4-maverick | — | 0.18 | 0.6 |
|
||||
| openrouter | meta-llama/llama-4-scout | — | 0.08 | 0.3 |
|
||||
| openrouter | qwen/qwen-max | — | 1.6 | 6.4 |
|
||||
| openrouter | qwen/qwen-turbo | — | 0.05 | 0.2 |
|
||||
| openrouter | qwen/qwen3-235b-a22b | — | 0.14 | 2 |
|
||||
| openrouter | mistralai/mistral-small-3.1-24b-instruct:free | — | 0 | 0 |
|
||||
| openrouter | mistralai/mistral-small-3.1-24b-instruct | — | 0.1 | 0.3 |
|
||||
| openrouter | mistralai/mistral-nemo | — | 0.03 | 0.07 |
|
||||
| openrouter | thudm/glm-4-32b:free | — | 0 | 0 |
|
||||
| Provider | Model Name | SWE Score | Input Cost | Output Cost |
|
||||
| ----------- | ---------------------------------------------- | --------- | ---------- | ----------- |
|
||||
| bedrock | us.anthropic.claude-3-7-sonnet-20250219-v1:0 | 0.623 | 3 | 15 |
|
||||
| anthropic | claude-sonnet-4-20250514 | 0.727 | 3 | 15 |
|
||||
| anthropic | claude-opus-4-20250514 | 0.725 | 15 | 75 |
|
||||
| anthropic | claude-3-7-sonnet-20250219 | 0.623 | 3 | 15 |
|
||||
| anthropic | claude-3-5-sonnet-20241022 | 0.49 | 3 | 15 |
|
||||
| openai | gpt-4o | 0.332 | 2.5 | 10 |
|
||||
| openai | o3 | 0.5 | 2 | 8 |
|
||||
| openai | o4-mini | 0.45 | 1.1 | 4.4 |
|
||||
| google | gemini-2.5-pro-preview-05-06 | 0.638 | — | — |
|
||||
| google | gemini-2.5-pro-preview-03-25 | 0.638 | — | — |
|
||||
| google | gemini-2.5-flash-preview-04-17 | 0.604 | — | — |
|
||||
| google | gemini-2.0-flash | 0.518 | 0.15 | 0.6 |
|
||||
| google | gemini-2.0-flash-lite | — | — | — |
|
||||
| perplexity | sonar-reasoning-pro | 0.211 | 2 | 8 |
|
||||
| perplexity | sonar-reasoning | 0.211 | 1 | 5 |
|
||||
| xai | grok-3 | — | 3 | 15 |
|
||||
| xai | grok-3-fast | — | 5 | 25 |
|
||||
| ollama | devstral:latest | — | 0 | 0 |
|
||||
| ollama | qwen3:latest | — | 0 | 0 |
|
||||
| ollama | qwen3:14b | — | 0 | 0 |
|
||||
| ollama | qwen3:32b | — | 0 | 0 |
|
||||
| ollama | mistral-small3.1:latest | — | 0 | 0 |
|
||||
| ollama | llama3.3:latest | — | 0 | 0 |
|
||||
| ollama | phi4:latest | — | 0 | 0 |
|
||||
| openrouter | google/gemini-2.5-flash-preview-05-20 | — | 0.15 | 0.6 |
|
||||
| openrouter | google/gemini-2.5-flash-preview-05-20:thinking | — | 0.15 | 3.5 |
|
||||
| openrouter | google/gemini-2.5-pro-exp-03-25 | — | 0 | 0 |
|
||||
| openrouter | deepseek/deepseek-chat-v3-0324:free | — | 0 | 0 |
|
||||
| openrouter | openai/gpt-4.1 | — | 2 | 8 |
|
||||
| openrouter | openai/gpt-4.1-mini | — | 0.4 | 1.6 |
|
||||
| openrouter | openai/gpt-4.1-nano | — | 0.1 | 0.4 |
|
||||
| openrouter | openai/o3 | — | 10 | 40 |
|
||||
| openrouter | openai/codex-mini | — | 1.5 | 6 |
|
||||
| openrouter | openai/gpt-4o-mini | — | 0.15 | 0.6 |
|
||||
| openrouter | openai/o4-mini | 0.45 | 1.1 | 4.4 |
|
||||
| openrouter | openai/o4-mini-high | — | 1.1 | 4.4 |
|
||||
| openrouter | openai/o1-pro | — | 150 | 600 |
|
||||
| openrouter | meta-llama/llama-3.3-70b-instruct | — | 120 | 600 |
|
||||
| openrouter | meta-llama/llama-4-maverick | — | 0.18 | 0.6 |
|
||||
| openrouter | meta-llama/llama-4-scout | — | 0.08 | 0.3 |
|
||||
| openrouter | qwen/qwen-max | — | 1.6 | 6.4 |
|
||||
| openrouter | qwen/qwen-turbo | — | 0.05 | 0.2 |
|
||||
| openrouter | qwen/qwen3-235b-a22b | — | 0.14 | 2 |
|
||||
| openrouter | mistralai/mistral-small-3.1-24b-instruct:free | — | 0 | 0 |
|
||||
| openrouter | mistralai/mistral-small-3.1-24b-instruct | — | 0.1 | 0.3 |
|
||||
| openrouter | mistralai/mistral-nemo | — | 0.03 | 0.07 |
|
||||
| openrouter | thudm/glm-4-32b:free | — | 0 | 0 |
|
||||
| claude-code | opus | 0.725 | 0 | 0 |
|
||||
| claude-code | sonnet | 0.727 | 0 | 0 |
|
||||
|
||||
@@ -26,6 +26,7 @@ import { createLogWrapper } from '../../tools/utils.js';
 * @param {string} [args.prompt] - Additional context to guide subtask generation.
 * @param {boolean} [args.force] - Force expansion even if subtasks exist.
 * @param {string} [args.projectRoot] - Project root directory.
 * @param {string} [args.tag] - Tag for the task
 * @param {Object} log - Logger object
 * @param {Object} context - Context object containing session
 * @param {Object} [context.session] - MCP Session object
@@ -34,7 +35,8 @@ import { createLogWrapper } from '../../tools/utils.js';
export async function expandTaskDirect(args, log, context = {}) {
  const { session } = context; // Extract session
  // Destructure expected args, including projectRoot
  const { tasksJsonPath, id, num, research, prompt, force, projectRoot } = args;
  const { tasksJsonPath, id, num, research, prompt, force, projectRoot, tag } =
    args;

  // Log session root data for debugging
  log.info(
@@ -194,7 +196,8 @@ export async function expandTaskDirect(args, log, context = {}) {
        session,
        projectRoot,
        commandName: 'expand-task',
        outputType: 'mcp'
        outputType: 'mcp',
        tag
      },
      forceFlag
    );

@@ -13,6 +13,41 @@ import {
  disableSilentMode
} from '../../../../scripts/modules/utils.js';
import { createLogWrapper } from '../../tools/utils.js';
import { CUSTOM_PROVIDERS_ARRAY } from '../../../../src/constants/providers.js';

// Define supported roles for model setting
const MODEL_ROLES = ['main', 'research', 'fallback'];

/**
 * Determine provider hint from custom provider flags
 * @param {Object} args - Arguments containing provider flags
 * @returns {string|undefined} Provider hint or undefined if no custom provider flag is set
 */
function getProviderHint(args) {
  return CUSTOM_PROVIDERS_ARRAY.find((provider) => args[provider]);
}

/**
 * Handle setting models for different roles
 * @param {Object} args - Arguments containing role-specific model IDs
 * @param {Object} context - Context object with session, mcpLog, projectRoot
 * @returns {Object|null} Result if a model was set, null if no model setting was requested
 */
async function handleModelSetting(args, context) {
  for (const role of MODEL_ROLES) {
    const roleKey = `set${role.charAt(0).toUpperCase() + role.slice(1)}`; // setMain, setResearch, setFallback

    if (args[roleKey]) {
      const providerHint = getProviderHint(args);

      return await setModel(role, args[roleKey], {
        ...context,
        providerHint
      });
    }
  }
  return null; // No model setting was requested
}

/**
 * Get or update model configuration
@@ -31,16 +66,21 @@ export async function modelsDirect(args, log, context = {}) {
  log.info(`Executing models_direct with args: ${JSON.stringify(args)}`);
  log.info(`Using project root: ${projectRoot}`);

  // Validate flags: cannot use both openrouter and ollama simultaneously
  if (args.openrouter && args.ollama) {
  // Validate flags: only one custom provider flag can be used simultaneously
  const customProviderFlags = CUSTOM_PROVIDERS_ARRAY.filter(
    (provider) => args[provider]
  );

  if (customProviderFlags.length > 1) {
    log.error(
      'Error: Cannot use both openrouter and ollama flags simultaneously.'
      'Error: Cannot use multiple custom provider flags simultaneously.'
    );
    return {
      success: false,
      error: {
        code: 'INVALID_ARGS',
        message: 'Cannot use both openrouter and ollama flags simultaneously.'
        message:
          'Cannot use multiple custom provider flags simultaneously. Choose only one: openrouter, ollama, bedrock, azure, or vertex.'
      }
    };
  }
@@ -54,55 +94,22 @@ export async function modelsDirect(args, log, context = {}) {
      return await getAvailableModelsList({
        session,
        mcpLog,
        projectRoot // Pass projectRoot to function
        projectRoot
      });
    }

    // Handle setting a specific model
    if (args.setMain) {
      return await setModel('main', args.setMain, {
        session,
        mcpLog,
        projectRoot, // Pass projectRoot to function
        providerHint: args.openrouter
          ? 'openrouter'
          : args.ollama
            ? 'ollama'
            : undefined // Pass hint
      });
    }

    if (args.setResearch) {
      return await setModel('research', args.setResearch, {
        session,
        mcpLog,
        projectRoot, // Pass projectRoot to function
        providerHint: args.openrouter
          ? 'openrouter'
          : args.ollama
            ? 'ollama'
            : undefined // Pass hint
      });
    }

    if (args.setFallback) {
      return await setModel('fallback', args.setFallback, {
        session,
        mcpLog,
        projectRoot, // Pass projectRoot to function
        providerHint: args.openrouter
          ? 'openrouter'
          : args.ollama
            ? 'ollama'
            : undefined // Pass hint
      });
    // Handle setting any model role using unified function
    const modelContext = { session, mcpLog, projectRoot };
    const modelSetResult = await handleModelSetting(args, modelContext);
    if (modelSetResult) {
      return modelSetResult;
    }

    // Default action: get current configuration
    return await getModelConfiguration({
      session,
      mcpLog,
      projectRoot // Pass projectRoot to function
      projectRoot
    });
  } finally {
    disableSilentMode();

@@ -45,7 +45,8 @@ export function registerExpandTaskTool(server) {
        .boolean()
        .optional()
        .default(false)
        .describe('Force expansion even if subtasks exist')
        .describe('Force expansion even if subtasks exist'),
      tag: z.string().optional().describe('Tag context to operate on')
    }),
    execute: withNormalizedProjectRoot(async (args, { log, session }) => {
      try {
@@ -73,7 +74,8 @@ export function registerExpandTaskTool(server) {
          research: args.research,
          prompt: args.prompt,
          force: args.force,
          projectRoot: args.projectRoot
          projectRoot: args.projectRoot,
          tag: args.tag || 'master'
        },
        log,
        { session }

@@ -55,7 +55,21 @@ export function registerModelsTool(server) {
      ollama: z
        .boolean()
        .optional()
        .describe('Indicates the set model ID is a custom Ollama model.')
        .describe('Indicates the set model ID is a custom Ollama model.'),
      bedrock: z
        .boolean()
        .optional()
        .describe('Indicates the set model ID is a custom AWS Bedrock model.'),
      azure: z
        .boolean()
        .optional()
        .describe('Indicates the set model ID is a custom Azure OpenAI model.'),
      vertex: z
        .boolean()
        .optional()
        .describe(
          'Indicates the set model ID is a custom Google Vertex AI model.'
        )
    }),
    execute: withNormalizedProjectRoot(async (args, { log, session }) => {
      try {

package-lock.json (generated, 288 lines changed)
@@ -1,12 +1,12 @@
|
||||
{
|
||||
"name": "task-master-ai",
|
||||
"version": "0.17.0",
|
||||
"version": "0.17.1",
|
||||
"lockfileVersion": 3,
|
||||
"requires": true,
|
||||
"packages": {
|
||||
"": {
|
||||
"name": "task-master-ai",
|
||||
"version": "0.17.0",
|
||||
"version": "0.17.1",
|
||||
"license": "MIT WITH Commons-Clause",
|
||||
"dependencies": {
|
||||
"@ai-sdk/amazon-bedrock": "^2.2.9",
|
||||
@@ -68,6 +68,9 @@
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=18.0.0"
|
||||
},
|
||||
"optionalDependencies": {
|
||||
"@anthropic-ai/claude-code": "^1.0.25"
|
||||
}
|
||||
},
|
||||
"node_modules/@ai-sdk/amazon-bedrock": {
|
||||
@@ -446,6 +449,28 @@
|
||||
"node": ">=6.0.0"
|
||||
}
|
||||
},
|
||||
"node_modules/@anthropic-ai/claude-code": {
|
||||
"version": "1.0.25",
|
||||
"resolved": "https://registry.npmjs.org/@anthropic-ai/claude-code/-/claude-code-1.0.25.tgz",
|
||||
"integrity": "sha512-5p4FLlFO4TuRf0zV0axiOxiAkUC8eer0lqJi/A/pA46LESv31Alw6xaNYgwQVkP6oSbP5PydK36u7YrB9QSaXQ==",
|
||||
"hasInstallScript": true,
|
||||
"license": "SEE LICENSE IN README.md",
|
||||
"optional": true,
|
||||
"bin": {
|
||||
"claude": "cli.js"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=18.0.0"
|
||||
},
|
||||
"optionalDependencies": {
|
||||
"@img/sharp-darwin-arm64": "^0.33.5",
|
||||
"@img/sharp-darwin-x64": "^0.33.5",
|
||||
"@img/sharp-linux-arm": "^0.33.5",
|
||||
"@img/sharp-linux-arm64": "^0.33.5",
|
||||
"@img/sharp-linux-x64": "^0.33.5",
|
||||
"@img/sharp-win32-x64": "^0.33.5"
|
||||
}
|
||||
},
|
||||
"node_modules/@anthropic-ai/sdk": {
|
||||
"version": "0.39.0",
|
||||
"resolved": "https://registry.npmjs.org/@anthropic-ai/sdk/-/sdk-0.39.0.tgz",
|
||||
@@ -2651,6 +2676,215 @@
|
||||
"node": ">=18"
|
||||
}
|
||||
},
|
||||
"node_modules/@img/sharp-darwin-arm64": {
|
||||
"version": "0.33.5",
|
||||
"resolved": "https://registry.npmjs.org/@img/sharp-darwin-arm64/-/sharp-darwin-arm64-0.33.5.tgz",
|
||||
"integrity": "sha512-UT4p+iz/2H4twwAoLCqfA9UH5pI6DggwKEGuaPy7nCVQ8ZsiY5PIcrRvD1DzuY3qYL07NtIQcWnBSY/heikIFQ==",
|
||||
"cpu": [
|
||||
"arm64"
|
||||
],
|
||||
"license": "Apache-2.0",
|
||||
"optional": true,
|
||||
"os": [
|
||||
"darwin"
|
||||
],
|
||||
"engines": {
|
||||
"node": "^18.17.0 || ^20.3.0 || >=21.0.0"
|
||||
},
|
||||
"funding": {
|
||||
"url": "https://opencollective.com/libvips"
|
||||
},
|
||||
"optionalDependencies": {
|
||||
"@img/sharp-libvips-darwin-arm64": "1.0.4"
|
||||
}
|
||||
},
|
||||
"node_modules/@img/sharp-darwin-x64": {
|
||||
"version": "0.33.5",
|
||||
"resolved": "https://registry.npmjs.org/@img/sharp-darwin-x64/-/sharp-darwin-x64-0.33.5.tgz",
|
||||
"integrity": "sha512-fyHac4jIc1ANYGRDxtiqelIbdWkIuQaI84Mv45KvGRRxSAa7o7d1ZKAOBaYbnepLC1WqxfpimdeWfvqqSGwR2Q==",
|
||||
"cpu": [
|
||||
"x64"
|
||||
],
|
||||
"license": "Apache-2.0",
|
||||
"optional": true,
|
||||
"os": [
|
||||
"darwin"
|
||||
],
|
||||
"engines": {
|
||||
"node": "^18.17.0 || ^20.3.0 || >=21.0.0"
|
||||
},
|
||||
"funding": {
|
||||
"url": "https://opencollective.com/libvips"
|
||||
},
|
||||
"optionalDependencies": {
|
||||
"@img/sharp-libvips-darwin-x64": "1.0.4"
|
||||
}
|
||||
},
|
||||
"node_modules/@img/sharp-libvips-darwin-arm64": {
|
||||
"version": "1.0.4",
|
||||
"resolved": "https://registry.npmjs.org/@img/sharp-libvips-darwin-arm64/-/sharp-libvips-darwin-arm64-1.0.4.tgz",
|
||||
"integrity": "sha512-XblONe153h0O2zuFfTAbQYAX2JhYmDHeWikp1LM9Hul9gVPjFY427k6dFEcOL72O01QxQsWi761svJ/ev9xEDg==",
|
||||
"cpu": [
|
||||
"arm64"
|
||||
],
|
||||
"license": "LGPL-3.0-or-later",
|
||||
"optional": true,
|
||||
"os": [
|
||||
"darwin"
|
||||
],
|
||||
"funding": {
|
||||
"url": "https://opencollective.com/libvips"
|
||||
}
|
||||
},
|
||||
"node_modules/@img/sharp-libvips-darwin-x64": {
|
||||
"version": "1.0.4",
|
||||
"resolved": "https://registry.npmjs.org/@img/sharp-libvips-darwin-x64/-/sharp-libvips-darwin-x64-1.0.4.tgz",
|
||||
"integrity": "sha512-xnGR8YuZYfJGmWPvmlunFaWJsb9T/AO2ykoP3Fz/0X5XV2aoYBPkX6xqCQvUTKKiLddarLaxpzNe+b1hjeWHAQ==",
|
||||
"cpu": [
|
||||
"x64"
|
||||
],
|
||||
"license": "LGPL-3.0-or-later",
|
||||
"optional": true,
|
||||
"os": [
|
||||
"darwin"
|
||||
],
|
||||
"funding": {
|
||||
"url": "https://opencollective.com/libvips"
|
||||
}
|
||||
},
|
||||
"node_modules/@img/sharp-libvips-linux-arm": {
|
||||
"version": "1.0.5",
|
||||
"resolved": "https://registry.npmjs.org/@img/sharp-libvips-linux-arm/-/sharp-libvips-linux-arm-1.0.5.tgz",
|
||||
"integrity": "sha512-gvcC4ACAOPRNATg/ov8/MnbxFDJqf/pDePbBnuBDcjsI8PssmjoKMAz4LtLaVi+OnSb5FK/yIOamqDwGmXW32g==",
|
||||
"cpu": [
|
||||
"arm"
|
||||
],
|
||||
"license": "LGPL-3.0-or-later",
|
||||
"optional": true,
|
||||
"os": [
|
||||
"linux"
|
||||
],
|
||||
"funding": {
|
||||
"url": "https://opencollective.com/libvips"
|
||||
}
|
||||
},
|
||||
"node_modules/@img/sharp-libvips-linux-arm64": {
|
||||
"version": "1.0.4",
|
||||
"resolved": "https://registry.npmjs.org/@img/sharp-libvips-linux-arm64/-/sharp-libvips-linux-arm64-1.0.4.tgz",
|
||||
"integrity": "sha512-9B+taZ8DlyyqzZQnoeIvDVR/2F4EbMepXMc/NdVbkzsJbzkUjhXv/70GQJ7tdLA4YJgNP25zukcxpX2/SueNrA==",
|
||||
"cpu": [
|
||||
"arm64"
|
||||
],
|
||||
"license": "LGPL-3.0-or-later",
|
||||
"optional": true,
|
||||
"os": [
|
||||
"linux"
|
||||
],
|
||||
"funding": {
|
||||
"url": "https://opencollective.com/libvips"
|
||||
}
|
||||
},
|
||||
"node_modules/@img/sharp-libvips-linux-x64": {
|
||||
"version": "1.0.4",
|
||||
"resolved": "https://registry.npmjs.org/@img/sharp-libvips-linux-x64/-/sharp-libvips-linux-x64-1.0.4.tgz",
|
||||
"integrity": "sha512-MmWmQ3iPFZr0Iev+BAgVMb3ZyC4KeFc3jFxnNbEPas60e1cIfevbtuyf9nDGIzOaW9PdnDciJm+wFFaTlj5xYw==",
|
||||
"cpu": [
|
||||
"x64"
|
||||
],
|
||||
"license": "LGPL-3.0-or-later",
|
||||
"optional": true,
|
||||
"os": [
|
||||
"linux"
|
||||
],
|
||||
"funding": {
|
||||
"url": "https://opencollective.com/libvips"
|
||||
}
|
||||
},
|
||||
"node_modules/@img/sharp-linux-arm": {
|
||||
"version": "0.33.5",
|
||||
"resolved": "https://registry.npmjs.org/@img/sharp-linux-arm/-/sharp-linux-arm-0.33.5.tgz",
|
||||
"integrity": "sha512-JTS1eldqZbJxjvKaAkxhZmBqPRGmxgu+qFKSInv8moZ2AmT5Yib3EQ1c6gp493HvrvV8QgdOXdyaIBrhvFhBMQ==",
|
||||
"cpu": [
|
||||
"arm"
|
||||
],
|
||||
"license": "Apache-2.0",
|
||||
"optional": true,
|
||||
"os": [
|
||||
"linux"
|
||||
],
|
||||
"engines": {
|
||||
"node": "^18.17.0 || ^20.3.0 || >=21.0.0"
|
||||
},
|
||||
"funding": {
|
||||
"url": "https://opencollective.com/libvips"
|
||||
},
|
||||
"optionalDependencies": {
|
||||
"@img/sharp-libvips-linux-arm": "1.0.5"
|
||||
}
|
||||
},
|
||||
"node_modules/@img/sharp-linux-arm64": {
|
||||
"version": "0.33.5",
|
||||
"resolved": "https://registry.npmjs.org/@img/sharp-linux-arm64/-/sharp-linux-arm64-0.33.5.tgz",
|
||||
"integrity": "sha512-JMVv+AMRyGOHtO1RFBiJy/MBsgz0x4AWrT6QoEVVTyh1E39TrCUpTRI7mx9VksGX4awWASxqCYLCV4wBZHAYxA==",
|
||||
"cpu": [
|
||||
"arm64"
|
||||
],
|
||||
"license": "Apache-2.0",
|
||||
"optional": true,
|
||||
"os": [
|
||||
"linux"
|
||||
],
|
||||
"engines": {
|
||||
"node": "^18.17.0 || ^20.3.0 || >=21.0.0"
|
||||
},
|
||||
"funding": {
|
||||
"url": "https://opencollective.com/libvips"
|
||||
},
|
||||
"optionalDependencies": {
|
||||
"@img/sharp-libvips-linux-arm64": "1.0.4"
|
||||
}
|
||||
},
|
||||
"node_modules/@img/sharp-linux-x64": {
|
||||
"version": "0.33.5",
|
||||
"resolved": "https://registry.npmjs.org/@img/sharp-linux-x64/-/sharp-linux-x64-0.33.5.tgz",
|
||||
"integrity": "sha512-opC+Ok5pRNAzuvq1AG0ar+1owsu842/Ab+4qvU879ippJBHvyY5n2mxF1izXqkPYlGuP/M556uh53jRLJmzTWA==",
|
||||
"cpu": [
|
||||
"x64"
|
||||
],
|
||||
"license": "Apache-2.0",
|
||||
"optional": true,
|
||||
"os": [
|
||||
"linux"
|
||||
],
|
||||
"engines": {
|
||||
"node": "^18.17.0 || ^20.3.0 || >=21.0.0"
|
||||
},
|
||||
"funding": {
|
||||
"url": "https://opencollective.com/libvips"
|
||||
},
|
||||
"optionalDependencies": {
|
||||
"@img/sharp-libvips-linux-x64": "1.0.4"
|
||||
}
|
||||
},
|
||||
"node_modules/@img/sharp-win32-x64": {
|
||||
"version": "0.33.5",
|
||||
"resolved": "https://registry.npmjs.org/@img/sharp-win32-x64/-/sharp-win32-x64-0.33.5.tgz",
|
||||
"integrity": "sha512-MpY/o8/8kj+EcnxwvrP4aTJSWw/aZ7JIGR4aBeZkZw5B7/Jn+tY9/VNwtcoGmdT7GfggGIU4kygOMSbYnOrAbg==",
|
||||
"cpu": [
|
||||
"x64"
|
||||
],
|
||||
"license": "Apache-2.0 AND LGPL-3.0-or-later",
|
||||
"optional": true,
|
||||
"os": [
|
||||
"win32"
|
||||
],
|
||||
"engines": {
|
||||
"node": "^18.17.0 || ^20.3.0 || >=21.0.0"
|
||||
},
|
||||
"funding": {
|
||||
"url": "https://opencollective.com/libvips"
|
||||
}
|
||||
},
|
||||
"node_modules/@inquirer/checkbox": {
|
||||
"version": "4.1.4",
|
||||
"resolved": "https://registry.npmjs.org/@inquirer/checkbox/-/checkbox-4.1.4.tgz",
|
||||
@@ -3868,6 +4102,19 @@
|
||||
"node": ">= 0.6"
|
||||
}
|
||||
},
|
||||
"node_modules/@noble/hashes": {
|
||||
"version": "1.8.0",
|
||||
"resolved": "https://registry.npmjs.org/@noble/hashes/-/hashes-1.8.0.tgz",
|
||||
"integrity": "sha512-jCs9ldd7NwzpgXDIf6P3+NrHh9/sD6CQdxHyjQI+h/6rDNo88ypBxxz45UDuZHz9r3tNz7N/VInSVoVdtXEI4A==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"engines": {
|
||||
"node": "^14.21.3 || >=16"
|
||||
},
|
||||
"funding": {
|
||||
"url": "https://paulmillr.com/funding/"
|
||||
}
|
||||
},
|
||||
"node_modules/@nodelib/fs.scandir": {
|
||||
"version": "2.1.5",
|
||||
"resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz",
|
||||
@@ -3966,6 +4213,16 @@
|
||||
"node": ">=8.0.0"
|
||||
}
|
||||
},
|
||||
"node_modules/@paralleldrive/cuid2": {
|
||||
"version": "2.2.2",
|
||||
"resolved": "https://registry.npmjs.org/@paralleldrive/cuid2/-/cuid2-2.2.2.tgz",
|
||||
"integrity": "sha512-ZOBkgDwEdoYVlSeRbYYXs0S9MejQofiVYoTbKzy/6GQa39/q5tQU2IX46+shYnUkpEl3wc+J6wRlar7r2EK2xA==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"@noble/hashes": "^1.1.5"
|
||||
}
|
||||
},
|
||||
"node_modules/@sec-ant/readable-stream": {
|
||||
"version": "0.4.1",
|
||||
"resolved": "https://registry.npmjs.org/@sec-ant/readable-stream/-/readable-stream-0.4.1.tgz",
|
||||
@@ -5328,9 +5585,9 @@
|
||||
}
|
||||
},
|
||||
"node_modules/brace-expansion": {
|
||||
"version": "1.1.11",
|
||||
"resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz",
|
||||
"integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==",
|
||||
"version": "1.1.12",
|
||||
"resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz",
|
||||
"integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
@@ -7159,16 +7416,19 @@
|
||||
}
|
||||
},
|
||||
"node_modules/formidable": {
|
||||
"version": "3.5.2",
|
||||
"resolved": "https://registry.npmjs.org/formidable/-/formidable-3.5.2.tgz",
|
||||
"integrity": "sha512-Jqc1btCy3QzRbJaICGwKcBfGWuLADRerLzDqi2NwSt/UkXLsHJw2TVResiaoBufHVHy9aSgClOHCeJsSsFLTbg==",
|
||||
"version": "3.5.4",
|
||||
"resolved": "https://registry.npmjs.org/formidable/-/formidable-3.5.4.tgz",
|
||||
"integrity": "sha512-YikH+7CUTOtP44ZTnUhR7Ic2UASBPOqmaRkRKxRbywPTe5VxF7RRCck4af9wutiZ/QKM5nME9Bie2fFaPz5Gug==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"@paralleldrive/cuid2": "^2.2.2",
|
||||
"dezalgo": "^1.0.4",
|
||||
"hexoid": "^2.0.0",
|
||||
"once": "^1.4.0"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=14.0.0"
|
||||
},
|
||||
"funding": {
|
||||
"url": "https://ko-fi.com/tunnckoCore/commissions"
|
||||
}
|
||||
@@ -7672,16 +7932,6 @@
|
||||
"node": ">=18.0.0"
|
||||
}
|
||||
},
|
||||
"node_modules/hexoid": {
|
||||
"version": "2.0.0",
|
||||
"resolved": "https://registry.npmjs.org/hexoid/-/hexoid-2.0.0.tgz",
|
||||
"integrity": "sha512-qlspKUK7IlSQv2o+5I7yhUd7TxlOG2Vr5LTa3ve2XSNVKAL/n/u/7KLvKmFNimomDIKvZFXWHv0T12mv7rT8Aw==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"engines": {
|
||||
"node": ">=8"
|
||||
}
|
||||
},
|
||||
"node_modules/highlight.js": {
|
||||
"version": "10.7.3",
|
||||
"resolved": "https://registry.npmjs.org/highlight.js/-/highlight.js-10.7.3.tgz",
|
||||
|
||||
@@ -1,6 +1,6 @@
{
  "name": "task-master-ai",
  "version": "0.17.0",
  "version": "0.17.1",
  "description": "A task management system for ambitious AI-driven development that doesn't overwhelm and confuse Cursor.",
  "main": "index.js",
  "type": "module",
@@ -76,6 +76,9 @@
    "uuid": "^11.1.0",
    "zod": "^3.23.8"
  },
  "optionalDependencies": {
    "@anthropic-ai/claude-code": "^1.0.25"
  },
  "engines": {
    "node": ">=18.0.0"
  },

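Because `@anthropic-ai/claude-code` sits under `optionalDependencies`, users who don't want Claude Code support can skip optional packages at install time; with a recent npm this is typically done as follows (flag availability depends on the npm version in use):

```bash
# Skips @anthropic-ai/claude-code along with all other optional dependencies
npm install --omit=optional
```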
@@ -44,7 +44,8 @@ import {
  OllamaAIProvider,
  BedrockAIProvider,
  AzureProvider,
  VertexAIProvider
  VertexAIProvider,
  ClaudeCodeProvider
} from '../../src/ai-providers/index.js';

// Create provider instances
@@ -58,7 +59,8 @@ const PROVIDERS = {
  ollama: new OllamaAIProvider(),
  bedrock: new BedrockAIProvider(),
  azure: new AzureProvider(),
  vertex: new VertexAIProvider()
  vertex: new VertexAIProvider(),
  'claude-code': new ClaudeCodeProvider()
};

// Helper function to get cost for a specific model
@@ -225,6 +227,11 @@ function _extractErrorMessage(error) {
 * @throws {Error} If a required API key is missing.
 */
function _resolveApiKey(providerName, session, projectRoot = null) {
  // Claude Code doesn't require an API key
  if (providerName === 'claude-code') {
    return 'claude-code-no-key-required';
  }

  const keyMap = {
    openai: 'OPENAI_API_KEY',
    anthropic: 'ANTHROPIC_API_KEY',
@@ -236,7 +243,8 @@ function _resolveApiKey(providerName, session, projectRoot = null) {
    xai: 'XAI_API_KEY',
    ollama: 'OLLAMA_API_KEY',
    bedrock: 'AWS_ACCESS_KEY_ID',
    vertex: 'GOOGLE_API_KEY'
    vertex: 'GOOGLE_API_KEY',
    'claude-code': 'CLAUDE_CODE_API_KEY' // Not actually used, but included for consistency
  };

  const envVarName = keyMap[providerName];

@@ -294,30 +294,14 @@ async function runInteractiveSetup(projectRoot) {
|
||||
}
|
||||
: null;
|
||||
|
||||
const customOpenRouterOption = {
|
||||
name: '* Custom OpenRouter model', // Symbol updated
|
||||
value: '__CUSTOM_OPENROUTER__'
|
||||
};
|
||||
|
||||
const customOllamaOption = {
|
||||
name: '* Custom Ollama model', // Symbol updated
|
||||
value: '__CUSTOM_OLLAMA__'
|
||||
};
|
||||
|
||||
const customBedrockOption = {
|
||||
name: '* Custom Bedrock model', // Add Bedrock custom option
|
||||
value: '__CUSTOM_BEDROCK__'
|
||||
};
|
||||
|
||||
const customAzureOption = {
|
||||
name: '* Custom Azure OpenAI model', // Add Azure custom option
|
||||
value: '__CUSTOM_AZURE__'
|
||||
};
|
||||
|
||||
const customVertexOption = {
|
||||
name: '* Custom Vertex AI model', // Add Vertex custom option
|
||||
value: '__CUSTOM_VERTEX__'
|
||||
};
|
||||
// Define custom provider options
|
||||
const customProviderOptions = [
|
||||
{ name: '* Custom OpenRouter model', value: '__CUSTOM_OPENROUTER__' },
|
||||
{ name: '* Custom Ollama model', value: '__CUSTOM_OLLAMA__' },
|
||||
{ name: '* Custom Bedrock model', value: '__CUSTOM_BEDROCK__' },
|
||||
{ name: '* Custom Azure model', value: '__CUSTOM_AZURE__' },
|
||||
{ name: '* Custom Vertex model', value: '__CUSTOM_VERTEX__' }
|
||||
];
|
||||
|
||||
let choices = [];
|
||||
let defaultIndex = 0; // Default to 'Cancel'
|
||||
@@ -364,24 +348,16 @@ async function runInteractiveSetup(projectRoot) {
|
||||
}
|
||||
systemOptions.push(cancelOption);
|
||||
|
||||
const customOptions = [
|
||||
customOpenRouterOption,
|
||||
customOllamaOption,
|
||||
customBedrockOption,
|
||||
customAzureOption,
|
||||
customVertexOption
|
||||
];
|
||||
|
||||
const systemLength = systemOptions.length;
|
||||
|
||||
if (allowNone) {
|
||||
choices = [
|
||||
...systemOptions,
|
||||
new inquirer.Separator('── Standard Models ──'),
|
||||
new inquirer.Separator('\n── Standard Models ──'),
|
||||
{ name: '⚪ None (disable)', value: null },
|
||||
...roleChoices,
|
||||
new inquirer.Separator('── Custom Providers ──'),
|
||||
...customOptions
|
||||
new inquirer.Separator('\n── Custom Providers ──'),
|
||||
...customProviderOptions
|
||||
];
|
||||
// Adjust default index: System + Sep1 + None (+2)
|
||||
const noneOptionIndex = systemLength + 1;
|
||||
@@ -392,10 +368,10 @@ async function runInteractiveSetup(projectRoot) {
|
||||
} else {
|
||||
choices = [
|
||||
...systemOptions,
|
||||
new inquirer.Separator('── Standard Models ──'),
|
||||
new inquirer.Separator('\n── Standard Models ──'),
|
||||
...roleChoices,
|
||||
new inquirer.Separator('── Custom Providers ──'),
|
||||
...customOptions
|
||||
new inquirer.Separator('\n── Custom Providers ──'),
|
||||
...customProviderOptions
|
||||
];
|
||||
// Adjust default index: System + Sep (+1)
|
||||
defaultIndex =
|
||||
@@ -596,9 +572,9 @@ async function runInteractiveSetup(projectRoot) {
|
||||
!process.env.AWS_ACCESS_KEY_ID ||
|
||||
!process.env.AWS_SECRET_ACCESS_KEY
|
||||
) {
|
||||
console.error(
|
||||
chalk.red(
|
||||
'Error: AWS_ACCESS_KEY_ID and/or AWS_SECRET_ACCESS_KEY environment variables are missing. Please set them before using custom Bedrock models.'
|
||||
console.warn(
|
||||
chalk.yellow(
|
||||
'Warning: AWS_ACCESS_KEY_ID and/or AWS_SECRET_ACCESS_KEY environment variables are missing. Will fallback to system configuration. (ex: aws config files or ec2 instance profiles)'
|
||||
)
|
||||
);
|
||||
setupSuccess = false;
|
||||
@@ -3428,6 +3404,10 @@ ${result.result}
'--bedrock',
'Allow setting a custom Bedrock model ID (use with --set-*) '
)
.option(
'--claude-code',
'Allow setting a Claude Code model ID (use with --set-*)'
)
.option(
'--azure',
'Allow setting a custom Azure OpenAI model ID (use with --set-*) '
@@ -3447,6 +3427,7 @@ Examples:
$ task-master models --set-main my-custom-model --ollama # Set custom Ollama model for main role
$ task-master models --set-main anthropic.claude-3-sonnet-20240229-v1:0 --bedrock # Set custom Bedrock model for main role
$ task-master models --set-main some/other-model --openrouter # Set custom OpenRouter model for main role
$ task-master models --set-main sonnet --claude-code # Set Claude Code model for main role
$ task-master models --set-main gpt-4o --azure # Set custom Azure OpenAI model for main role
$ task-master models --set-main claude-3-5-sonnet@20241022 --vertex # Set custom Vertex AI model for main role
$ task-master models --setup # Run interactive setup`
@@ -3461,12 +3442,13 @@ Examples:
const providerFlags = [
options.openrouter,
options.ollama,
options.bedrock
options.bedrock,
options.claudeCode
].filter(Boolean).length;
if (providerFlags > 1) {
console.error(
chalk.red(
'Error: Cannot use multiple provider flags (--openrouter, --ollama, --bedrock) simultaneously.'
'Error: Cannot use multiple provider flags (--openrouter, --ollama, --bedrock, --claude-code) simultaneously.'
)
);
process.exit(1);
@@ -3508,7 +3490,9 @@ Examples:
? 'ollama'
: options.bedrock
? 'bedrock'
: undefined
: options.claudeCode
? 'claude-code'
: undefined
});
if (result.success) {
console.log(chalk.green(`✅ ${result.data.message}`));
@@ -3530,7 +3514,9 @@ Examples:
? 'ollama'
: options.bedrock
? 'bedrock'
: undefined
: options.claudeCode
? 'claude-code'
: undefined
});
if (result.success) {
console.log(chalk.green(`✅ ${result.data.message}`));
@@ -3554,7 +3540,9 @@ Examples:
? 'ollama'
: options.bedrock
? 'bedrock'
: undefined
: options.claudeCode
? 'claude-code'
: undefined
});
if (result.success) {
console.log(chalk.green(`✅ ${result.data.message}`));

@@ -496,10 +496,22 @@ function getParametersForRole(role, explicitRoot = null) {
*/
function isApiKeySet(providerName, session = null, projectRoot = null) {
// Define the expected environment variable name for each provider
if (providerName?.toLowerCase() === 'ollama') {

// Providers that don't require API keys for authentication
const providersWithoutApiKeys = [
CUSTOM_PROVIDERS.OLLAMA,
CUSTOM_PROVIDERS.BEDROCK
];

if (providersWithoutApiKeys.includes(providerName?.toLowerCase())) {
return true; // Indicate key status is effectively "OK"
}

// Claude Code doesn't require an API key
if (providerName?.toLowerCase() === 'claude-code') {
return true; // No API key needed
}

const keyMap = {
openai: 'OPENAI_API_KEY',
anthropic: 'ANTHROPIC_API_KEY',
@@ -509,7 +521,9 @@ function isApiKeySet(providerName, session = null, projectRoot = null) {
azure: 'AZURE_OPENAI_API_KEY',
openrouter: 'OPENROUTER_API_KEY',
xai: 'XAI_API_KEY',
vertex: 'GOOGLE_API_KEY' // Vertex uses the same key as Google
vertex: 'GOOGLE_API_KEY', // Vertex uses the same key as Google
'claude-code': 'CLAUDE_CODE_API_KEY', // Not actually used, but included for consistency
bedrock: 'AWS_ACCESS_KEY_ID' // Bedrock uses AWS credentials
// Add other providers as needed
};

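For reference, the intended behaviour of the updated check, as a minimal sketch (import of isApiKeySet from config-manager omitted):

// Sketch only: expected results of isApiKeySet after the change above.
console.log(isApiKeySet('ollama')); // true, no API key required
console.log(isApiKeySet('bedrock')); // true, AWS credentials are resolved outside the key map
console.log(isApiKeySet('claude-code')); // true, uses the local Claude Code CLI session
console.log(isApiKeySet('openai')); // true only if OPENAI_API_KEY resolves to a real value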
@@ -557,10 +571,11 @@ function getMcpApiKeyStatus(providerName, projectRoot = null) {
const mcpConfigRaw = fs.readFileSync(mcpConfigPath, 'utf-8');
const mcpConfig = JSON.parse(mcpConfigRaw);

const mcpEnv = mcpConfig?.mcpServers?.['taskmaster-ai']?.env;
const mcpEnv =
mcpConfig?.mcpServers?.['task-master-ai']?.env ||
mcpConfig?.mcpServers?.['taskmaster-ai']?.env;
if (!mcpEnv) {
// console.warn(chalk.yellow('Warning: Could not find taskmaster-ai env in mcp.json.'));
return false; // Structure missing
return false;
}

let apiKeyToCheck = null;
@@ -593,6 +608,8 @@ function getMcpApiKeyStatus(providerName, projectRoot = null) {
break;
case 'ollama':
return true; // No key needed
case 'claude-code':
return true; // No key needed
case 'mistral':
apiKeyToCheck = mcpEnv.MISTRAL_API_KEY;
placeholderValue = 'YOUR_MISTRAL_API_KEY_HERE';
@@ -605,6 +622,10 @@ function getMcpApiKeyStatus(providerName, projectRoot = null) {
apiKeyToCheck = mcpEnv.GOOGLE_API_KEY; // Vertex uses Google API key
placeholderValue = 'YOUR_GOOGLE_API_KEY_HERE';
break;
case 'bedrock':
apiKeyToCheck = mcpEnv.AWS_ACCESS_KEY_ID; // Bedrock uses AWS credentials
placeholderValue = 'YOUR_AWS_ACCESS_KEY_ID_HERE';
break;
default:
return false; // Unknown provider
}
@@ -652,7 +673,8 @@ function getAvailableModels() {
provider: provider,
swe_score: sweScore,
cost_per_1m_tokens: cost,
allowed_roles: allowedRoles
allowed_roles: allowedRoles,
max_tokens: modelObj.max_tokens
});
});
} else {
@@ -761,9 +783,15 @@ function getAllProviders() {

function getBaseUrlForRole(role, explicitRoot = null) {
const roleConfig = getModelConfigForRole(role, explicitRoot);
return roleConfig && typeof roleConfig.baseURL === 'string'
? roleConfig.baseURL
: undefined;
if (roleConfig && typeof roleConfig.baseURL === 'string') {
return roleConfig.baseURL;
}
const provider = roleConfig?.provider;
if (provider) {
const envVarName = `${provider.toUpperCase()}_BASE_URL`;
return resolveEnvVariable(envVarName, null, explicitRoot);
}
return undefined;
}

export {

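The new fallback lets a provider-wide base URL come from the environment when the role config does not set one. A minimal sketch, assuming an openai main role with no baseURL in config (import of getBaseUrlForRole omitted; the proxy URL is hypothetical):

// Sketch: resolving a base URL for the 'main' role via the new env fallback.
process.env.OPENAI_BASE_URL = 'https://my-proxy.example.com/v1'; // hypothetical endpoint

const baseURL = getBaseUrlForRole('main');
// 1. roleConfig.baseURL is not set, so the config branch is skipped
// 2. provider is 'openai', so envVarName becomes 'OPENAI_BASE_URL'
// 3. resolveEnvVariable('OPENAI_BASE_URL', null, explicitRoot) supplies the value
console.log(baseURL); // 'https://my-proxy.example.com/v1'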
@@ -1,4 +1,20 @@
{
"bedrock": [
{
"id": "us.anthropic.claude-3-7-sonnet-20250219-v1:0",
"swe_score": 0.623,
"cost_per_1m_tokens": { "input": 3, "output": 15 },
"allowed_roles": ["main", "fallback"],
"max_tokens": 65536
},
{
"id": "us.deepseek.r1-v1:0",
"swe_score": 0,
"cost_per_1m_tokens": { "input": 1.35, "output": 5.4 },
"allowed_roles": ["research"],
"max_tokens": 65536
}
],
"anthropic": [
{
"id": "claude-sonnet-4-20250514",
@@ -594,5 +610,21 @@
"allowed_roles": ["main", "fallback"],
"max_tokens": 32768
}
],
"claude-code": [
{
"id": "opus",
"swe_score": 0.725,
"cost_per_1m_tokens": { "input": 0, "output": 0 },
"allowed_roles": ["main", "fallback", "research"],
"max_tokens": 32000
},
{
"id": "sonnet",
"swe_score": 0.727,
"cost_per_1m_tokens": { "input": 0, "output": 0 },
"allowed_roles": ["main", "fallback", "research"],
"max_tokens": 64000
}
]
}

@@ -32,7 +32,12 @@ async function expandAllTasks(
context = {},
outputFormat = 'text' // Assume text default for CLI
) {
const { session, mcpLog, projectRoot: providedProjectRoot } = context;
const {
session,
mcpLog,
projectRoot: providedProjectRoot,
tag: contextTag
} = context;
const isMCPCall = !!mcpLog; // Determine if called from MCP

const projectRoot = providedProjectRoot || findProjectRoot();
@@ -74,7 +79,7 @@ async function expandAllTasks(

try {
logger.info(`Reading tasks from ${tasksPath}`);
const data = readJSON(tasksPath, projectRoot);
const data = readJSON(tasksPath, projectRoot, contextTag);
if (!data || !data.tasks) {
throw new Error(`Invalid tasks data in ${tasksPath}`);
}
@@ -124,7 +129,7 @@ async function expandAllTasks(
numSubtasks,
useResearch,
additionalContext,
{ ...context, projectRoot }, // Pass the whole context object with projectRoot
{ ...context, projectRoot, tag: data.tag || contextTag }, // Pass the whole context object with projectRoot and resolved tag
force
);
expandedCount++;

@@ -417,7 +417,7 @@ async function expandTask(
context = {},
force = false
) {
const { session, mcpLog, projectRoot: contextProjectRoot } = context;
const { session, mcpLog, projectRoot: contextProjectRoot, tag } = context;
const outputFormat = mcpLog ? 'json' : 'text';

// Determine projectRoot: Use from context if available, otherwise derive from tasksPath
@@ -439,7 +439,7 @@ async function expandTask(
try {
// --- Task Loading/Filtering (Unchanged) ---
logger.info(`Reading tasks from ${tasksPath}`);
const data = readJSON(tasksPath, projectRoot);
const data = readJSON(tasksPath, projectRoot, tag);
if (!data || !data.tasks)
throw new Error(`Invalid tasks data in ${tasksPath}`);
const taskIndex = data.tasks.findIndex(
@@ -668,7 +668,7 @@ async function expandTask(
// --- End Change: Append instead of replace ---

data.tasks[taskIndex] = task; // Assign the modified task back
writeJSON(tasksPath, data);
writeJSON(tasksPath, data, projectRoot, tag);
// await generateTaskFiles(tasksPath, path.dirname(tasksPath));

// Display AI Usage Summary for CLI

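The key point of the hunks above is that the resolved tag travels inside the context object, so the nested readJSON/writeJSON calls stay scoped to one tag instead of flattening the file. A hedged sketch of that pattern (helper names as in the hunks; surrounding variables assumed to be in scope):

// Sketch: thread the tag through context so reads and writes stay tag-scoped.
const context = { session, mcpLog, projectRoot, tag: 'feature-expand' };

const data = readJSON(tasksPath, projectRoot, context.tag); // only that tag's task slice
// ...append generated subtasks to the selected task...
writeJSON(tasksPath, data, projectRoot, context.tag); // other tags are left untouched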
@@ -425,7 +425,7 @@ async function setModel(role, modelId, options = {}) {
|
||||
let warningMessage = null;
|
||||
|
||||
// Find the model data in internal list initially to see if it exists at all
|
||||
const modelData = availableModels.find((m) => m.id === modelId);
|
||||
let modelData = availableModels.find((m) => m.id === modelId);
|
||||
|
||||
// --- Revised Logic: Prioritize providerHint --- //
|
||||
|
||||
@@ -495,6 +495,24 @@ async function setModel(role, modelId, options = {}) {
|
||||
determinedProvider = CUSTOM_PROVIDERS.BEDROCK;
|
||||
warningMessage = `Warning: Custom Bedrock model '${modelId}' set. Please ensure the model ID is valid and accessible in your AWS account.`;
|
||||
report('warn', warningMessage);
|
||||
} else if (providerHint === CUSTOM_PROVIDERS.CLAUDE_CODE) {
|
||||
// Claude Code provider - check if model exists in our list
|
||||
determinedProvider = CUSTOM_PROVIDERS.CLAUDE_CODE;
|
||||
// Re-find modelData specifically for claude-code provider
|
||||
const claudeCodeModels = availableModels.filter(
|
||||
(m) => m.provider === 'claude-code'
|
||||
);
|
||||
const claudeCodeModelData = claudeCodeModels.find(
|
||||
(m) => m.id === modelId
|
||||
);
|
||||
if (claudeCodeModelData) {
|
||||
// Update modelData to the found claude-code model
|
||||
modelData = claudeCodeModelData;
|
||||
report('info', `Setting Claude Code model '${modelId}'.`);
|
||||
} else {
|
||||
warningMessage = `Warning: Claude Code model '${modelId}' not found in supported models. Setting without validation.`;
|
||||
report('warn', warningMessage);
|
||||
}
|
||||
} else if (providerHint === CUSTOM_PROVIDERS.AZURE) {
|
||||
// Set provider without model validation since Azure models are managed by Azure
|
||||
determinedProvider = CUSTOM_PROVIDERS.AZURE;
|
||||
@@ -525,7 +543,7 @@ async function setModel(role, modelId, options = {}) {
|
||||
success: false,
|
||||
error: {
|
||||
code: 'MODEL_NOT_FOUND_NO_HINT',
|
||||
message: `Model ID "${modelId}" not found in Taskmaster's supported models. If this is a custom model, please specify the provider using --openrouter or --ollama.`
|
||||
message: `Model ID "${modelId}" not found in Taskmaster's supported models. If this is a custom model, please specify the provider using --openrouter, --ollama, --bedrock, --azure, or --vertex.`
|
||||
}
|
||||
};
|
||||
}
|
||||
@@ -547,11 +565,16 @@ async function setModel(role, modelId, options = {}) {
|
||||
|
||||
// Update configuration
|
||||
currentConfig.models[role] = {
|
||||
...currentConfig.models[role], // Keep existing params like maxTokens
|
||||
...currentConfig.models[role], // Keep existing params like temperature
|
||||
provider: determinedProvider,
|
||||
modelId: modelId
|
||||
};
|
||||
|
||||
// If model data is available, update maxTokens from supported-models.json
|
||||
if (modelData && modelData.max_tokens) {
|
||||
currentConfig.models[role].maxTokens = modelData.max_tokens;
|
||||
}
|
||||
|
||||
// Write updated configuration
|
||||
const writeResult = writeConfig(currentConfig, projectRoot);
|
||||
if (!writeResult) {
|
||||
|
||||
@@ -73,7 +73,7 @@ function resolveEnvVariable(key, session = null, projectRoot = null) {
*/
function findProjectRoot(
startDir = process.cwd(),
markers = ['package.json', '.git', LEGACY_CONFIG_FILE]
markers = ['package.json', 'pyproject.toml', '.git', LEGACY_CONFIG_FILE]
) {
let currentPath = path.resolve(startDir);
const rootPath = path.parse(currentPath).root;

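With pyproject.toml in the default marker list, a Python-only repository is detected even when there is no package.json or .git in the start directory. A self-contained sketch of the same upward search, assuming LEGACY_CONFIG_FILE is '.taskmasterconfig':

// Sketch of the marker-based upward search (not the project's exact implementation).
import fs from 'fs';
import path from 'path';

function findRootByMarkers(
  startDir = process.cwd(),
  markers = ['package.json', 'pyproject.toml', '.git', '.taskmasterconfig']
) {
  let currentPath = path.resolve(startDir);
  const rootPath = path.parse(currentPath).root;
  while (true) {
    if (markers.some((m) => fs.existsSync(path.join(currentPath, m)))) {
      return currentPath; // first directory containing any marker wins
    }
    if (currentPath === rootPath) return null; // reached the filesystem root
    currentPath = path.dirname(currentPath);
  }
}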
@@ -21,18 +21,10 @@ export class BedrockAIProvider extends BaseAIProvider {
*/
getClient(params) {
try {
const {
profile = process.env.AWS_PROFILE || 'default',
region = process.env.AWS_DEFAULT_REGION || 'us-east-1',
baseURL
} = params;

const credentialProvider = fromNodeProviderChain({ profile });
const credentialProvider = fromNodeProviderChain();

return createAmazonBedrock({
region,
credentialProvider,
...(baseURL && { baseURL })
credentialProvider
});
} catch (error) {
this.handleError('client initialization', error);

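Dropping the explicit profile and baseURL handling defers credential discovery to the AWS SDK's default Node provider chain (env vars, shared config and credentials files, SSO, instance profiles). A sketch of the resulting client wiring, assuming the usual @ai-sdk/amazon-bedrock and @aws-sdk/credential-providers packages:

// Sketch: Bedrock client that relies on the default AWS credential chain.
import { createAmazonBedrock } from '@ai-sdk/amazon-bedrock';
import { fromNodeProviderChain } from '@aws-sdk/credential-providers';

const bedrock = createAmazonBedrock({
  region: process.env.AWS_DEFAULT_REGION || 'us-east-1',
  credentialProvider: fromNodeProviderChain() // env -> shared config -> SSO -> instance profile
});

const model = bedrock('us.anthropic.claude-3-7-sonnet-20250219-v1:0'); // ID from supported-models.json above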
47
src/ai-providers/claude-code.js
Normal file
@@ -0,0 +1,47 @@
|
||||
/**
|
||||
* src/ai-providers/claude-code.js
|
||||
*
|
||||
* Implementation for interacting with Claude models via Claude Code CLI
|
||||
* using a custom AI SDK implementation.
|
||||
*/
|
||||
|
||||
import { createClaudeCode } from './custom-sdk/claude-code/index.js';
|
||||
import { BaseAIProvider } from './base-provider.js';
|
||||
|
||||
export class ClaudeCodeProvider extends BaseAIProvider {
|
||||
constructor() {
|
||||
super();
|
||||
this.name = 'Claude Code';
|
||||
}
|
||||
|
||||
/**
|
||||
* Override validateAuth to skip API key validation for Claude Code
|
||||
* @param {object} params - Parameters to validate
|
||||
*/
|
||||
validateAuth(params) {
|
||||
// Claude Code doesn't require an API key
|
||||
// No validation needed
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates and returns a Claude Code client instance.
|
||||
* @param {object} params - Parameters for client initialization
|
||||
* @param {string} [params.baseURL] - Optional custom API endpoint (not used by Claude Code)
|
||||
* @returns {Function} Claude Code client function
|
||||
* @throws {Error} If initialization fails
|
||||
*/
|
||||
getClient(params) {
|
||||
try {
|
||||
// Claude Code doesn't use API keys or base URLs
|
||||
// Just return the provider factory
|
||||
return createClaudeCode({
|
||||
defaultSettings: {
|
||||
// Add any default settings if needed
|
||||
// These can be overridden per request
|
||||
}
|
||||
});
|
||||
} catch (error) {
|
||||
this.handleError('client initialization', error);
|
||||
}
|
||||
}
|
||||
}
|
||||
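A short usage sketch for the provider class above (in the real flow the unified AI service layer does this wiring):

// Sketch: obtaining a Claude Code model handle through the provider.
import { ClaudeCodeProvider } from './claude-code.js';

const provider = new ClaudeCodeProvider();
provider.validateAuth({}); // no-op, no API key is required
const claudeCode = provider.getClient({});
const model = claudeCode('sonnet'); // 'opus' is the other supported alias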
126
src/ai-providers/custom-sdk/claude-code/errors.js
Normal file
@@ -0,0 +1,126 @@
|
||||
/**
|
||||
* @fileoverview Error handling utilities for Claude Code provider
|
||||
*/
|
||||
|
||||
import { APICallError, LoadAPIKeyError } from '@ai-sdk/provider';
|
||||
|
||||
/**
|
||||
* @typedef {import('./types.js').ClaudeCodeErrorMetadata} ClaudeCodeErrorMetadata
|
||||
*/
|
||||
|
||||
/**
|
||||
* Create an API call error with Claude Code specific metadata
|
||||
* @param {Object} params - Error parameters
|
||||
* @param {string} params.message - Error message
|
||||
* @param {string} [params.code] - Error code
|
||||
* @param {number} [params.exitCode] - Process exit code
|
||||
* @param {string} [params.stderr] - Standard error output
|
||||
* @param {string} [params.promptExcerpt] - Excerpt of the prompt
|
||||
* @param {boolean} [params.isRetryable=false] - Whether the error is retryable
|
||||
* @returns {APICallError}
|
||||
*/
|
||||
export function createAPICallError({
|
||||
message,
|
||||
code,
|
||||
exitCode,
|
||||
stderr,
|
||||
promptExcerpt,
|
||||
isRetryable = false
|
||||
}) {
|
||||
/** @type {ClaudeCodeErrorMetadata} */
|
||||
const metadata = {
|
||||
code,
|
||||
exitCode,
|
||||
stderr,
|
||||
promptExcerpt
|
||||
};
|
||||
|
||||
return new APICallError({
|
||||
message,
|
||||
isRetryable,
|
||||
url: 'claude-code-cli://command',
|
||||
requestBodyValues: promptExcerpt ? { prompt: promptExcerpt } : undefined,
|
||||
data: metadata
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Create an authentication error
|
||||
* @param {Object} params - Error parameters
|
||||
* @param {string} params.message - Error message
|
||||
* @returns {LoadAPIKeyError}
|
||||
*/
|
||||
export function createAuthenticationError({ message }) {
|
||||
return new LoadAPIKeyError({
|
||||
message:
|
||||
message ||
|
||||
'Authentication failed. Please ensure Claude Code CLI is properly authenticated.'
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a timeout error
|
||||
* @param {Object} params - Error parameters
|
||||
* @param {string} params.message - Error message
|
||||
* @param {string} [params.promptExcerpt] - Excerpt of the prompt
|
||||
* @param {number} params.timeoutMs - Timeout in milliseconds
|
||||
* @returns {APICallError}
|
||||
*/
|
||||
export function createTimeoutError({ message, promptExcerpt, timeoutMs }) {
|
||||
// Store timeoutMs in metadata for potential use by error handlers
|
||||
/** @type {ClaudeCodeErrorMetadata & { timeoutMs: number }} */
|
||||
const metadata = {
|
||||
code: 'TIMEOUT',
|
||||
promptExcerpt,
|
||||
timeoutMs
|
||||
};
|
||||
|
||||
return new APICallError({
|
||||
message,
|
||||
isRetryable: true,
|
||||
url: 'claude-code-cli://command',
|
||||
requestBodyValues: promptExcerpt ? { prompt: promptExcerpt } : undefined,
|
||||
data: metadata
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if an error is an authentication error
|
||||
* @param {unknown} error - Error to check
|
||||
* @returns {boolean}
|
||||
*/
|
||||
export function isAuthenticationError(error) {
|
||||
if (error instanceof LoadAPIKeyError) return true;
|
||||
if (
|
||||
error instanceof APICallError &&
|
||||
/** @type {ClaudeCodeErrorMetadata} */ (error.data)?.exitCode === 401
|
||||
)
|
||||
return true;
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if an error is a timeout error
|
||||
* @param {unknown} error - Error to check
|
||||
* @returns {boolean}
|
||||
*/
|
||||
export function isTimeoutError(error) {
|
||||
if (
|
||||
error instanceof APICallError &&
|
||||
/** @type {ClaudeCodeErrorMetadata} */ (error.data)?.code === 'TIMEOUT'
|
||||
)
|
||||
return true;
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get error metadata from an error
|
||||
* @param {unknown} error - Error to extract metadata from
|
||||
* @returns {ClaudeCodeErrorMetadata|undefined}
|
||||
*/
|
||||
export function getErrorMetadata(error) {
|
||||
if (error instanceof APICallError && error.data) {
|
||||
return /** @type {ClaudeCodeErrorMetadata} */ (error.data);
|
||||
}
|
||||
return undefined;
|
||||
}
|
||||
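A sketch of how callers are expected to classify CLI failures with these helpers (assumed usage, not part of the diff):

// Sketch: classifying a failure coming back from the Claude Code CLI.
import {
  createAPICallError,
  isAuthenticationError,
  isTimeoutError,
  getErrorMetadata
} from './errors.js';

try {
  throw createAPICallError({
    message: 'claude CLI exited unexpectedly',
    exitCode: 1,
    stderr: 'spawn claude ENOENT',
    isRetryable: true
  });
} catch (error) {
  if (isAuthenticationError(error)) {
    // prompt the user to run the CLI login flow
  } else if (isTimeoutError(error)) {
    // retry with a longer timeout
  } else {
    console.error(getErrorMetadata(error)); // { code, exitCode, stderr, promptExcerpt }
  }
}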
83
src/ai-providers/custom-sdk/claude-code/index.js
Normal file
@@ -0,0 +1,83 @@
|
||||
/**
|
||||
* @fileoverview Claude Code provider factory and exports
|
||||
*/
|
||||
|
||||
import { NoSuchModelError } from '@ai-sdk/provider';
|
||||
import { ClaudeCodeLanguageModel } from './language-model.js';
|
||||
|
||||
/**
|
||||
* @typedef {import('./types.js').ClaudeCodeSettings} ClaudeCodeSettings
|
||||
* @typedef {import('./types.js').ClaudeCodeModelId} ClaudeCodeModelId
|
||||
* @typedef {import('./types.js').ClaudeCodeProvider} ClaudeCodeProvider
|
||||
* @typedef {import('./types.js').ClaudeCodeProviderSettings} ClaudeCodeProviderSettings
|
||||
*/
|
||||
|
||||
/**
|
||||
* Create a Claude Code provider using the official SDK
|
||||
* @param {ClaudeCodeProviderSettings} [options={}] - Provider configuration options
|
||||
* @returns {ClaudeCodeProvider} Claude Code provider instance
|
||||
*/
|
||||
export function createClaudeCode(options = {}) {
|
||||
/**
|
||||
* Create a language model instance
|
||||
* @param {ClaudeCodeModelId} modelId - Model ID
|
||||
* @param {ClaudeCodeSettings} [settings={}] - Model settings
|
||||
* @returns {ClaudeCodeLanguageModel}
|
||||
*/
|
||||
const createModel = (modelId, settings = {}) => {
|
||||
return new ClaudeCodeLanguageModel({
|
||||
id: modelId,
|
||||
settings: {
|
||||
...options.defaultSettings,
|
||||
...settings
|
||||
}
|
||||
});
|
||||
};
|
||||
|
||||
/**
|
||||
* Provider function
|
||||
* @param {ClaudeCodeModelId} modelId - Model ID
|
||||
* @param {ClaudeCodeSettings} [settings] - Model settings
|
||||
* @returns {ClaudeCodeLanguageModel}
|
||||
*/
|
||||
const provider = function (modelId, settings) {
|
||||
if (new.target) {
|
||||
throw new Error(
|
||||
'The Claude Code model function cannot be called with the new keyword.'
|
||||
);
|
||||
}
|
||||
|
||||
return createModel(modelId, settings);
|
||||
};
|
||||
|
||||
provider.languageModel = createModel;
|
||||
provider.chat = createModel; // Alias for languageModel
|
||||
|
||||
// Add textEmbeddingModel method that throws NoSuchModelError
|
||||
provider.textEmbeddingModel = (modelId) => {
|
||||
throw new NoSuchModelError({
|
||||
modelId,
|
||||
modelType: 'textEmbeddingModel'
|
||||
});
|
||||
};
|
||||
|
||||
return /** @type {ClaudeCodeProvider} */ (provider);
|
||||
}
|
||||
|
||||
/**
|
||||
* Default Claude Code provider instance
|
||||
*/
|
||||
export const claudeCode = createClaudeCode();
|
||||
|
||||
// Provider exports
|
||||
export { ClaudeCodeLanguageModel } from './language-model.js';
|
||||
|
||||
// Error handling exports
|
||||
export {
|
||||
isAuthenticationError,
|
||||
isTimeoutError,
|
||||
getErrorMetadata,
|
||||
createAPICallError,
|
||||
createAuthenticationError,
|
||||
createTimeoutError
|
||||
} from './errors.js';
|
||||
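Typical factory usage, as a sketch (the settings fields are the optional ones documented in types.js below):

// Sketch: create a provider with defaults, then request models from it.
import { createClaudeCode, claudeCode } from './index.js';

const provider = createClaudeCode({
  defaultSettings: { maxTurns: 1, permissionMode: 'default' }
});
const opus = provider('opus'); // same as provider.languageModel('opus')
const sonnet = claudeCode('sonnet'); // the pre-built default instance exported above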
59
src/ai-providers/custom-sdk/claude-code/json-extractor.js
Normal file
@@ -0,0 +1,59 @@
|
||||
/**
|
||||
* @fileoverview Extract JSON from Claude's response, handling markdown blocks and other formatting
|
||||
*/
|
||||
|
||||
/**
|
||||
* Extract JSON from Claude's response
|
||||
* @param {string} text - The text to extract JSON from
|
||||
* @returns {string} - The extracted JSON string
|
||||
*/
|
||||
export function extractJson(text) {
|
||||
// Remove markdown code blocks if present
|
||||
let jsonText = text.trim();
|
||||
|
||||
// Remove ```json blocks
|
||||
jsonText = jsonText.replace(/^```json\s*/gm, '');
|
||||
jsonText = jsonText.replace(/^```\s*/gm, '');
|
||||
jsonText = jsonText.replace(/```\s*$/gm, '');
|
||||
|
||||
// Remove common TypeScript/JavaScript patterns
|
||||
jsonText = jsonText.replace(/^const\s+\w+\s*=\s*/, ''); // Remove "const varName = "
|
||||
jsonText = jsonText.replace(/^let\s+\w+\s*=\s*/, ''); // Remove "let varName = "
|
||||
jsonText = jsonText.replace(/^var\s+\w+\s*=\s*/, ''); // Remove "var varName = "
|
||||
jsonText = jsonText.replace(/;?\s*$/, ''); // Remove trailing semicolons
|
||||
|
||||
// Try to extract JSON object or array
|
||||
const objectMatch = jsonText.match(/{[\s\S]*}/);
|
||||
const arrayMatch = jsonText.match(/\[[\s\S]*\]/);
|
||||
|
||||
if (objectMatch) {
|
||||
jsonText = objectMatch[0];
|
||||
} else if (arrayMatch) {
|
||||
jsonText = arrayMatch[0];
|
||||
}
|
||||
|
||||
// First try to parse as valid JSON
|
||||
try {
|
||||
JSON.parse(jsonText);
|
||||
return jsonText;
|
||||
} catch {
|
||||
// If it's not valid JSON, it might be a JavaScript object literal
|
||||
// Try to convert it to valid JSON
|
||||
try {
|
||||
// This is a simple conversion that handles basic cases
|
||||
// Replace unquoted keys with quoted keys
|
||||
const converted = jsonText
|
||||
.replace(/([{,]\s*)([a-zA-Z_$][a-zA-Z0-9_$]*)\s*:/g, '$1"$2":')
|
||||
// Replace single quotes with double quotes
|
||||
.replace(/'/g, '"');
|
||||
|
||||
// Validate the converted JSON
|
||||
JSON.parse(converted);
|
||||
return converted;
|
||||
} catch {
|
||||
// If all else fails, return the original text
|
||||
// The AI SDK will handle the error appropriately
|
||||
return text;
|
||||
}
|
||||
}
|
||||
}
|
||||
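Two examples of what the extractor tolerates, traced against the regexes above (sketch):

// Sketch: the extractor strips markdown fences and simple JS-literal noise.
import { extractJson } from './json-extractor.js';

const fenced = '```json\n{ "title": "Add login form", "subtasks": [] }\n```';
console.log(JSON.parse(extractJson(fenced)).title); // 'Add login form'

const literal = "const result = { title: 'Quoted keys added', subtasks: [] };";
console.log(JSON.parse(extractJson(literal)).title); // 'Quoted keys added'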
458
src/ai-providers/custom-sdk/claude-code/language-model.js
Normal file
@@ -0,0 +1,458 @@
|
||||
/**
|
||||
* @fileoverview Claude Code Language Model implementation
|
||||
*/
|
||||
|
||||
import { NoSuchModelError } from '@ai-sdk/provider';
|
||||
import { generateId } from '@ai-sdk/provider-utils';
|
||||
import { convertToClaudeCodeMessages } from './message-converter.js';
|
||||
import { extractJson } from './json-extractor.js';
|
||||
import { createAPICallError, createAuthenticationError } from './errors.js';
|
||||
|
||||
let query;
|
||||
let AbortError;
|
||||
|
||||
async function loadClaudeCodeModule() {
|
||||
if (!query || !AbortError) {
|
||||
try {
|
||||
const mod = await import('@anthropic-ai/claude-code');
|
||||
query = mod.query;
|
||||
AbortError = mod.AbortError;
|
||||
} catch (err) {
|
||||
throw new Error(
|
||||
"Claude Code SDK is not installed. Please install '@anthropic-ai/claude-code' to use the claude-code provider."
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* @typedef {import('./types.js').ClaudeCodeSettings} ClaudeCodeSettings
|
||||
* @typedef {import('./types.js').ClaudeCodeModelId} ClaudeCodeModelId
|
||||
* @typedef {import('./types.js').ClaudeCodeLanguageModelOptions} ClaudeCodeLanguageModelOptions
|
||||
*/
|
||||
|
||||
const modelMap = {
|
||||
opus: 'opus',
|
||||
sonnet: 'sonnet'
|
||||
};
|
||||
|
||||
export class ClaudeCodeLanguageModel {
|
||||
specificationVersion = 'v1';
|
||||
defaultObjectGenerationMode = 'json';
|
||||
supportsImageUrls = false;
|
||||
supportsStructuredOutputs = false;
|
||||
|
||||
/** @type {ClaudeCodeModelId} */
|
||||
modelId;
|
||||
|
||||
/** @type {ClaudeCodeSettings} */
|
||||
settings;
|
||||
|
||||
/** @type {string|undefined} */
|
||||
sessionId;
|
||||
|
||||
/**
|
||||
* @param {ClaudeCodeLanguageModelOptions} options
|
||||
*/
|
||||
constructor(options) {
|
||||
this.modelId = options.id;
|
||||
this.settings = options.settings ?? {};
|
||||
|
||||
// Validate model ID format
|
||||
if (
|
||||
!this.modelId ||
|
||||
typeof this.modelId !== 'string' ||
|
||||
this.modelId.trim() === ''
|
||||
) {
|
||||
throw new NoSuchModelError({
|
||||
modelId: this.modelId,
|
||||
modelType: 'languageModel'
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
get provider() {
|
||||
return 'claude-code';
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the model name for Claude Code CLI
|
||||
* @returns {string}
|
||||
*/
|
||||
getModel() {
|
||||
const mapped = modelMap[this.modelId];
|
||||
return mapped ?? this.modelId;
|
||||
}
|
||||
|
||||
/**
|
||||
* Generate unsupported parameter warnings
|
||||
* @param {Object} options - Generation options
|
||||
* @returns {Array} Warnings array
|
||||
*/
|
||||
generateUnsupportedWarnings(options) {
|
||||
const warnings = [];
|
||||
const unsupportedParams = [];
|
||||
|
||||
// Check for unsupported parameters
|
||||
if (options.temperature !== undefined)
|
||||
unsupportedParams.push('temperature');
|
||||
if (options.maxTokens !== undefined) unsupportedParams.push('maxTokens');
|
||||
if (options.topP !== undefined) unsupportedParams.push('topP');
|
||||
if (options.topK !== undefined) unsupportedParams.push('topK');
|
||||
if (options.presencePenalty !== undefined)
|
||||
unsupportedParams.push('presencePenalty');
|
||||
if (options.frequencyPenalty !== undefined)
|
||||
unsupportedParams.push('frequencyPenalty');
|
||||
if (options.stopSequences !== undefined && options.stopSequences.length > 0)
|
||||
unsupportedParams.push('stopSequences');
|
||||
if (options.seed !== undefined) unsupportedParams.push('seed');
|
||||
|
||||
if (unsupportedParams.length > 0) {
|
||||
// Add a warning for each unsupported parameter
|
||||
for (const param of unsupportedParams) {
|
||||
warnings.push({
|
||||
type: 'unsupported-setting',
|
||||
setting: param,
|
||||
details: `Claude Code CLI does not support the ${param} parameter. It will be ignored.`
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
return warnings;
|
||||
}
|
||||
|
||||
/**
|
||||
* Generate text using Claude Code
|
||||
* @param {Object} options - Generation options
|
||||
* @returns {Promise<Object>}
|
||||
*/
|
||||
async doGenerate(options) {
|
||||
await loadClaudeCodeModule();
|
||||
const { messagesPrompt } = convertToClaudeCodeMessages(
|
||||
options.prompt,
|
||||
options.mode
|
||||
);
|
||||
|
||||
const abortController = new AbortController();
|
||||
if (options.abortSignal) {
|
||||
options.abortSignal.addEventListener('abort', () =>
|
||||
abortController.abort()
|
||||
);
|
||||
}
|
||||
|
||||
const queryOptions = {
|
||||
model: this.getModel(),
|
||||
abortController,
|
||||
resume: this.sessionId,
|
||||
pathToClaudeCodeExecutable: this.settings.pathToClaudeCodeExecutable,
|
||||
customSystemPrompt: this.settings.customSystemPrompt,
|
||||
appendSystemPrompt: this.settings.appendSystemPrompt,
|
||||
maxTurns: this.settings.maxTurns,
|
||||
maxThinkingTokens: this.settings.maxThinkingTokens,
|
||||
cwd: this.settings.cwd,
|
||||
executable: this.settings.executable,
|
||||
executableArgs: this.settings.executableArgs,
|
||||
permissionMode: this.settings.permissionMode,
|
||||
permissionPromptToolName: this.settings.permissionPromptToolName,
|
||||
continue: this.settings.continue,
|
||||
allowedTools: this.settings.allowedTools,
|
||||
disallowedTools: this.settings.disallowedTools,
|
||||
mcpServers: this.settings.mcpServers
|
||||
};
|
||||
|
||||
let text = '';
|
||||
let usage = { promptTokens: 0, completionTokens: 0 };
|
||||
let finishReason = 'stop';
|
||||
let costUsd;
|
||||
let durationMs;
|
||||
let rawUsage;
|
||||
const warnings = this.generateUnsupportedWarnings(options);
|
||||
|
||||
try {
|
||||
const response = query({
|
||||
prompt: messagesPrompt,
|
||||
options: queryOptions
|
||||
});
|
||||
|
||||
for await (const message of response) {
|
||||
if (message.type === 'assistant') {
|
||||
text += message.message.content
|
||||
.map((c) => (c.type === 'text' ? c.text : ''))
|
||||
.join('');
|
||||
} else if (message.type === 'result') {
|
||||
this.sessionId = message.session_id;
|
||||
costUsd = message.total_cost_usd;
|
||||
durationMs = message.duration_ms;
|
||||
|
||||
if ('usage' in message) {
|
||||
rawUsage = message.usage;
|
||||
usage = {
|
||||
promptTokens:
|
||||
(message.usage.cache_creation_input_tokens ?? 0) +
|
||||
(message.usage.cache_read_input_tokens ?? 0) +
|
||||
(message.usage.input_tokens ?? 0),
|
||||
completionTokens: message.usage.output_tokens ?? 0
|
||||
};
|
||||
}
|
||||
|
||||
if (message.subtype === 'error_max_turns') {
|
||||
finishReason = 'length';
|
||||
} else if (message.subtype === 'error_during_execution') {
|
||||
finishReason = 'error';
|
||||
}
|
||||
} else if (message.type === 'system' && message.subtype === 'init') {
|
||||
this.sessionId = message.session_id;
|
||||
}
|
||||
}
|
||||
} catch (error) {
|
||||
if (error instanceof AbortError) {
|
||||
throw options.abortSignal?.aborted ? options.abortSignal.reason : error;
|
||||
}
|
||||
|
||||
// Check for authentication errors
|
||||
if (
|
||||
error.message?.includes('not logged in') ||
|
||||
error.message?.includes('authentication') ||
|
||||
error.exitCode === 401
|
||||
) {
|
||||
throw createAuthenticationError({
|
||||
message:
|
||||
error.message ||
|
||||
'Authentication failed. Please ensure Claude Code CLI is properly authenticated.'
|
||||
});
|
||||
}
|
||||
|
||||
// Wrap other errors with API call error
|
||||
throw createAPICallError({
|
||||
message: error.message || 'Claude Code CLI error',
|
||||
code: error.code,
|
||||
exitCode: error.exitCode,
|
||||
stderr: error.stderr,
|
||||
promptExcerpt: messagesPrompt.substring(0, 200),
|
||||
isRetryable: error.code === 'ENOENT' || error.code === 'ECONNREFUSED'
|
||||
});
|
||||
}
|
||||
|
||||
// Extract JSON if in object-json mode
|
||||
if (options.mode?.type === 'object-json' && text) {
|
||||
text = extractJson(text);
|
||||
}
|
||||
|
||||
return {
|
||||
text: text || undefined,
|
||||
usage,
|
||||
finishReason,
|
||||
rawCall: {
|
||||
rawPrompt: messagesPrompt,
|
||||
rawSettings: queryOptions
|
||||
},
|
||||
warnings: warnings.length > 0 ? warnings : undefined,
|
||||
response: {
|
||||
id: generateId(),
|
||||
timestamp: new Date(),
|
||||
modelId: this.modelId
|
||||
},
|
||||
request: {
|
||||
body: messagesPrompt
|
||||
},
|
||||
providerMetadata: {
|
||||
'claude-code': {
|
||||
...(this.sessionId !== undefined && { sessionId: this.sessionId }),
|
||||
...(costUsd !== undefined && { costUsd }),
|
||||
...(durationMs !== undefined && { durationMs }),
|
||||
...(rawUsage !== undefined && { rawUsage })
|
||||
}
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Stream text using Claude Code
|
||||
* @param {Object} options - Stream options
|
||||
* @returns {Promise<Object>}
|
||||
*/
|
||||
async doStream(options) {
|
||||
await loadClaudeCodeModule();
|
||||
const { messagesPrompt } = convertToClaudeCodeMessages(
|
||||
options.prompt,
|
||||
options.mode
|
||||
);
|
||||
|
||||
const abortController = new AbortController();
|
||||
if (options.abortSignal) {
|
||||
options.abortSignal.addEventListener('abort', () =>
|
||||
abortController.abort()
|
||||
);
|
||||
}
|
||||
|
||||
const queryOptions = {
|
||||
model: this.getModel(),
|
||||
abortController,
|
||||
resume: this.sessionId,
|
||||
pathToClaudeCodeExecutable: this.settings.pathToClaudeCodeExecutable,
|
||||
customSystemPrompt: this.settings.customSystemPrompt,
|
||||
appendSystemPrompt: this.settings.appendSystemPrompt,
|
||||
maxTurns: this.settings.maxTurns,
|
||||
maxThinkingTokens: this.settings.maxThinkingTokens,
|
||||
cwd: this.settings.cwd,
|
||||
executable: this.settings.executable,
|
||||
executableArgs: this.settings.executableArgs,
|
||||
permissionMode: this.settings.permissionMode,
|
||||
permissionPromptToolName: this.settings.permissionPromptToolName,
|
||||
continue: this.settings.continue,
|
||||
allowedTools: this.settings.allowedTools,
|
||||
disallowedTools: this.settings.disallowedTools,
|
||||
mcpServers: this.settings.mcpServers
|
||||
};
|
||||
|
||||
const warnings = this.generateUnsupportedWarnings(options);
|
||||
|
||||
const stream = new ReadableStream({
|
||||
start: async (controller) => {
|
||||
try {
|
||||
const response = query({
|
||||
prompt: messagesPrompt,
|
||||
options: queryOptions
|
||||
});
|
||||
|
||||
let usage = { promptTokens: 0, completionTokens: 0 };
|
||||
let accumulatedText = '';
|
||||
|
||||
for await (const message of response) {
|
||||
if (message.type === 'assistant') {
|
||||
const text = message.message.content
|
||||
.map((c) => (c.type === 'text' ? c.text : ''))
|
||||
.join('');
|
||||
|
||||
if (text) {
|
||||
accumulatedText += text;
|
||||
|
||||
// In object-json mode, we need to accumulate the full text
|
||||
// and extract JSON at the end, so don't stream individual deltas
|
||||
if (options.mode?.type !== 'object-json') {
|
||||
controller.enqueue({
|
||||
type: 'text-delta',
|
||||
textDelta: text
|
||||
});
|
||||
}
|
||||
}
|
||||
} else if (message.type === 'result') {
|
||||
let rawUsage;
|
||||
if ('usage' in message) {
|
||||
rawUsage = message.usage;
|
||||
usage = {
|
||||
promptTokens:
|
||||
(message.usage.cache_creation_input_tokens ?? 0) +
|
||||
(message.usage.cache_read_input_tokens ?? 0) +
|
||||
(message.usage.input_tokens ?? 0),
|
||||
completionTokens: message.usage.output_tokens ?? 0
|
||||
};
|
||||
}
|
||||
|
||||
let finishReason = 'stop';
|
||||
if (message.subtype === 'error_max_turns') {
|
||||
finishReason = 'length';
|
||||
} else if (message.subtype === 'error_during_execution') {
|
||||
finishReason = 'error';
|
||||
}
|
||||
|
||||
// Store session ID in the model instance
|
||||
this.sessionId = message.session_id;
|
||||
|
||||
// In object-json mode, extract JSON and send the full text at once
|
||||
if (options.mode?.type === 'object-json' && accumulatedText) {
|
||||
const extractedJson = extractJson(accumulatedText);
|
||||
controller.enqueue({
|
||||
type: 'text-delta',
|
||||
textDelta: extractedJson
|
||||
});
|
||||
}
|
||||
|
||||
controller.enqueue({
|
||||
type: 'finish',
|
||||
finishReason,
|
||||
usage,
|
||||
providerMetadata: {
|
||||
'claude-code': {
|
||||
sessionId: message.session_id,
|
||||
...(message.total_cost_usd !== undefined && {
|
||||
costUsd: message.total_cost_usd
|
||||
}),
|
||||
...(message.duration_ms !== undefined && {
|
||||
durationMs: message.duration_ms
|
||||
}),
|
||||
...(rawUsage !== undefined && { rawUsage })
|
||||
}
|
||||
}
|
||||
});
|
||||
} else if (
|
||||
message.type === 'system' &&
|
||||
message.subtype === 'init'
|
||||
) {
|
||||
// Store session ID for future use
|
||||
this.sessionId = message.session_id;
|
||||
|
||||
// Emit response metadata when session is initialized
|
||||
controller.enqueue({
|
||||
type: 'response-metadata',
|
||||
id: message.session_id,
|
||||
timestamp: new Date(),
|
||||
modelId: this.modelId
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
controller.close();
|
||||
} catch (error) {
|
||||
let errorToEmit;
|
||||
|
||||
if (error instanceof AbortError) {
|
||||
errorToEmit = options.abortSignal?.aborted
|
||||
? options.abortSignal.reason
|
||||
: error;
|
||||
} else if (
|
||||
error.message?.includes('not logged in') ||
|
||||
error.message?.includes('authentication') ||
|
||||
error.exitCode === 401
|
||||
) {
|
||||
errorToEmit = createAuthenticationError({
|
||||
message:
|
||||
error.message ||
|
||||
'Authentication failed. Please ensure Claude Code CLI is properly authenticated.'
|
||||
});
|
||||
} else {
|
||||
errorToEmit = createAPICallError({
|
||||
message: error.message || 'Claude Code CLI error',
|
||||
code: error.code,
|
||||
exitCode: error.exitCode,
|
||||
stderr: error.stderr,
|
||||
promptExcerpt: messagesPrompt.substring(0, 200),
|
||||
isRetryable:
|
||||
error.code === 'ENOENT' || error.code === 'ECONNREFUSED'
|
||||
});
|
||||
}
|
||||
|
||||
// Emit error as a stream part
|
||||
controller.enqueue({
|
||||
type: 'error',
|
||||
error: errorToEmit
|
||||
});
|
||||
|
||||
controller.close();
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
return {
|
||||
stream,
|
||||
rawCall: {
|
||||
rawPrompt: messagesPrompt,
|
||||
rawSettings: queryOptions
|
||||
},
|
||||
warnings: warnings.length > 0 ? warnings : undefined,
|
||||
request: {
|
||||
body: messagesPrompt
|
||||
}
|
||||
};
|
||||
}
|
||||
}
|
||||
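For orientation, this is roughly how the stream returned by doStream is consumed (sketch; in practice the AI SDK's streamText drives this loop, and it requires @anthropic-ai/claude-code to be installed and the CLI to be authenticated):

// Sketch: reading the stream parts emitted by doStream.
import { claudeCode } from './index.js';

const model = claudeCode('sonnet');
const { stream } = await model.doStream({
  prompt: [{ role: 'user', content: 'Summarize this repository in one sentence.' }],
  mode: { type: 'regular' }
});

const reader = stream.getReader();
while (true) {
  const { done, value } = await reader.read();
  if (done) break;
  if (value.type === 'text-delta') process.stdout.write(value.textDelta);
  if (value.type === 'finish') console.log('\nfinish:', value.finishReason, value.usage);
  if (value.type === 'error') throw value.error;
}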
139
src/ai-providers/custom-sdk/claude-code/message-converter.js
Normal file
@@ -0,0 +1,139 @@
|
||||
/**
|
||||
* @fileoverview Converts AI SDK prompt format to Claude Code message format
|
||||
*/
|
||||
|
||||
/**
|
||||
* Convert AI SDK prompt to Claude Code messages format
|
||||
* @param {Array} prompt - AI SDK prompt array
|
||||
* @param {Object} [mode] - Generation mode
|
||||
* @param {string} mode.type - Mode type ('regular', 'object-json', 'object-tool')
|
||||
* @returns {{messagesPrompt: string, systemPrompt?: string}}
|
||||
*/
|
||||
export function convertToClaudeCodeMessages(prompt, mode) {
|
||||
const messages = [];
|
||||
let systemPrompt;
|
||||
|
||||
for (const message of prompt) {
|
||||
switch (message.role) {
|
||||
case 'system':
|
||||
systemPrompt = message.content;
|
||||
break;
|
||||
|
||||
case 'user':
|
||||
if (typeof message.content === 'string') {
|
||||
messages.push(message.content);
|
||||
} else {
|
||||
// Handle multi-part content
|
||||
const textParts = message.content
|
||||
.filter((part) => part.type === 'text')
|
||||
.map((part) => part.text)
|
||||
.join('\n');
|
||||
|
||||
if (textParts) {
|
||||
messages.push(textParts);
|
||||
}
|
||||
|
||||
// Note: Image parts are not supported by Claude Code CLI
|
||||
const imageParts = message.content.filter(
|
||||
(part) => part.type === 'image'
|
||||
);
|
||||
if (imageParts.length > 0) {
|
||||
console.warn(
|
||||
'Claude Code CLI does not support image inputs. Images will be ignored.'
|
||||
);
|
||||
}
|
||||
}
|
||||
break;
|
||||
|
||||
case 'assistant':
|
||||
if (typeof message.content === 'string') {
|
||||
messages.push(`Assistant: ${message.content}`);
|
||||
} else {
|
||||
const textParts = message.content
|
||||
.filter((part) => part.type === 'text')
|
||||
.map((part) => part.text)
|
||||
.join('\n');
|
||||
|
||||
if (textParts) {
|
||||
messages.push(`Assistant: ${textParts}`);
|
||||
}
|
||||
|
||||
// Handle tool calls if present
|
||||
const toolCalls = message.content.filter(
|
||||
(part) => part.type === 'tool-call'
|
||||
);
|
||||
if (toolCalls.length > 0) {
|
||||
// For now, we'll just note that tool calls were made
|
||||
messages.push(`Assistant: [Tool calls made]`);
|
||||
}
|
||||
}
|
||||
break;
|
||||
|
||||
case 'tool':
|
||||
// Tool results could be included in the conversation
|
||||
messages.push(
|
||||
`Tool Result (${message.content[0].toolName}): ${JSON.stringify(
|
||||
message.content[0].result
|
||||
)}`
|
||||
);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
// For the SDK, we need to provide a single prompt string
|
||||
// Format the conversation history properly
|
||||
|
||||
// Combine system prompt with messages
|
||||
let finalPrompt = '';
|
||||
|
||||
// Add system prompt at the beginning if present
|
||||
if (systemPrompt) {
|
||||
finalPrompt = systemPrompt;
|
||||
}
|
||||
|
||||
if (messages.length === 0) {
|
||||
return { messagesPrompt: finalPrompt, systemPrompt };
|
||||
}
|
||||
|
||||
// Format messages
|
||||
const formattedMessages = [];
|
||||
for (let i = 0; i < messages.length; i++) {
|
||||
const msg = messages[i];
|
||||
// Check if this is a user or assistant message based on content
|
||||
if (msg.startsWith('Assistant:') || msg.startsWith('Tool Result')) {
|
||||
formattedMessages.push(msg);
|
||||
} else {
|
||||
// User messages
|
||||
formattedMessages.push(`Human: ${msg}`);
|
||||
}
|
||||
}
|
||||
|
||||
// Combine system prompt with messages
|
||||
if (finalPrompt) {
|
||||
finalPrompt = finalPrompt + '\n\n' + formattedMessages.join('\n\n');
|
||||
} else {
|
||||
finalPrompt = formattedMessages.join('\n\n');
|
||||
}
|
||||
|
||||
// For JSON mode, add explicit instruction to ensure JSON output
|
||||
if (mode?.type === 'object-json') {
|
||||
// Make the JSON instruction even more explicit
|
||||
finalPrompt = `${finalPrompt}
|
||||
|
||||
CRITICAL INSTRUCTION: You MUST respond with ONLY valid JSON. Follow these rules EXACTLY:
|
||||
1. Start your response with an opening brace {
|
||||
2. End your response with a closing brace }
|
||||
3. Do NOT include any text before the opening brace
|
||||
4. Do NOT include any text after the closing brace
|
||||
5. Do NOT use markdown code blocks or backticks
|
||||
6. Do NOT include explanations or commentary
|
||||
7. The ENTIRE response must be valid JSON that can be parsed with JSON.parse()
|
||||
|
||||
Begin your response with { and end with }`;
|
||||
}
|
||||
|
||||
return {
|
||||
messagesPrompt: finalPrompt,
|
||||
systemPrompt
|
||||
};
|
||||
}
|
||||
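A small worked example of the conversion (sketch):

// Sketch: flattening an AI SDK prompt into the single string the CLI expects.
import { convertToClaudeCodeMessages } from './message-converter.js';

const { messagesPrompt, systemPrompt } = convertToClaudeCodeMessages([
  { role: 'system', content: 'You are a terse release-note writer.' },
  { role: 'user', content: 'Draft a note for the new provider.' }
]);

console.log(systemPrompt); // 'You are a terse release-note writer.'
console.log(messagesPrompt);
// You are a terse release-note writer.
//
// Human: Draft a note for the new provider.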
73
src/ai-providers/custom-sdk/claude-code/types.js
Normal file
@@ -0,0 +1,73 @@
|
||||
/**
|
||||
* @fileoverview Type definitions for Claude Code AI SDK provider
|
||||
* These JSDoc types mirror the TypeScript interfaces from the original provider
|
||||
*/
|
||||
|
||||
/**
|
||||
* Claude Code provider settings
|
||||
* @typedef {Object} ClaudeCodeSettings
|
||||
* @property {string} [pathToClaudeCodeExecutable='claude'] - Custom path to Claude Code CLI executable
|
||||
* @property {string} [customSystemPrompt] - Custom system prompt to use
|
||||
* @property {string} [appendSystemPrompt] - Append additional content to the system prompt
|
||||
* @property {number} [maxTurns] - Maximum number of turns for the conversation
|
||||
* @property {number} [maxThinkingTokens] - Maximum thinking tokens for the model
|
||||
* @property {string} [cwd] - Working directory for CLI operations
|
||||
* @property {'bun'|'deno'|'node'} [executable='node'] - JavaScript runtime to use
|
||||
* @property {string[]} [executableArgs] - Additional arguments for the JavaScript runtime
|
||||
* @property {'default'|'acceptEdits'|'bypassPermissions'|'plan'} [permissionMode='default'] - Permission mode for tool usage
|
||||
* @property {string} [permissionPromptToolName] - Custom tool name for permission prompts
|
||||
* @property {boolean} [continue] - Continue the most recent conversation
|
||||
* @property {string} [resume] - Resume a specific session by ID
|
||||
* @property {string[]} [allowedTools] - Tools to explicitly allow during execution (e.g., ['Read', 'LS', 'Bash(git log:*)'])
|
||||
* @property {string[]} [disallowedTools] - Tools to disallow during execution (e.g., ['Write', 'Edit', 'Bash(rm:*)'])
|
||||
* @property {Object.<string, MCPServerConfig>} [mcpServers] - MCP server configuration
|
||||
* @property {boolean} [verbose] - Enable verbose logging for debugging
|
||||
*/
|
||||
|
||||
/**
|
||||
* MCP Server configuration
|
||||
* @typedef {Object} MCPServerConfig
|
||||
* @property {'stdio'|'sse'} [type='stdio'] - Server type
|
||||
* @property {string} command - Command to execute (for stdio type)
|
||||
* @property {string[]} [args] - Arguments for the command
|
||||
* @property {Object.<string, string>} [env] - Environment variables
|
||||
* @property {string} url - URL for SSE type servers
|
||||
* @property {Object.<string, string>} [headers] - Headers for SSE type servers
|
||||
*/
|
||||
|
||||
/**
|
||||
* Model ID type - either 'opus', 'sonnet', or any string
|
||||
* @typedef {'opus'|'sonnet'|string} ClaudeCodeModelId
|
||||
*/
|
||||
|
||||
/**
|
||||
* Language model options
|
||||
* @typedef {Object} ClaudeCodeLanguageModelOptions
|
||||
* @property {ClaudeCodeModelId} id - The model ID
|
||||
* @property {ClaudeCodeSettings} [settings] - Optional settings
|
||||
*/
|
||||
|
||||
/**
|
||||
* Error metadata for Claude Code errors
|
||||
* @typedef {Object} ClaudeCodeErrorMetadata
|
||||
* @property {string} [code] - Error code
|
||||
* @property {number} [exitCode] - Process exit code
|
||||
* @property {string} [stderr] - Standard error output
|
||||
* @property {string} [promptExcerpt] - Excerpt of the prompt that caused the error
|
||||
*/
|
||||
|
||||
/**
|
||||
* Claude Code provider interface
|
||||
* @typedef {Object} ClaudeCodeProvider
|
||||
* @property {function(ClaudeCodeModelId, ClaudeCodeSettings=): Object} languageModel - Create a language model
|
||||
* @property {function(ClaudeCodeModelId, ClaudeCodeSettings=): Object} chat - Alias for languageModel
|
||||
* @property {function(string): never} textEmbeddingModel - Throws NoSuchModelError (not supported)
|
||||
*/
|
||||
|
||||
/**
|
||||
* Claude Code provider settings
|
||||
* @typedef {Object} ClaudeCodeProviderSettings
|
||||
* @property {ClaudeCodeSettings} [defaultSettings] - Default settings to use for all models
|
||||
*/
|
||||
|
||||
export {}; // This ensures the file is treated as a module
|
||||
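An example settings object exercising a few of these fields (values are illustrative only; the MCP server entry is hypothetical):

// Sketch: a ClaudeCodeSettings object as described by the typedefs above.
/** @type {import('./types.js').ClaudeCodeSettings} */
const settings = {
  pathToClaudeCodeExecutable: 'claude', // default CLI name on PATH
  maxTurns: 3,
  permissionMode: 'plan',
  allowedTools: ['Read', 'LS', 'Bash(git log:*)'],
  disallowedTools: ['Write', 'Edit', 'Bash(rm:*)'],
  mcpServers: {
    'task-master-ai': { type: 'stdio', command: 'npx', args: ['-y', 'task-master-ai'] }
  }
};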
@@ -13,3 +13,4 @@ export { OllamaAIProvider } from './ollama.js';
export { BedrockAIProvider } from './bedrock.js';
export { AzureProvider } from './azure.js';
export { VertexAIProvider } from './google-vertex.js';
export { ClaudeCodeProvider } from './claude-code.js';

@@ -19,7 +19,8 @@ export const CUSTOM_PROVIDERS = {
VERTEX: 'vertex',
BEDROCK: 'bedrock',
OPENROUTER: 'openrouter',
OLLAMA: 'ollama'
OLLAMA: 'ollama',
CLAUDE_CODE: 'claude-code'
};

// Custom providers array (for backward compatibility and iteration)

@@ -333,8 +333,8 @@ log_step() {
|
||||
|
||||
log_step "Initializing Task Master project (non-interactive)"
|
||||
task-master init -y --name="E2E Test $TIMESTAMP" --description="Automated E2E test run"
|
||||
if [ ! -f ".taskmasterconfig" ]; then
|
||||
log_error "Initialization failed: .taskmasterconfig not found."
|
||||
if [ ! -f ".taskmaster/config.json" ]; then
|
||||
log_error "Initialization failed: .taskmaster/config.json not found."
|
||||
exit 1
|
||||
fi
|
||||
log_success "Project initialized."
|
||||
@@ -344,8 +344,8 @@ log_step() {
|
||||
exit_status_prd=$?
|
||||
echo "$cmd_output_prd"
|
||||
extract_and_sum_cost "$cmd_output_prd"
|
||||
if [ $exit_status_prd -ne 0 ] || [ ! -s "tasks/tasks.json" ]; then
|
||||
log_error "Parsing PRD failed: tasks/tasks.json not found or is empty. Exit status: $exit_status_prd"
|
||||
if [ $exit_status_prd -ne 0 ] || [ ! -s ".taskmaster/tasks/tasks.json" ]; then
|
||||
log_error "Parsing PRD failed: .taskmaster/tasks/tasks.json not found or is empty. Exit status: $exit_status_prd"
|
||||
exit 1
|
||||
else
|
||||
log_success "PRD parsed successfully."
|
||||
@@ -386,6 +386,95 @@ log_step() {
|
||||
task-master list --with-subtasks > task_list_after_changes.log
|
||||
log_success "Task list after changes saved to task_list_after_changes.log"
|
||||
|
||||
# === Start New Test Section: Tag-Aware Expand Testing ===
|
||||
log_step "Creating additional tag for expand testing"
|
||||
task-master add-tag feature-expand --description="Tag for testing expand command with tag preservation"
|
||||
log_success "Created feature-expand tag."
|
||||
|
||||
log_step "Adding task to feature-expand tag"
|
||||
task-master add-task --tag=feature-expand --prompt="Test task for tag-aware expansion" --priority=medium
|
||||
# Get the new task ID dynamically
|
||||
new_expand_task_id=$(jq -r '.["feature-expand"].tasks[-1].id' .taskmaster/tasks/tasks.json)
|
||||
log_success "Added task $new_expand_task_id to feature-expand tag."
|
||||
|
||||
log_step "Verifying tags exist before expand test"
|
||||
task-master tags > tags_before_expand.log
|
||||
tag_count_before=$(jq 'keys | length' .taskmaster/tasks/tasks.json)
|
||||
log_success "Tag count before expand: $tag_count_before"
|
||||
|
||||
log_step "Expanding task in feature-expand tag (testing tag corruption fix)"
|
||||
cmd_output_expand_tagged=$(task-master expand --tag=feature-expand --id="$new_expand_task_id" 2>&1)
|
||||
exit_status_expand_tagged=$?
|
||||
echo "$cmd_output_expand_tagged"
|
||||
extract_and_sum_cost "$cmd_output_expand_tagged"
|
||||
if [ $exit_status_expand_tagged -ne 0 ]; then
|
||||
log_error "Tagged expand failed. Exit status: $exit_status_expand_tagged"
|
||||
else
|
||||
log_success "Tagged expand completed."
|
||||
fi
|
||||
|
||||
log_step "Verifying tag preservation after expand"
|
||||
task-master tags > tags_after_expand.log
|
||||
tag_count_after=$(jq 'keys | length' .taskmaster/tasks/tasks.json)
|
||||
|
||||
if [ "$tag_count_before" -eq "$tag_count_after" ]; then
|
||||
log_success "Tag count preserved: $tag_count_after (no corruption detected)"
|
||||
else
|
||||
log_error "Tag corruption detected! Before: $tag_count_before, After: $tag_count_after"
|
||||
fi
|
||||
|
||||
log_step "Verifying master tag still exists and has tasks"
|
||||
master_task_count=$(jq -r '.master.tasks | length' .taskmaster/tasks/tasks.json 2>/dev/null || echo "0")
|
||||
if [ "$master_task_count" -gt "0" ]; then
|
||||
log_success "Master tag preserved with $master_task_count tasks"
|
||||
else
|
||||
log_error "Master tag corrupted or empty after tagged expand"
|
||||
fi
|
||||
|
||||
log_step "Verifying feature-expand tag has expanded subtasks"
|
||||
expanded_subtask_count=$(jq -r ".\"feature-expand\".tasks[] | select(.id == $new_expand_task_id) | .subtasks | length" .taskmaster/tasks/tasks.json 2>/dev/null || echo "0")
|
||||
if [ "$expanded_subtask_count" -gt "0" ]; then
|
||||
log_success "Expand successful: $expanded_subtask_count subtasks created in feature-expand tag"
|
||||
else
|
||||
log_error "Expand failed: No subtasks found in feature-expand tag"
|
||||
fi
|
||||
|
||||
log_step "Testing force expand with tag preservation"
|
||||
cmd_output_force_expand=$(task-master expand --tag=feature-expand --id="$new_expand_task_id" --force 2>&1)
|
||||
exit_status_force_expand=$?
|
||||
echo "$cmd_output_force_expand"
|
||||
extract_and_sum_cost "$cmd_output_force_expand"
|
||||
|
||||
# Verify tags still preserved after force expand
|
||||
tag_count_after_force=$(jq 'keys | length' .taskmaster/tasks/tasks.json)
|
||||
if [ "$tag_count_before" -eq "$tag_count_after_force" ]; then
|
||||
log_success "Force expand preserved all tags"
|
||||
else
|
||||
log_error "Force expand caused tag corruption"
|
||||
fi
|
||||
|
||||
log_step "Testing expand --all with tag preservation"
|
||||
# Add another task to feature-expand for expand-all testing
|
||||
task-master add-task --tag=feature-expand --prompt="Second task for expand-all testing" --priority=low
|
||||
second_expand_task_id=$(jq -r '.["feature-expand"].tasks[-1].id' .taskmaster/tasks/tasks.json)
|
||||
|
||||
cmd_output_expand_all=$(task-master expand --tag=feature-expand --all 2>&1)
|
||||
exit_status_expand_all=$?
|
||||
echo "$cmd_output_expand_all"
|
||||
extract_and_sum_cost "$cmd_output_expand_all"
|
||||
|
||||
# Verify tags preserved after expand-all
|
||||
tag_count_after_all=$(jq 'keys | length' .taskmaster/tasks/tasks.json)
|
||||
if [ "$tag_count_before" -eq "$tag_count_after_all" ]; then
|
||||
log_success "Expand --all preserved all tags"
|
||||
else
|
||||
log_error "Expand --all caused tag corruption"
|
||||
fi
|
||||
|
||||
log_success "Completed expand --all tag preservation test."
|
||||
|
||||
# === End New Test Section: Tag-Aware Expand Testing ===
|
||||
|
||||
# === Test Model Commands ===
|
||||
log_step "Checking initial model configuration"
|
||||
task-master models > models_initial_config.log
|
||||
@@ -626,7 +715,7 @@ log_step() {
|
||||
|
||||
# Find the next available task ID dynamically instead of hardcoding 11, 12
|
||||
# Assuming tasks are added sequentially and we didn't remove any core tasks yet
|
||||
last_task_id=$(jq '[.tasks[].id] | max' tasks/tasks.json)
|
||||
last_task_id=$(jq '[.master.tasks[].id] | max' .taskmaster/tasks/tasks.json)
|
||||
manual_task_id=$((last_task_id + 1))
|
||||
ai_task_id=$((manual_task_id + 1))
|
||||
|
||||
@@ -747,30 +836,30 @@ log_step() {
|
||||
task-master list --with-subtasks > task_list_after_clear_all.log
|
||||
log_success "Task list after clear-all saved. (Manual/LLM check recommended to verify subtasks removed)"
|
||||
|
||||
log_step "Expanding Task 1 again (to have subtasks for next test)"
|
||||
task-master expand --id=1
|
||||
log_success "Attempted to expand Task 1 again."
|
||||
# Verify 1.1 exists again
|
||||
if ! jq -e '.tasks[] | select(.id == 1) | .subtasks[] | select(.id == 1)' tasks/tasks.json > /dev/null; then
|
||||
log_error "Subtask 1.1 not found in tasks.json after re-expanding Task 1."
|
||||
log_step "Expanding Task 3 again (to have subtasks for next test)"
|
||||
task-master expand --id=3
|
||||
log_success "Attempted to expand Task 3."
|
||||
# Verify 3.1 exists
|
||||
if ! jq -e '.master.tasks[] | select(.id == 3) | .subtasks[] | select(.id == 1)' .taskmaster/tasks/tasks.json > /dev/null; then
|
||||
log_error "Subtask 3.1 not found in tasks.json after expanding Task 3."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
log_step "Adding dependency: Task 3 depends on Subtask 1.1"
|
||||
task-master add-dependency --id=3 --depends-on=1.1
|
||||
log_success "Added dependency 3 -> 1.1."
|
||||
log_step "Adding dependency: Task 4 depends on Subtask 3.1"
|
||||
task-master add-dependency --id=4 --depends-on=3.1
|
||||
log_success "Added dependency 4 -> 3.1."
|
||||
|
||||
log_step "Showing Task 3 details (after adding subtask dependency)"
|
||||
task-master show 3 > task_3_details_after_dep_add.log
|
||||
log_success "Task 3 details saved. (Manual/LLM check recommended for dependency [1.1])"
|
||||
log_step "Showing Task 4 details (after adding subtask dependency)"
|
||||
task-master show 4 > task_4_details_after_dep_add.log
|
||||
log_success "Task 4 details saved. (Manual/LLM check recommended for dependency [3.1])"
|
||||
|
||||
log_step "Removing dependency: Task 3 depends on Subtask 1.1"
|
||||
task-master remove-dependency --id=3 --depends-on=1.1
|
||||
log_success "Removed dependency 3 -> 1.1."
|
||||
log_step "Removing dependency: Task 4 depends on Subtask 3.1"
|
||||
task-master remove-dependency --id=4 --depends-on=3.1
|
||||
log_success "Removed dependency 4 -> 3.1."
|
||||
|
||||
log_step "Showing Task 3 details (after removing subtask dependency)"
|
||||
task-master show 3 > task_3_details_after_dep_remove.log
|
||||
log_success "Task 3 details saved. (Manual/LLM check recommended to verify dependency removed)"
|
||||
log_step "Showing Task 4 details (after removing subtask dependency)"
|
||||
task-master show 4 > task_4_details_after_dep_remove.log
|
||||
log_success "Task 4 details saved. (Manual/LLM check recommended to verify dependency removed)"
|
||||
|
||||
# === End New Test Section ===
|
||||
|
||||
|
||||
95
tests/integration/claude-code-optional.test.js
Normal file
@@ -0,0 +1,95 @@
|
||||
import { jest } from '@jest/globals';
|
||||
|
||||
// Mock the base provider to avoid circular dependencies
|
||||
jest.unstable_mockModule('../../src/ai-providers/base-provider.js', () => ({
|
||||
BaseAIProvider: class {
|
||||
constructor() {
|
||||
this.name = 'Base Provider';
|
||||
}
|
||||
handleError(context, error) {
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
}));
|
||||
|
||||
// Mock the claude-code SDK to simulate it not being installed
|
||||
jest.unstable_mockModule('@anthropic-ai/claude-code', () => {
|
||||
throw new Error("Cannot find module '@anthropic-ai/claude-code'");
|
||||
});
|
||||
|
||||
// Import after mocking
|
||||
const { ClaudeCodeProvider } = await import(
|
||||
'../../src/ai-providers/claude-code.js'
|
||||
);
|
||||
|
||||
describe('Claude Code Optional Dependency Integration', () => {
|
||||
describe('when @anthropic-ai/claude-code is not installed', () => {
|
||||
it('should allow provider instantiation', () => {
|
||||
// Provider should instantiate without error
|
||||
const provider = new ClaudeCodeProvider();
|
||||
expect(provider).toBeDefined();
|
||||
expect(provider.name).toBe('Claude Code');
|
||||
});
|
||||
|
||||
it('should allow client creation', () => {
|
||||
const provider = new ClaudeCodeProvider();
|
||||
// Client creation should work
|
||||
const client = provider.getClient({});
|
||||
expect(client).toBeDefined();
|
||||
expect(typeof client).toBe('function');
|
||||
});
|
||||
|
||||
it('should fail with clear error when trying to use the model', async () => {
|
||||
const provider = new ClaudeCodeProvider();
|
||||
const client = provider.getClient({});
|
||||
const model = client('opus');
|
||||
|
||||
// The actual usage should fail with the lazy loading error
|
||||
await expect(
|
||||
model.doGenerate({
|
||||
prompt: [{ role: 'user', content: 'Hello' }],
|
||||
mode: { type: 'regular' }
|
||||
})
|
||||
).rejects.toThrow(
|
||||
"Claude Code SDK is not installed. Please install '@anthropic-ai/claude-code' to use the claude-code provider."
|
||||
);
|
||||
});
|
||||
|
||||
it('should provide helpful error message for streaming', async () => {
|
||||
const provider = new ClaudeCodeProvider();
|
||||
const client = provider.getClient({});
|
||||
const model = client('sonnet');
|
||||
|
||||
await expect(
|
||||
model.doStream({
|
||||
prompt: [{ role: 'user', content: 'Hello' }],
|
||||
mode: { type: 'regular' }
|
||||
})
|
||||
).rejects.toThrow(
|
||||
"Claude Code SDK is not installed. Please install '@anthropic-ai/claude-code' to use the claude-code provider."
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
describe('provider behavior', () => {
|
||||
it('should not require API key', () => {
|
||||
const provider = new ClaudeCodeProvider();
|
||||
// Should not throw
|
||||
expect(() => provider.validateAuth()).not.toThrow();
|
||||
expect(() => provider.validateAuth({ apiKey: null })).not.toThrow();
|
||||
});
|
||||
|
||||
it('should work with ai-services-unified when provider is configured', async () => {
|
||||
// This tests that the provider can be selected but will fail appropriately
|
||||
// when the actual model is used
|
||||
const provider = new ClaudeCodeProvider();
|
||||
expect(provider).toBeDefined();
|
||||
|
||||
// In real usage, ai-services-unified would:
|
||||
// 1. Get the provider instance (works)
|
||||
// 2. Call provider.getClient() (works)
|
||||
// 3. Create a model (works)
|
||||
// 4. Try to generate (fails with clear error)
|
||||
});
|
||||
});
|
||||
});
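The behaviour these integration tests pin down (instantiation and client creation succeed, the import failure only surfaces on first model use) is characteristic of a lazily cached dynamic import. A minimal sketch of that pattern, offered as an illustration rather than the shipped implementation, could be:

// Sketch of lazy-loading an optional dependency; illustrative only.
let loadPromise = null;

function loadClaudeCodeSdk() {
	if (!loadPromise) {
		// Attempt the import once and cache the promise, so repeated calls
		// fail fast with the same message instead of re-resolving the module.
		loadPromise = import('@anthropic-ai/claude-code').catch(() => {
			throw new Error(
				"Claude Code SDK is not installed. Please install '@anthropic-ai/claude-code' to use the claude-code provider."
			);
		});
	}
	return loadPromise;
}

class LazyClaudeCodeModel {
	async doGenerate() {
		// Throws the "SDK is not installed" error when the package is missing;
		// the real provider would delegate to the loaded SDK from here on.
		await loadClaudeCodeSdk();
	}
}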
|
||||
@@ -625,19 +625,38 @@ describe('MCP Server Direct Functions', () => {
|
||||
// For successful cases, record that functions were called but don't make real calls
|
||||
mockEnableSilentMode();
|
||||
|
||||
// Mock expandAllTasks
|
||||
// Mock expandAllTasks - now returns a structured object instead of undefined
|
||||
const mockExpandAll = jest.fn().mockImplementation(async () => {
|
||||
// Just simulate success without any real operations
|
||||
return undefined; // expandAllTasks doesn't return anything
|
||||
// Return the new structured response that matches the actual implementation
|
||||
return {
|
||||
success: true,
|
||||
expandedCount: 2,
|
||||
failedCount: 0,
|
||||
skippedCount: 1,
|
||||
tasksToExpand: 3,
|
||||
telemetryData: {
|
||||
timestamp: new Date().toISOString(),
|
||||
commandName: 'expand-all-tasks',
|
||||
totalCost: 0.05,
|
||||
totalTokens: 1000,
|
||||
inputTokens: 600,
|
||||
outputTokens: 400
|
||||
}
|
||||
};
|
||||
});
|
||||
|
||||
// Call mock expandAllTasks
|
||||
await mockExpandAll(
|
||||
args.num,
|
||||
args.research || false,
|
||||
args.prompt || '',
|
||||
args.force || false,
|
||||
{ mcpLog: mockLogger, session: options.session }
|
||||
// Call mock expandAllTasks with the correct signature
|
||||
const result = await mockExpandAll(
|
||||
args.file, // tasksPath
|
||||
args.num, // numSubtasks
|
||||
args.research || false, // useResearch
|
||||
args.prompt || '', // additionalContext
|
||||
args.force || false, // force
|
||||
{
|
||||
mcpLog: mockLogger,
|
||||
session: options.session,
|
||||
projectRoot: args.projectRoot
|
||||
}
|
||||
);
|
||||
|
||||
mockDisableSilentMode();
|
||||
@@ -645,13 +664,14 @@ describe('MCP Server Direct Functions', () => {
|
||||
return {
|
||||
success: true,
|
||||
data: {
|
||||
message: 'Successfully expanded all pending tasks with subtasks',
|
||||
message: `Expand all operation completed. Expanded: ${result.expandedCount}, Failed: ${result.failedCount}, Skipped: ${result.skippedCount}`,
|
||||
details: {
|
||||
numSubtasks: args.num,
|
||||
research: args.research || false,
|
||||
prompt: args.prompt || '',
|
||||
force: args.force || false
|
||||
}
|
||||
expandedCount: result.expandedCount,
|
||||
failedCount: result.failedCount,
|
||||
skippedCount: result.skippedCount,
|
||||
tasksToExpand: result.tasksToExpand
|
||||
},
|
||||
telemetryData: result.telemetryData
|
||||
}
|
||||
};
|
||||
}
|
||||
@@ -671,10 +691,13 @@ describe('MCP Server Direct Functions', () => {
|
||||
|
||||
// Assert
|
||||
expect(result.success).toBe(true);
|
||||
expect(result.data.message).toBe(
|
||||
'Successfully expanded all pending tasks with subtasks'
|
||||
);
|
||||
expect(result.data.details.numSubtasks).toBe(3);
|
||||
expect(result.data.message).toMatch(/Expand all operation completed/);
|
||||
expect(result.data.details.expandedCount).toBe(2);
|
||||
expect(result.data.details.failedCount).toBe(0);
|
||||
expect(result.data.details.skippedCount).toBe(1);
|
||||
expect(result.data.details.tasksToExpand).toBe(3);
|
||||
expect(result.data.telemetryData).toBeDefined();
|
||||
expect(result.data.telemetryData.commandName).toBe('expand-all-tasks');
|
||||
expect(mockEnableSilentMode).toHaveBeenCalled();
|
||||
expect(mockDisableSilentMode).toHaveBeenCalled();
|
||||
});
|
||||
@@ -695,7 +718,8 @@ describe('MCP Server Direct Functions', () => {
|
||||
|
||||
// Assert
|
||||
expect(result.success).toBe(true);
|
||||
expect(result.data.details.research).toBe(true);
|
||||
expect(result.data.details.expandedCount).toBe(2);
|
||||
expect(result.data.telemetryData).toBeDefined();
|
||||
expect(mockEnableSilentMode).toHaveBeenCalled();
|
||||
expect(mockDisableSilentMode).toHaveBeenCalled();
|
||||
});
|
||||
@@ -715,7 +739,8 @@ describe('MCP Server Direct Functions', () => {
|
||||
|
||||
// Assert
|
||||
expect(result.success).toBe(true);
|
||||
expect(result.data.details.force).toBe(true);
|
||||
expect(result.data.details.expandedCount).toBe(2);
|
||||
expect(result.data.telemetryData).toBeDefined();
|
||||
expect(mockEnableSilentMode).toHaveBeenCalled();
|
||||
expect(mockDisableSilentMode).toHaveBeenCalled();
|
||||
});
|
||||
@@ -735,11 +760,77 @@ describe('MCP Server Direct Functions', () => {
|
||||
|
||||
// Assert
|
||||
expect(result.success).toBe(true);
|
||||
expect(result.data.details.prompt).toBe(
|
||||
'Additional context for subtasks'
|
||||
);
|
||||
expect(result.data.details.expandedCount).toBe(2);
|
||||
expect(result.data.telemetryData).toBeDefined();
|
||||
expect(mockEnableSilentMode).toHaveBeenCalled();
|
||||
expect(mockDisableSilentMode).toHaveBeenCalled();
|
||||
});
|
||||
|
||||
test('should handle case with no eligible tasks', async () => {
|
||||
// Arrange
|
||||
const args = {
|
||||
projectRoot: testProjectRoot,
|
||||
file: testTasksPath,
|
||||
num: 3
|
||||
};
|
||||
|
||||
// Act - Mock the scenario where no tasks are eligible for expansion
|
||||
async function testNoEligibleTasks(args, mockLogger, options = {}) {
|
||||
mockEnableSilentMode();
|
||||
|
||||
const mockExpandAll = jest.fn().mockImplementation(async () => {
|
||||
return {
|
||||
success: true,
|
||||
expandedCount: 0,
|
||||
failedCount: 0,
|
||||
skippedCount: 0,
|
||||
tasksToExpand: 0,
|
||||
telemetryData: null,
|
||||
message: 'No tasks eligible for expansion.'
|
||||
};
|
||||
});
|
||||
|
||||
const result = await mockExpandAll(
|
||||
args.file,
|
||||
args.num,
|
||||
false,
|
||||
'',
|
||||
false,
|
||||
{
|
||||
mcpLog: mockLogger,
|
||||
session: options.session,
|
||||
projectRoot: args.projectRoot
|
||||
},
|
||||
'json'
|
||||
);
|
||||
|
||||
mockDisableSilentMode();
|
||||
|
||||
return {
|
||||
success: true,
|
||||
data: {
|
||||
message: result.message,
|
||||
details: {
|
||||
expandedCount: result.expandedCount,
|
||||
failedCount: result.failedCount,
|
||||
skippedCount: result.skippedCount,
|
||||
tasksToExpand: result.tasksToExpand
|
||||
},
|
||||
telemetryData: result.telemetryData
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
const result = await testNoEligibleTasks(args, mockLogger, {
|
||||
session: mockSession
|
||||
});
|
||||
|
||||
// Assert
|
||||
expect(result.success).toBe(true);
|
||||
expect(result.data.message).toBe('No tasks eligible for expansion.');
|
||||
expect(result.data.details.expandedCount).toBe(0);
|
||||
expect(result.data.details.tasksToExpand).toBe(0);
|
||||
expect(result.data.telemetryData).toBeNull();
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
115
tests/unit/ai-providers/claude-code.test.js
Normal file
@@ -0,0 +1,115 @@
|
||||
import { jest } from '@jest/globals';
|
||||
|
||||
// Mock the claude-code SDK module
|
||||
jest.unstable_mockModule(
|
||||
'../../../src/ai-providers/custom-sdk/claude-code/index.js',
|
||||
() => ({
|
||||
createClaudeCode: jest.fn(() => {
|
||||
const provider = (modelId, settings) => ({
|
||||
// Mock language model
|
||||
id: modelId,
|
||||
settings
|
||||
});
|
||||
provider.languageModel = jest.fn((id, settings) => ({ id, settings }));
|
||||
provider.chat = provider.languageModel;
|
||||
return provider;
|
||||
})
|
||||
})
|
||||
);
|
||||
|
||||
// Mock the base provider
|
||||
jest.unstable_mockModule('../../../src/ai-providers/base-provider.js', () => ({
|
||||
BaseAIProvider: class {
|
||||
constructor() {
|
||||
this.name = 'Base Provider';
|
||||
}
|
||||
handleError(context, error) {
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
}));
|
||||
|
||||
// Import after mocking
|
||||
const { ClaudeCodeProvider } = await import(
|
||||
'../../../src/ai-providers/claude-code.js'
|
||||
);
|
||||
|
||||
describe('ClaudeCodeProvider', () => {
|
||||
let provider;
|
||||
|
||||
beforeEach(() => {
|
||||
provider = new ClaudeCodeProvider();
|
||||
jest.clearAllMocks();
|
||||
});
|
||||
|
||||
describe('constructor', () => {
|
||||
it('should set the provider name to Claude Code', () => {
|
||||
expect(provider.name).toBe('Claude Code');
|
||||
});
|
||||
});
|
||||
|
||||
describe('validateAuth', () => {
|
||||
it('should not throw an error (no API key required)', () => {
|
||||
expect(() => provider.validateAuth({})).not.toThrow();
|
||||
});
|
||||
|
||||
it('should not require any parameters', () => {
|
||||
expect(() => provider.validateAuth()).not.toThrow();
|
||||
});
|
||||
|
||||
it('should work with any params passed', () => {
|
||||
expect(() =>
|
||||
provider.validateAuth({
|
||||
apiKey: 'some-key',
|
||||
baseURL: 'https://example.com'
|
||||
})
|
||||
).not.toThrow();
|
||||
});
|
||||
});
|
||||
|
||||
describe('getClient', () => {
|
||||
it('should return a claude code client', () => {
|
||||
const client = provider.getClient({});
|
||||
expect(client).toBeDefined();
|
||||
expect(typeof client).toBe('function');
|
||||
});
|
||||
|
||||
it('should create client without API key or base URL', () => {
|
||||
const client = provider.getClient({});
|
||||
expect(client).toBeDefined();
|
||||
});
|
||||
|
||||
it('should handle params even though they are not used', () => {
|
||||
const client = provider.getClient({
|
||||
baseURL: 'https://example.com',
|
||||
apiKey: 'unused-key'
|
||||
});
|
||||
expect(client).toBeDefined();
|
||||
});
|
||||
|
||||
it('should have languageModel and chat methods', () => {
|
||||
const client = provider.getClient({});
|
||||
expect(client.languageModel).toBeDefined();
|
||||
expect(client.chat).toBeDefined();
|
||||
expect(client.chat).toBe(client.languageModel);
|
||||
});
|
||||
});
|
||||
|
||||
describe('error handling', () => {
|
||||
it('should handle client initialization errors', async () => {
|
||||
// Force an error by making createClaudeCode throw
|
||||
const { createClaudeCode } = await import(
|
||||
'../../../src/ai-providers/custom-sdk/claude-code/index.js'
|
||||
);
|
||||
createClaudeCode.mockImplementationOnce(() => {
|
||||
throw new Error('Mock initialization error');
|
||||
});
|
||||
|
||||
// Create a new provider instance to use the mocked createClaudeCode
|
||||
const errorProvider = new ClaudeCodeProvider();
|
||||
expect(() => errorProvider.getClient({})).toThrow(
|
||||
'Mock initialization error'
|
||||
);
|
||||
});
|
||||
});
|
||||
});
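For orientation, a stripped-down provider shape consistent with what these unit tests assert (a "Claude Code" name, a keyless validateAuth, and getClient delegating to createClaudeCode) might look like the following. This is a sketch inferred from the test assertions, not the shipped source file, and the import paths are assumptions.

// Sketch only: inferred from the assertions above, not the actual provider.
import { createClaudeCode } from './custom-sdk/claude-code/index.js';
import { BaseAIProvider } from './base-provider.js';

export class ClaudeCodeProvider extends BaseAIProvider {
	constructor() {
		super();
		this.name = 'Claude Code';
	}

	validateAuth() {
		// Intentionally a no-op: the provider talks to the local Claude Code CLI
		// and does not require an API key.
	}

	getClient() {
		// createClaudeCode() returns a callable provider exposing languageModel
		// and chat (chat aliases languageModel in the mock above); errors thrown
		// during creation propagate to the caller, as the error-handling test expects.
		return createClaudeCode();
	}
}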
|
||||
@@ -0,0 +1,237 @@
|
||||
import { jest } from '@jest/globals';
|
||||
|
||||
// Mock modules before importing
|
||||
jest.unstable_mockModule('@ai-sdk/provider', () => ({
|
||||
NoSuchModelError: class NoSuchModelError extends Error {
|
||||
constructor({ modelId, modelType }) {
|
||||
super(`No such model: ${modelId}`);
|
||||
this.modelId = modelId;
|
||||
this.modelType = modelType;
|
||||
}
|
||||
}
|
||||
}));
|
||||
|
||||
jest.unstable_mockModule('@ai-sdk/provider-utils', () => ({
|
||||
generateId: jest.fn(() => 'test-id-123')
|
||||
}));
|
||||
|
||||
jest.unstable_mockModule(
|
||||
'../../../../../src/ai-providers/custom-sdk/claude-code/message-converter.js',
|
||||
() => ({
|
||||
convertToClaudeCodeMessages: jest.fn((prompt) => ({
|
||||
messagesPrompt: 'converted-prompt',
|
||||
systemPrompt: 'system'
|
||||
}))
|
||||
})
|
||||
);
|
||||
|
||||
jest.unstable_mockModule(
|
||||
'../../../../../src/ai-providers/custom-sdk/claude-code/json-extractor.js',
|
||||
() => ({
|
||||
extractJson: jest.fn((text) => text)
|
||||
})
|
||||
);
|
||||
|
||||
jest.unstable_mockModule(
|
||||
'../../../../../src/ai-providers/custom-sdk/claude-code/errors.js',
|
||||
() => ({
|
||||
createAPICallError: jest.fn((opts) => new Error(opts.message)),
|
||||
createAuthenticationError: jest.fn((opts) => new Error(opts.message))
|
||||
})
|
||||
);
|
||||
|
||||
// This mock will be controlled by tests
|
||||
let mockClaudeCodeModule = null;
|
||||
jest.unstable_mockModule('@anthropic-ai/claude-code', () => {
|
||||
if (mockClaudeCodeModule) {
|
||||
return mockClaudeCodeModule;
|
||||
}
|
||||
throw new Error("Cannot find module '@anthropic-ai/claude-code'");
|
||||
});
|
||||
|
||||
// Import the module under test
|
||||
const { ClaudeCodeLanguageModel } = await import(
|
||||
'../../../../../src/ai-providers/custom-sdk/claude-code/language-model.js'
|
||||
);
|
||||
|
||||
describe('ClaudeCodeLanguageModel', () => {
|
||||
beforeEach(() => {
|
||||
jest.clearAllMocks();
|
||||
// Reset the module mock
|
||||
mockClaudeCodeModule = null;
|
||||
// Clear module cache to ensure fresh imports
|
||||
jest.resetModules();
|
||||
});
|
||||
|
||||
describe('constructor', () => {
|
||||
it('should initialize with valid model ID', () => {
|
||||
const model = new ClaudeCodeLanguageModel({
|
||||
id: 'opus',
|
||||
settings: { maxTurns: 5 }
|
||||
});
|
||||
|
||||
expect(model.modelId).toBe('opus');
|
||||
expect(model.settings).toEqual({ maxTurns: 5 });
|
||||
expect(model.provider).toBe('claude-code');
|
||||
});
|
||||
|
||||
it('should throw NoSuchModelError for invalid model ID', async () => {
|
||||
expect(
|
||||
() =>
|
||||
new ClaudeCodeLanguageModel({
|
||||
id: '',
|
||||
settings: {}
|
||||
})
|
||||
).toThrow('No such model: ');
|
||||
|
||||
expect(
|
||||
() =>
|
||||
new ClaudeCodeLanguageModel({
|
||||
id: null,
|
||||
settings: {}
|
||||
})
|
||||
).toThrow('No such model: null');
|
||||
});
|
||||
});
|
||||
|
||||
describe('lazy loading of @anthropic-ai/claude-code', () => {
|
||||
it('should throw error when package is not installed', async () => {
|
||||
// Keep mockClaudeCodeModule as null to simulate missing package
|
||||
const model = new ClaudeCodeLanguageModel({
|
||||
id: 'opus',
|
||||
settings: {}
|
||||
});
|
||||
|
||||
await expect(
|
||||
model.doGenerate({
|
||||
prompt: [{ role: 'user', content: 'test' }],
|
||||
mode: { type: 'regular' }
|
||||
})
|
||||
).rejects.toThrow(
|
||||
"Claude Code SDK is not installed. Please install '@anthropic-ai/claude-code' to use the claude-code provider."
|
||||
);
|
||||
});
|
||||
|
||||
it('should load package successfully when available', async () => {
|
||||
// Mock successful package load
|
||||
const mockQuery = jest.fn(async function* () {
|
||||
yield {
|
||||
type: 'assistant',
|
||||
message: { content: [{ type: 'text', text: 'Hello' }] }
|
||||
};
|
||||
yield {
|
||||
type: 'result',
|
||||
subtype: 'done',
|
||||
usage: { output_tokens: 10, input_tokens: 5 }
|
||||
};
|
||||
});
|
||||
|
||||
mockClaudeCodeModule = {
|
||||
query: mockQuery,
|
||||
AbortError: class AbortError extends Error {}
|
||||
};
|
||||
|
||||
// Need to re-import to get fresh module with mocks
|
||||
jest.resetModules();
|
||||
const { ClaudeCodeLanguageModel: FreshModel } = await import(
|
||||
'../../../../../src/ai-providers/custom-sdk/claude-code/language-model.js'
|
||||
);
|
||||
|
||||
const model = new FreshModel({
|
||||
id: 'opus',
|
||||
settings: {}
|
||||
});
|
||||
|
||||
const result = await model.doGenerate({
|
||||
prompt: [{ role: 'user', content: 'test' }],
|
||||
mode: { type: 'regular' }
|
||||
});
|
||||
|
||||
expect(result.text).toBe('Hello');
|
||||
expect(mockQuery).toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it('should only attempt to load package once', async () => {
|
||||
// Get a fresh import to ensure clean state
|
||||
jest.resetModules();
|
||||
const { ClaudeCodeLanguageModel: TestModel } = await import(
|
||||
'../../../../../src/ai-providers/custom-sdk/claude-code/language-model.js'
|
||||
);
|
||||
|
||||
const model = new TestModel({
|
||||
id: 'opus',
|
||||
settings: {}
|
||||
});
|
||||
|
||||
// First call should throw
|
||||
await expect(
|
||||
model.doGenerate({
|
||||
prompt: [{ role: 'user', content: 'test' }],
|
||||
mode: { type: 'regular' }
|
||||
})
|
||||
).rejects.toThrow('Claude Code SDK is not installed');
|
||||
|
||||
// Second call should also throw without trying to load again
|
||||
await expect(
|
||||
model.doGenerate({
|
||||
prompt: [{ role: 'user', content: 'test' }],
|
||||
mode: { type: 'regular' }
|
||||
})
|
||||
).rejects.toThrow('Claude Code SDK is not installed');
|
||||
});
|
||||
});
|
||||
|
||||
describe('generateUnsupportedWarnings', () => {
|
||||
it('should generate warnings for unsupported parameters', () => {
|
||||
const model = new ClaudeCodeLanguageModel({
|
||||
id: 'opus',
|
||||
settings: {}
|
||||
});
|
||||
|
||||
const warnings = model.generateUnsupportedWarnings({
|
||||
temperature: 0.7,
|
||||
maxTokens: 1000,
|
||||
topP: 0.9,
|
||||
seed: 42
|
||||
});
|
||||
|
||||
expect(warnings).toHaveLength(4);
|
||||
expect(warnings[0]).toEqual({
|
||||
type: 'unsupported-setting',
|
||||
setting: 'temperature',
|
||||
details:
|
||||
'Claude Code CLI does not support the temperature parameter. It will be ignored.'
|
||||
});
|
||||
});
|
||||
|
||||
it('should return empty array when no unsupported parameters', () => {
|
||||
const model = new ClaudeCodeLanguageModel({
|
||||
id: 'opus',
|
||||
settings: {}
|
||||
});
|
||||
|
||||
const warnings = model.generateUnsupportedWarnings({});
|
||||
expect(warnings).toEqual([]);
|
||||
});
|
||||
});
|
||||
|
||||
describe('getModel', () => {
|
||||
it('should map model IDs correctly', () => {
|
||||
const model = new ClaudeCodeLanguageModel({
|
||||
id: 'opus',
|
||||
settings: {}
|
||||
});
|
||||
|
||||
expect(model.getModel()).toBe('opus');
|
||||
});
|
||||
|
||||
it('should return unmapped model IDs as-is', () => {
|
||||
const model = new ClaudeCodeLanguageModel({
|
||||
id: 'custom-model',
|
||||
settings: {}
|
||||
});
|
||||
|
||||
expect(model.getModel()).toBe('custom-model');
|
||||
});
|
||||
});
|
||||
});
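The warning objects asserted above follow an unsupported-setting shape with type, setting, and details fields. A minimal sketch of a generator producing that shape is shown below; the exact list of unsupported settings is an assumption drawn from the parameters exercised in the test (temperature, maxTokens, topP, seed).

// Sketch of warning generation matching the assertions above; illustrative only.
const UNSUPPORTED_SETTINGS = ['temperature', 'maxTokens', 'topP', 'seed'];

function generateUnsupportedWarnings(callOptions = {}) {
	return UNSUPPORTED_SETTINGS.filter(
		(setting) => callOptions[setting] !== undefined
	).map((setting) => ({
		type: 'unsupported-setting',
		setting,
		details: `Claude Code CLI does not support the ${setting} parameter. It will be ignored.`
	}));
}

// generateUnsupportedWarnings({ temperature: 0.7 })
//   => [{ type: 'unsupported-setting', setting: 'temperature', details: '...' }]
// generateUnsupportedWarnings({}) => []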
|
||||
@@ -180,6 +180,11 @@ jest.unstable_mockModule('../../src/ai-providers/index.js', () => ({
|
||||
generateText: jest.fn(),
|
||||
streamText: jest.fn(),
|
||||
generateObject: jest.fn()
|
||||
})),
|
||||
ClaudeCodeProvider: jest.fn(() => ({
|
||||
generateText: jest.fn(),
|
||||
streamText: jest.fn(),
|
||||
generateObject: jest.fn()
|
||||
}))
|
||||
}));
|
||||
|
||||
|
||||
@@ -266,6 +266,7 @@ describe('Validation Functions', () => {
|
||||
expect(configManager.validateProvider('perplexity')).toBe(true);
|
||||
expect(configManager.validateProvider('ollama')).toBe(true);
|
||||
expect(configManager.validateProvider('openrouter')).toBe(true);
|
||||
expect(configManager.validateProvider('bedrock')).toBe(true);
|
||||
});
|
||||
|
||||
test('validateProvider should return false for invalid providers', () => {
|
||||
|
||||
324
tests/unit/mcp/tools/expand-all.test.js
Normal file
@@ -0,0 +1,324 @@
|
||||
/**
|
||||
* Tests for the expand-all MCP tool
|
||||
*
|
||||
* Note: This test does NOT test the actual implementation. It tests that:
|
||||
* 1. The tool is registered correctly with the correct parameters
|
||||
* 2. Arguments are passed correctly to expandAllTasksDirect
|
||||
* 3. Error handling works as expected
|
||||
*
|
||||
* We do NOT import the real implementation - everything is mocked
|
||||
*/
|
||||
|
||||
import { jest } from '@jest/globals';
|
||||
|
||||
// Mock EVERYTHING
|
||||
const mockExpandAllTasksDirect = jest.fn();
|
||||
jest.mock('../../../../mcp-server/src/core/task-master-core.js', () => ({
|
||||
expandAllTasksDirect: mockExpandAllTasksDirect
|
||||
}));
|
||||
|
||||
const mockHandleApiResult = jest.fn((result) => result);
|
||||
const mockGetProjectRootFromSession = jest.fn(() => '/mock/project/root');
|
||||
const mockCreateErrorResponse = jest.fn((msg) => ({
|
||||
success: false,
|
||||
error: { code: 'ERROR', message: msg }
|
||||
}));
|
||||
const mockWithNormalizedProjectRoot = jest.fn((fn) => fn);
|
||||
|
||||
jest.mock('../../../../mcp-server/src/tools/utils.js', () => ({
|
||||
getProjectRootFromSession: mockGetProjectRootFromSession,
|
||||
handleApiResult: mockHandleApiResult,
|
||||
createErrorResponse: mockCreateErrorResponse,
|
||||
withNormalizedProjectRoot: mockWithNormalizedProjectRoot
|
||||
}));
|
||||
|
||||
// Mock the z object from zod
|
||||
const mockZod = {
|
||||
object: jest.fn(() => mockZod),
|
||||
string: jest.fn(() => mockZod),
|
||||
number: jest.fn(() => mockZod),
|
||||
boolean: jest.fn(() => mockZod),
|
||||
optional: jest.fn(() => mockZod),
|
||||
describe: jest.fn(() => mockZod),
|
||||
_def: {
|
||||
shape: () => ({
|
||||
num: {},
|
||||
research: {},
|
||||
prompt: {},
|
||||
force: {},
|
||||
tag: {},
|
||||
projectRoot: {}
|
||||
})
|
||||
}
|
||||
};
|
||||
|
||||
jest.mock('zod', () => ({
|
||||
z: mockZod
|
||||
}));
|
||||
|
||||
// DO NOT import the real module - create a fake implementation
|
||||
// This is the fake implementation of registerExpandAllTool
|
||||
const registerExpandAllTool = (server) => {
|
||||
// Create simplified version of the tool config
|
||||
const toolConfig = {
|
||||
name: 'expand_all',
|
||||
description: 'Use Taskmaster to expand all eligible pending tasks',
|
||||
parameters: mockZod,
|
||||
|
||||
// Create a simplified mock of the execute function
|
||||
execute: mockWithNormalizedProjectRoot(async (args, context) => {
|
||||
const { log, session } = context;
|
||||
|
||||
try {
|
||||
log.info &&
|
||||
log.info(`Starting expand-all with args: ${JSON.stringify(args)}`);
|
||||
|
||||
// Call expandAllTasksDirect
|
||||
const result = await mockExpandAllTasksDirect(args, log, { session });
|
||||
|
||||
// Handle result
|
||||
return mockHandleApiResult(result, log);
|
||||
} catch (error) {
|
||||
log.error && log.error(`Error in expand-all tool: ${error.message}`);
|
||||
return mockCreateErrorResponse(error.message);
|
||||
}
|
||||
})
|
||||
};
|
||||
|
||||
// Register the tool with the server
|
||||
server.addTool(toolConfig);
|
||||
};
|
||||
|
||||
describe('MCP Tool: expand-all', () => {
|
||||
// Create mock server
|
||||
let mockServer;
|
||||
let executeFunction;
|
||||
|
||||
// Create mock logger
|
||||
const mockLogger = {
|
||||
debug: jest.fn(),
|
||||
info: jest.fn(),
|
||||
warn: jest.fn(),
|
||||
error: jest.fn()
|
||||
};
|
||||
|
||||
// Test data
|
||||
const validArgs = {
|
||||
num: 3,
|
||||
research: true,
|
||||
prompt: 'additional context',
|
||||
force: false,
|
||||
tag: 'master',
|
||||
projectRoot: '/test/project'
|
||||
};
|
||||
|
||||
// Standard responses
|
||||
const successResponse = {
|
||||
success: true,
|
||||
data: {
|
||||
message:
|
||||
'Expand all operation completed. Expanded: 2, Failed: 0, Skipped: 1',
|
||||
details: {
|
||||
expandedCount: 2,
|
||||
failedCount: 0,
|
||||
skippedCount: 1,
|
||||
tasksToExpand: 3,
|
||||
telemetryData: {
|
||||
commandName: 'expand-all-tasks',
|
||||
totalCost: 0.15,
|
||||
totalTokens: 2500
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const errorResponse = {
|
||||
success: false,
|
||||
error: {
|
||||
code: 'EXPAND_ALL_ERROR',
|
||||
message: 'Failed to expand tasks'
|
||||
}
|
||||
};
|
||||
|
||||
beforeEach(() => {
|
||||
// Reset all mocks
|
||||
jest.clearAllMocks();
|
||||
|
||||
// Create mock server
|
||||
mockServer = {
|
||||
addTool: jest.fn((config) => {
|
||||
executeFunction = config.execute;
|
||||
})
|
||||
};
|
||||
|
||||
// Setup default successful response
|
||||
mockExpandAllTasksDirect.mockResolvedValue(successResponse);
|
||||
|
||||
// Register the tool
|
||||
registerExpandAllTool(mockServer);
|
||||
});
|
||||
|
||||
test('should register the tool correctly', () => {
|
||||
// Verify tool was registered
|
||||
expect(mockServer.addTool).toHaveBeenCalledWith(
|
||||
expect.objectContaining({
|
||||
name: 'expand_all',
|
||||
description: expect.stringContaining('expand all eligible pending'),
|
||||
parameters: expect.any(Object),
|
||||
execute: expect.any(Function)
|
||||
})
|
||||
);
|
||||
|
||||
// Verify the tool config was passed
|
||||
const toolConfig = mockServer.addTool.mock.calls[0][0];
|
||||
expect(toolConfig).toHaveProperty('parameters');
|
||||
expect(toolConfig).toHaveProperty('execute');
|
||||
});
|
||||
|
||||
test('should execute the tool with valid parameters', async () => {
|
||||
// Setup context
|
||||
const mockContext = {
|
||||
log: mockLogger,
|
||||
session: { workingDirectory: '/mock/dir' }
|
||||
};
|
||||
|
||||
// Execute the function
|
||||
const result = await executeFunction(validArgs, mockContext);
|
||||
|
||||
// Verify expandAllTasksDirect was called with correct arguments
|
||||
expect(mockExpandAllTasksDirect).toHaveBeenCalledWith(
|
||||
validArgs,
|
||||
mockLogger,
|
||||
{ session: mockContext.session }
|
||||
);
|
||||
|
||||
// Verify handleApiResult was called
|
||||
expect(mockHandleApiResult).toHaveBeenCalledWith(
|
||||
successResponse,
|
||||
mockLogger
|
||||
);
|
||||
expect(result).toEqual(successResponse);
|
||||
});
|
||||
|
||||
test('should handle expand all with no eligible tasks', async () => {
|
||||
// Arrange
|
||||
const mockDirectResult = {
|
||||
success: true,
|
||||
data: {
|
||||
message:
|
||||
'Expand all operation completed. Expanded: 0, Failed: 0, Skipped: 0',
|
||||
details: {
|
||||
expandedCount: 0,
|
||||
failedCount: 0,
|
||||
skippedCount: 0,
|
||||
tasksToExpand: 0,
|
||||
telemetryData: null
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
mockExpandAllTasksDirect.mockResolvedValue(mockDirectResult);
|
||||
mockHandleApiResult.mockReturnValue({
|
||||
success: true,
|
||||
data: mockDirectResult.data
|
||||
});
|
||||
|
||||
// Act
|
||||
const result = await executeFunction(validArgs, {
|
||||
log: mockLogger,
|
||||
session: { workingDirectory: '/test' }
|
||||
});
|
||||
|
||||
// Assert
|
||||
expect(result.success).toBe(true);
|
||||
expect(result.data.details.expandedCount).toBe(0);
|
||||
expect(result.data.details.tasksToExpand).toBe(0);
|
||||
});
|
||||
|
||||
test('should handle expand all with mixed success/failure', async () => {
|
||||
// Arrange
|
||||
const mockDirectResult = {
|
||||
success: true,
|
||||
data: {
|
||||
message:
|
||||
'Expand all operation completed. Expanded: 2, Failed: 1, Skipped: 0',
|
||||
details: {
|
||||
expandedCount: 2,
|
||||
failedCount: 1,
|
||||
skippedCount: 0,
|
||||
tasksToExpand: 3,
|
||||
telemetryData: {
|
||||
commandName: 'expand-all-tasks',
|
||||
totalCost: 0.1,
|
||||
totalTokens: 1500
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
mockExpandAllTasksDirect.mockResolvedValue(mockDirectResult);
|
||||
mockHandleApiResult.mockReturnValue({
|
||||
success: true,
|
||||
data: mockDirectResult.data
|
||||
});
|
||||
|
||||
// Act
|
||||
const result = await executeFunction(validArgs, {
|
||||
log: mockLogger,
|
||||
session: { workingDirectory: '/test' }
|
||||
});
|
||||
|
||||
// Assert
|
||||
expect(result.success).toBe(true);
|
||||
expect(result.data.details.expandedCount).toBe(2);
|
||||
expect(result.data.details.failedCount).toBe(1);
|
||||
});
|
||||
|
||||
test('should handle errors from expandAllTasksDirect', async () => {
|
||||
// Arrange
|
||||
mockExpandAllTasksDirect.mockRejectedValue(
|
||||
new Error('Direct function error')
|
||||
);
|
||||
|
||||
// Act
|
||||
const result = await executeFunction(validArgs, {
|
||||
log: mockLogger,
|
||||
session: { workingDirectory: '/test' }
|
||||
});
|
||||
|
||||
// Assert
|
||||
expect(mockLogger.error).toHaveBeenCalledWith(
|
||||
expect.stringContaining('Error in expand-all tool')
|
||||
);
|
||||
expect(mockCreateErrorResponse).toHaveBeenCalledWith(
|
||||
'Direct function error'
|
||||
);
|
||||
});
|
||||
|
||||
test('should handle different argument combinations', async () => {
|
||||
// Test with minimal args
|
||||
const minimalArgs = {
|
||||
projectRoot: '/test/project'
|
||||
};
|
||||
|
||||
// Act
|
||||
await executeFunction(minimalArgs, {
|
||||
log: mockLogger,
|
||||
session: { workingDirectory: '/test' }
|
||||
});
|
||||
|
||||
// Assert
|
||||
expect(mockExpandAllTasksDirect).toHaveBeenCalledWith(
|
||||
minimalArgs,
|
||||
mockLogger,
|
||||
expect.any(Object)
|
||||
);
|
||||
});
|
||||
|
||||
test('should use withNormalizedProjectRoot wrapper correctly', () => {
|
||||
// Verify that the execute function is wrapped with withNormalizedProjectRoot
|
||||
expect(mockWithNormalizedProjectRoot).toHaveBeenCalledWith(
|
||||
expect.any(Function)
|
||||
);
|
||||
});
|
||||
});
|
||||
502
tests/unit/scripts/modules/task-manager/expand-all-tasks.test.js
Normal file
@@ -0,0 +1,502 @@
|
||||
/**
|
||||
* Tests for the expand-all-tasks.js module
|
||||
*/
|
||||
import { jest } from '@jest/globals';
|
||||
|
||||
// Mock the dependencies before importing the module under test
|
||||
jest.unstable_mockModule(
|
||||
'../../../../../scripts/modules/task-manager/expand-task.js',
|
||||
() => ({
|
||||
default: jest.fn()
|
||||
})
|
||||
);
|
||||
|
||||
jest.unstable_mockModule('../../../../../scripts/modules/utils.js', () => ({
|
||||
readJSON: jest.fn(),
|
||||
log: jest.fn(),
|
||||
isSilentMode: jest.fn(() => false),
|
||||
findProjectRoot: jest.fn(() => '/test/project'),
|
||||
aggregateTelemetry: jest.fn()
|
||||
}));
|
||||
|
||||
jest.unstable_mockModule(
|
||||
'../../../../../scripts/modules/config-manager.js',
|
||||
() => ({
|
||||
getDebugFlag: jest.fn(() => false)
|
||||
})
|
||||
);
|
||||
|
||||
jest.unstable_mockModule('../../../../../scripts/modules/ui.js', () => ({
|
||||
startLoadingIndicator: jest.fn(),
|
||||
stopLoadingIndicator: jest.fn(),
|
||||
displayAiUsageSummary: jest.fn()
|
||||
}));
|
||||
|
||||
jest.unstable_mockModule('chalk', () => ({
|
||||
default: {
|
||||
white: { bold: jest.fn((text) => text) },
|
||||
cyan: jest.fn((text) => text),
|
||||
green: jest.fn((text) => text),
|
||||
gray: jest.fn((text) => text),
|
||||
red: jest.fn((text) => text),
|
||||
bold: jest.fn((text) => text)
|
||||
}
|
||||
}));
|
||||
|
||||
jest.unstable_mockModule('boxen', () => ({
|
||||
default: jest.fn((text) => text)
|
||||
}));
|
||||
|
||||
// Import the mocked modules
|
||||
const { default: expandTask } = await import(
|
||||
'../../../../../scripts/modules/task-manager/expand-task.js'
|
||||
);
|
||||
const { readJSON, aggregateTelemetry, findProjectRoot } = await import(
|
||||
'../../../../../scripts/modules/utils.js'
|
||||
);
|
||||
|
||||
// Import the module under test
|
||||
const { default: expandAllTasks } = await import(
|
||||
'../../../../../scripts/modules/task-manager/expand-all-tasks.js'
|
||||
);
|
||||
|
||||
const mockExpandTask = expandTask;
|
||||
const mockReadJSON = readJSON;
|
||||
const mockAggregateTelemetry = aggregateTelemetry;
|
||||
const mockFindProjectRoot = findProjectRoot;
|
||||
|
||||
describe('expandAllTasks', () => {
|
||||
const mockTasksPath = '/test/tasks.json';
|
||||
const mockProjectRoot = '/test/project';
|
||||
const mockSession = { userId: 'test-user' };
|
||||
const mockMcpLog = {
|
||||
info: jest.fn(),
|
||||
warn: jest.fn(),
|
||||
error: jest.fn(),
|
||||
debug: jest.fn()
|
||||
};
|
||||
|
||||
const sampleTasksData = {
|
||||
tag: 'master',
|
||||
tasks: [
|
||||
{
|
||||
id: 1,
|
||||
title: 'Pending Task 1',
|
||||
status: 'pending',
|
||||
subtasks: []
|
||||
},
|
||||
{
|
||||
id: 2,
|
||||
title: 'In Progress Task',
|
||||
status: 'in-progress',
|
||||
subtasks: []
|
||||
},
|
||||
{
|
||||
id: 3,
|
||||
title: 'Done Task',
|
||||
status: 'done',
|
||||
subtasks: []
|
||||
},
|
||||
{
|
||||
id: 4,
|
||||
title: 'Task with Subtasks',
|
||||
status: 'pending',
|
||||
subtasks: [{ id: '4.1', title: 'Existing subtask' }]
|
||||
}
|
||||
]
|
||||
};
|
||||
|
||||
beforeEach(() => {
|
||||
jest.clearAllMocks();
|
||||
mockReadJSON.mockReturnValue(sampleTasksData);
|
||||
mockAggregateTelemetry.mockReturnValue({
|
||||
timestamp: '2024-01-01T00:00:00.000Z',
|
||||
commandName: 'expand-all-tasks',
|
||||
totalCost: 0.1,
|
||||
totalTokens: 2000,
|
||||
inputTokens: 1200,
|
||||
outputTokens: 800
|
||||
});
|
||||
});
|
||||
|
||||
describe('successful expansion', () => {
|
||||
test('should expand all eligible pending tasks', async () => {
|
||||
// Arrange
|
||||
const mockTelemetryData = {
|
||||
timestamp: '2024-01-01T00:00:00.000Z',
|
||||
commandName: 'expand-task',
|
||||
totalCost: 0.05,
|
||||
totalTokens: 1000
|
||||
};
|
||||
|
||||
mockExpandTask.mockResolvedValue({
|
||||
telemetryData: mockTelemetryData
|
||||
});
|
||||
|
||||
// Act
|
||||
const result = await expandAllTasks(
|
||||
mockTasksPath,
|
||||
3, // numSubtasks
|
||||
false, // useResearch
|
||||
'test context', // additionalContext
|
||||
false, // force
|
||||
{
|
||||
session: mockSession,
|
||||
mcpLog: mockMcpLog,
|
||||
projectRoot: mockProjectRoot,
|
||||
tag: 'master'
|
||||
},
|
||||
'json' // outputFormat
|
||||
);
|
||||
|
||||
// Assert
|
||||
expect(result.success).toBe(true);
|
||||
expect(result.expandedCount).toBe(2); // Tasks 1 and 2 (pending and in-progress)
|
||||
expect(result.failedCount).toBe(0);
|
||||
expect(result.skippedCount).toBe(0);
|
||||
expect(result.tasksToExpand).toBe(2);
|
||||
expect(result.telemetryData).toBeDefined();
|
||||
|
||||
// Verify readJSON was called correctly
|
||||
expect(mockReadJSON).toHaveBeenCalledWith(
|
||||
mockTasksPath,
|
||||
mockProjectRoot,
|
||||
'master'
|
||||
);
|
||||
|
||||
// Verify expandTask was called for eligible tasks
|
||||
expect(mockExpandTask).toHaveBeenCalledTimes(2);
|
||||
expect(mockExpandTask).toHaveBeenCalledWith(
|
||||
mockTasksPath,
|
||||
1,
|
||||
3,
|
||||
false,
|
||||
'test context',
|
||||
expect.objectContaining({
|
||||
session: mockSession,
|
||||
mcpLog: mockMcpLog,
|
||||
projectRoot: mockProjectRoot,
|
||||
tag: 'master'
|
||||
}),
|
||||
false
|
||||
);
|
||||
});
|
||||
|
||||
test('should handle force flag to expand tasks with existing subtasks', async () => {
|
||||
// Arrange
|
||||
mockExpandTask.mockResolvedValue({
|
||||
telemetryData: { commandName: 'expand-task', totalCost: 0.05 }
|
||||
});
|
||||
|
||||
// Act
|
||||
const result = await expandAllTasks(
|
||||
mockTasksPath,
|
||||
2,
|
||||
false,
|
||||
'',
|
||||
true, // force = true
|
||||
{
|
||||
session: mockSession,
|
||||
mcpLog: mockMcpLog,
|
||||
projectRoot: mockProjectRoot
|
||||
},
|
||||
'json'
|
||||
);
|
||||
|
||||
// Assert
|
||||
expect(result.expandedCount).toBe(3); // Tasks 1, 2, and 4 (including task with existing subtasks)
|
||||
expect(mockExpandTask).toHaveBeenCalledTimes(3);
|
||||
});
|
||||
|
||||
test('should handle research flag', async () => {
|
||||
// Arrange
|
||||
mockExpandTask.mockResolvedValue({
|
||||
telemetryData: { commandName: 'expand-task', totalCost: 0.08 }
|
||||
});
|
||||
|
||||
// Act
|
||||
const result = await expandAllTasks(
|
||||
mockTasksPath,
|
||||
undefined, // numSubtasks not specified
|
||||
true, // useResearch = true
|
||||
'research context',
|
||||
false,
|
||||
{
|
||||
session: mockSession,
|
||||
mcpLog: mockMcpLog,
|
||||
projectRoot: mockProjectRoot
|
||||
},
|
||||
'json'
|
||||
);
|
||||
|
||||
// Assert
|
||||
expect(result.success).toBe(true);
|
||||
expect(mockExpandTask).toHaveBeenCalledWith(
|
||||
mockTasksPath,
|
||||
expect.any(Number),
|
||||
undefined,
|
||||
true, // research flag passed correctly
|
||||
'research context',
|
||||
expect.any(Object),
|
||||
false
|
||||
);
|
||||
});
|
||||
|
||||
test('should return success with message when no tasks are eligible', async () => {
|
||||
// Arrange - Mock tasks data with no eligible tasks
|
||||
const noEligibleTasksData = {
|
||||
tag: 'master',
|
||||
tasks: [
|
||||
{ id: 1, status: 'done', subtasks: [] },
|
||||
{
|
||||
id: 2,
|
||||
status: 'pending',
|
||||
subtasks: [{ id: '2.1', title: 'existing' }]
|
||||
}
|
||||
]
|
||||
};
|
||||
mockReadJSON.mockReturnValue(noEligibleTasksData);
|
||||
|
||||
// Act
|
||||
const result = await expandAllTasks(
|
||||
mockTasksPath,
|
||||
3,
|
||||
false,
|
||||
'',
|
||||
false, // force = false, so task with subtasks won't be expanded
|
||||
{
|
||||
session: mockSession,
|
||||
mcpLog: mockMcpLog,
|
||||
projectRoot: mockProjectRoot
|
||||
},
|
||||
'json'
|
||||
);
|
||||
|
||||
// Assert
|
||||
expect(result.success).toBe(true);
|
||||
expect(result.expandedCount).toBe(0);
|
||||
expect(result.failedCount).toBe(0);
|
||||
expect(result.skippedCount).toBe(0);
|
||||
expect(result.tasksToExpand).toBe(0);
|
||||
expect(result.message).toBe('No tasks eligible for expansion.');
|
||||
expect(mockExpandTask).not.toHaveBeenCalled();
|
||||
});
|
||||
});
|
||||
|
||||
describe('error handling', () => {
|
||||
test('should handle expandTask failures gracefully', async () => {
|
||||
// Arrange
|
||||
mockExpandTask
|
||||
.mockResolvedValueOnce({ telemetryData: { totalCost: 0.05 } }) // First task succeeds
|
||||
.mockRejectedValueOnce(new Error('AI service error')); // Second task fails
|
||||
|
||||
// Act
|
||||
const result = await expandAllTasks(
|
||||
mockTasksPath,
|
||||
3,
|
||||
false,
|
||||
'',
|
||||
false,
|
||||
{
|
||||
session: mockSession,
|
||||
mcpLog: mockMcpLog,
|
||||
projectRoot: mockProjectRoot
|
||||
},
|
||||
'json'
|
||||
);
|
||||
|
||||
// Assert
|
||||
expect(result.success).toBe(true);
|
||||
expect(result.expandedCount).toBe(1);
|
||||
expect(result.failedCount).toBe(1);
|
||||
});
|
||||
|
||||
test('should throw error when tasks.json is invalid', async () => {
|
||||
// Arrange
|
||||
mockReadJSON.mockReturnValue(null);
|
||||
|
||||
// Act & Assert
|
||||
await expect(
|
||||
expandAllTasks(
|
||||
mockTasksPath,
|
||||
3,
|
||||
false,
|
||||
'',
|
||||
false,
|
||||
{
|
||||
session: mockSession,
|
||||
mcpLog: mockMcpLog,
|
||||
projectRoot: mockProjectRoot
|
||||
},
|
||||
'json'
|
||||
)
|
||||
).rejects.toThrow('Invalid tasks data');
|
||||
});
|
||||
|
||||
test('should throw error when project root cannot be determined', async () => {
|
||||
// Arrange - Mock findProjectRoot to return null for this test
|
||||
mockFindProjectRoot.mockReturnValueOnce(null);
|
||||
|
||||
// Act & Assert
|
||||
await expect(
|
||||
expandAllTasks(
|
||||
mockTasksPath,
|
||||
3,
|
||||
false,
|
||||
'',
|
||||
false,
|
||||
{
|
||||
session: mockSession,
|
||||
mcpLog: mockMcpLog
|
||||
// No projectRoot provided, and findProjectRoot will return null
|
||||
},
|
||||
'json'
|
||||
)
|
||||
).rejects.toThrow('Could not determine project root directory');
|
||||
});
|
||||
});
|
||||
|
||||
describe('telemetry aggregation', () => {
|
||||
test('should aggregate telemetry data from multiple expand operations', async () => {
|
||||
// Arrange
|
||||
const telemetryData1 = {
|
||||
commandName: 'expand-task',
|
||||
totalCost: 0.03,
|
||||
totalTokens: 600
|
||||
};
|
||||
const telemetryData2 = {
|
||||
commandName: 'expand-task',
|
||||
totalCost: 0.04,
|
||||
totalTokens: 800
|
||||
};
|
||||
|
||||
mockExpandTask
|
||||
.mockResolvedValueOnce({ telemetryData: telemetryData1 })
|
||||
.mockResolvedValueOnce({ telemetryData: telemetryData2 });
|
||||
|
||||
// Act
|
||||
const result = await expandAllTasks(
|
||||
mockTasksPath,
|
||||
3,
|
||||
false,
|
||||
'',
|
||||
false,
|
||||
{
|
||||
session: mockSession,
|
||||
mcpLog: mockMcpLog,
|
||||
projectRoot: mockProjectRoot
|
||||
},
|
||||
'json'
|
||||
);
|
||||
|
||||
// Assert
|
||||
expect(mockAggregateTelemetry).toHaveBeenCalledWith(
|
||||
[telemetryData1, telemetryData2],
|
||||
'expand-all-tasks'
|
||||
);
|
||||
expect(result.telemetryData).toBeDefined();
|
||||
expect(result.telemetryData.commandName).toBe('expand-all-tasks');
|
||||
});
|
||||
|
||||
test('should handle missing telemetry data gracefully', async () => {
|
||||
// Arrange
|
||||
mockExpandTask.mockResolvedValue({}); // No telemetryData
|
||||
|
||||
// Act
|
||||
const result = await expandAllTasks(
|
||||
mockTasksPath,
|
||||
3,
|
||||
false,
|
||||
'',
|
||||
false,
|
||||
{
|
||||
session: mockSession,
|
||||
mcpLog: mockMcpLog,
|
||||
projectRoot: mockProjectRoot
|
||||
},
|
||||
'json'
|
||||
);
|
||||
|
||||
// Assert
|
||||
expect(result.success).toBe(true);
|
||||
expect(mockAggregateTelemetry).toHaveBeenCalledWith(
|
||||
[],
|
||||
'expand-all-tasks'
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
describe('output format handling', () => {
|
||||
test('should use text output format for CLI calls', async () => {
|
||||
// Arrange
|
||||
mockExpandTask.mockResolvedValue({
|
||||
telemetryData: { commandName: 'expand-task', totalCost: 0.05 }
|
||||
});
|
||||
|
||||
// Act
|
||||
const result = await expandAllTasks(
|
||||
mockTasksPath,
|
||||
3,
|
||||
false,
|
||||
'',
|
||||
false,
|
||||
{
|
||||
projectRoot: mockProjectRoot
|
||||
// No mcpLog provided, should use CLI logger
|
||||
},
|
||||
'text' // CLI output format
|
||||
);
|
||||
|
||||
// Assert
|
||||
expect(result.success).toBe(true);
|
||||
// In text mode, loading indicators and console output would be used
|
||||
// This is harder to test directly but we can verify the result structure
|
||||
});
|
||||
|
||||
test('should handle context tag properly', async () => {
|
||||
// Arrange
|
||||
const taggedTasksData = {
|
||||
...sampleTasksData,
|
||||
tag: 'feature-branch'
|
||||
};
|
||||
mockReadJSON.mockReturnValue(taggedTasksData);
|
||||
mockExpandTask.mockResolvedValue({
|
||||
telemetryData: { commandName: 'expand-task', totalCost: 0.05 }
|
||||
});
|
||||
|
||||
// Act
|
||||
const result = await expandAllTasks(
|
||||
mockTasksPath,
|
||||
3,
|
||||
false,
|
||||
'',
|
||||
false,
|
||||
{
|
||||
session: mockSession,
|
||||
mcpLog: mockMcpLog,
|
||||
projectRoot: mockProjectRoot,
|
||||
tag: 'feature-branch'
|
||||
},
|
||||
'json'
|
||||
);
|
||||
|
||||
// Assert
|
||||
expect(mockReadJSON).toHaveBeenCalledWith(
|
||||
mockTasksPath,
|
||||
mockProjectRoot,
|
||||
'feature-branch'
|
||||
);
|
||||
expect(mockExpandTask).toHaveBeenCalledWith(
|
||||
mockTasksPath,
|
||||
expect.any(Number),
|
||||
3,
|
||||
false,
|
||||
'',
|
||||
expect.objectContaining({
|
||||
tag: 'feature-branch'
|
||||
}),
|
||||
false
|
||||
);
|
||||
});
|
||||
});
|
||||
});
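Taken together, these tests document the structured result that expandAllTasks now returns (success, expandedCount, failedCount, skippedCount, tasksToExpand, telemetryData). A short usage sketch for a caller follows; the import path and argument values are illustrative assumptions, not taken from the repository.

// Sketch of consuming the structured expandAllTasks result; illustrative only.
import expandAllTasks from '../scripts/modules/task-manager/expand-all-tasks.js';

const result = await expandAllTasks(
	'.taskmaster/tasks/tasks.json', // tasksPath
	3, // numSubtasks
	false, // useResearch
	'', // additionalContext
	false, // force
	{ projectRoot: process.cwd(), tag: 'master' },
	'json' // outputFormat: suppress CLI banners and loading indicators
);

if (result.success) {
	console.log(
		`Expanded: ${result.expandedCount}, Failed: ${result.failedCount}, Skipped: ${result.skippedCount}`
	);
	if (result.telemetryData) {
		console.log(`Total cost: ${result.telemetryData.totalCost}`);
	}
}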
|
||||
888
tests/unit/scripts/modules/task-manager/expand-task.test.js
Normal file
@@ -0,0 +1,888 @@
|
||||
/**
|
||||
* Tests for the expand-task.js module
|
||||
*/
|
||||
import { jest } from '@jest/globals';
|
||||
import fs from 'fs';
|
||||
|
||||
// Mock the dependencies before importing the module under test
|
||||
jest.unstable_mockModule('../../../../../scripts/modules/utils.js', () => ({
|
||||
readJSON: jest.fn(),
|
||||
writeJSON: jest.fn(),
|
||||
log: jest.fn(),
|
||||
CONFIG: {
|
||||
model: 'mock-claude-model',
|
||||
maxTokens: 4000,
|
||||
temperature: 0.7,
|
||||
debug: false
|
||||
},
|
||||
sanitizePrompt: jest.fn((prompt) => prompt),
|
||||
truncate: jest.fn((text) => text),
|
||||
isSilentMode: jest.fn(() => false),
|
||||
findTaskById: jest.fn(),
|
||||
findProjectRoot: jest.fn((tasksPath) => '/mock/project/root'),
|
||||
getCurrentTag: jest.fn(() => 'master'),
|
||||
ensureTagMetadata: jest.fn((tagObj) => tagObj),
|
||||
flattenTasksWithSubtasks: jest.fn((tasks) => {
|
||||
const allTasks = [];
|
||||
const queue = [...(tasks || [])];
|
||||
while (queue.length > 0) {
|
||||
const task = queue.shift();
|
||||
allTasks.push(task);
|
||||
if (task.subtasks) {
|
||||
for (const subtask of task.subtasks) {
|
||||
queue.push({ ...subtask, id: `${task.id}.${subtask.id}` });
|
||||
}
|
||||
}
|
||||
}
|
||||
return allTasks;
|
||||
}),
|
||||
readComplexityReport: jest.fn(),
|
||||
markMigrationForNotice: jest.fn(),
|
||||
performCompleteTagMigration: jest.fn(),
|
||||
setTasksForTag: jest.fn(),
|
||||
getTasksForTag: jest.fn((data, tag) => data[tag]?.tasks || [])
|
||||
}));
|
||||
|
||||
jest.unstable_mockModule('../../../../../scripts/modules/ui.js', () => ({
|
||||
displayBanner: jest.fn(),
|
||||
getStatusWithColor: jest.fn((status) => status),
|
||||
startLoadingIndicator: jest.fn(),
|
||||
stopLoadingIndicator: jest.fn(),
|
||||
succeedLoadingIndicator: jest.fn(),
|
||||
failLoadingIndicator: jest.fn(),
|
||||
warnLoadingIndicator: jest.fn(),
|
||||
infoLoadingIndicator: jest.fn(),
|
||||
displayAiUsageSummary: jest.fn(),
|
||||
displayContextAnalysis: jest.fn()
|
||||
}));
|
||||
|
||||
jest.unstable_mockModule(
|
||||
'../../../../../scripts/modules/ai-services-unified.js',
|
||||
() => ({
|
||||
generateTextService: jest.fn().mockResolvedValue({
|
||||
mainResult: JSON.stringify({
|
||||
subtasks: [
|
||||
{
|
||||
id: 1,
|
||||
title: 'Set up project structure',
|
||||
description:
|
||||
'Create the basic project directory structure and configuration files',
|
||||
dependencies: [],
|
||||
details:
|
||||
'Initialize package.json, create src/ and test/ directories, set up linting configuration',
|
||||
status: 'pending',
|
||||
testStrategy:
|
||||
'Verify all expected files and directories are created'
|
||||
},
|
||||
{
|
||||
id: 2,
|
||||
title: 'Implement core functionality',
|
||||
description: 'Develop the main application logic and core features',
|
||||
dependencies: [1],
|
||||
details:
|
||||
'Create main classes, implement business logic, set up data models',
|
||||
status: 'pending',
|
||||
testStrategy: 'Unit tests for all core functions and classes'
|
||||
},
|
||||
{
|
||||
id: 3,
|
||||
title: 'Add user interface',
|
||||
description: 'Create the user interface components and layouts',
|
||||
dependencies: [2],
|
||||
details:
|
||||
'Design UI components, implement responsive layouts, add user interactions',
|
||||
status: 'pending',
|
||||
testStrategy: 'UI tests and visual regression testing'
|
||||
}
|
||||
]
|
||||
}),
|
||||
telemetryData: {
|
||||
timestamp: new Date().toISOString(),
|
||||
userId: '1234567890',
|
||||
commandName: 'expand-task',
|
||||
modelUsed: 'claude-3-5-sonnet',
|
||||
providerName: 'anthropic',
|
||||
inputTokens: 1000,
|
||||
outputTokens: 500,
|
||||
totalTokens: 1500,
|
||||
totalCost: 0.012414,
|
||||
currency: 'USD'
|
||||
}
|
||||
})
|
||||
})
|
||||
);
|
||||
|
||||
jest.unstable_mockModule(
|
||||
'../../../../../scripts/modules/config-manager.js',
|
||||
() => ({
|
||||
getDefaultSubtasks: jest.fn(() => 3),
|
||||
getDebugFlag: jest.fn(() => false)
|
||||
})
|
||||
);
|
||||
|
||||
jest.unstable_mockModule(
|
||||
'../../../../../scripts/modules/utils/contextGatherer.js',
|
||||
() => ({
|
||||
ContextGatherer: jest.fn().mockImplementation(() => ({
|
||||
gather: jest.fn().mockResolvedValue({
|
||||
contextSummary: 'Mock context summary',
|
||||
allRelatedTaskIds: [],
|
||||
graphVisualization: 'Mock graph'
|
||||
})
|
||||
}))
|
||||
})
|
||||
);
|
||||
|
||||
jest.unstable_mockModule(
|
||||
'../../../../../scripts/modules/task-manager/generate-task-files.js',
|
||||
() => ({
|
||||
default: jest.fn().mockResolvedValue()
|
||||
})
|
||||
);
|
||||
|
||||
// Mock external UI libraries
|
||||
jest.unstable_mockModule('chalk', () => ({
|
||||
default: {
|
||||
white: { bold: jest.fn((text) => text) },
|
||||
cyan: Object.assign(
|
||||
jest.fn((text) => text),
|
||||
{
|
||||
bold: jest.fn((text) => text)
|
||||
}
|
||||
),
|
||||
green: jest.fn((text) => text),
|
||||
yellow: jest.fn((text) => text),
|
||||
bold: jest.fn((text) => text)
|
||||
}
|
||||
}));
|
||||
|
||||
jest.unstable_mockModule('boxen', () => ({
|
||||
default: jest.fn((text) => text)
|
||||
}));
|
||||
|
||||
jest.unstable_mockModule('cli-table3', () => ({
|
||||
default: jest.fn().mockImplementation(() => ({
|
||||
push: jest.fn(),
|
||||
toString: jest.fn(() => 'mocked table')
|
||||
}))
|
||||
}));
|
||||
|
||||
// Mock process.exit to prevent Jest worker crashes
|
||||
const mockExit = jest.spyOn(process, 'exit').mockImplementation((code) => {
|
||||
throw new Error(`process.exit called with "${code}"`);
|
||||
});
|
||||
|
||||
// Import the mocked modules
|
||||
const {
|
||||
readJSON,
|
||||
writeJSON,
|
||||
log,
|
||||
findTaskById,
|
||||
ensureTagMetadata,
|
||||
readComplexityReport,
|
||||
findProjectRoot
|
||||
} = await import('../../../../../scripts/modules/utils.js');
|
||||
|
||||
const { generateTextService } = await import(
|
||||
'../../../../../scripts/modules/ai-services-unified.js'
|
||||
);
|
||||
|
||||
const generateTaskFiles = (
|
||||
await import(
|
||||
'../../../../../scripts/modules/task-manager/generate-task-files.js'
|
||||
)
|
||||
).default;
|
||||
|
||||
// Import the module under test
|
||||
const { default: expandTask } = await import(
|
||||
'../../../../../scripts/modules/task-manager/expand-task.js'
|
||||
);
|
||||
|
||||
describe('expandTask', () => {
|
||||
const sampleTasks = {
|
||||
master: {
|
||||
tasks: [
|
||||
{
|
||||
id: 1,
|
||||
title: 'Task 1',
|
||||
description: 'First task',
|
||||
status: 'done',
|
||||
dependencies: [],
|
||||
details: 'Already completed task',
|
||||
subtasks: []
|
||||
},
|
||||
{
|
||||
id: 2,
|
||||
title: 'Task 2',
|
||||
description: 'Second task',
|
||||
status: 'pending',
|
||||
dependencies: [],
|
||||
details: 'Task ready for expansion',
|
||||
subtasks: []
|
||||
},
|
||||
{
|
||||
id: 3,
|
||||
title: 'Complex Task',
|
||||
description: 'A complex task that needs breakdown',
|
||||
status: 'pending',
|
||||
dependencies: [1],
|
||||
details: 'This task involves multiple steps',
|
||||
subtasks: []
|
||||
},
|
||||
{
|
||||
id: 4,
|
||||
title: 'Task with existing subtasks',
|
||||
description: 'Task that already has subtasks',
|
||||
status: 'pending',
|
||||
dependencies: [],
|
||||
details: 'Has existing subtasks',
|
||||
subtasks: [
|
||||
{
|
||||
id: 1,
|
||||
title: 'Existing subtask',
|
||||
description: 'Already exists',
|
||||
status: 'pending',
|
||||
dependencies: []
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
'feature-branch': {
|
||||
tasks: [
|
||||
{
|
||||
id: 1,
|
||||
title: 'Feature Task 1',
|
||||
description: 'Task in feature branch',
|
||||
status: 'pending',
|
||||
dependencies: [],
|
||||
details: 'Feature-specific task',
|
||||
subtasks: []
|
||||
}
|
||||
]
|
||||
}
|
||||
};
|
||||
|
||||
// Create a helper function for consistent mcpLog mock
|
||||
const createMcpLogMock = () => ({
|
||||
info: jest.fn(),
|
||||
warn: jest.fn(),
|
||||
error: jest.fn(),
|
||||
debug: jest.fn(),
|
||||
success: jest.fn()
|
||||
});
|
||||
|
||||
beforeEach(() => {
|
||||
jest.clearAllMocks();
|
||||
mockExit.mockClear();
|
||||
|
||||
// Default readJSON implementation - returns tagged structure
|
||||
readJSON.mockImplementation((tasksPath, projectRoot, tag) => {
|
||||
const sampleTasksCopy = JSON.parse(JSON.stringify(sampleTasks));
|
||||
const selectedTag = tag || 'master';
|
||||
return {
|
||||
...sampleTasksCopy[selectedTag],
|
||||
tag: selectedTag,
|
||||
_rawTaggedData: sampleTasksCopy
|
||||
};
|
||||
});
|
||||
|
||||
// Default findTaskById implementation
|
||||
findTaskById.mockImplementation((tasks, taskId) => {
|
||||
const id = parseInt(taskId, 10);
|
||||
return tasks.find((t) => t.id === id);
|
||||
});
|
||||
|
||||
// Default complexity report (no report available)
|
||||
readComplexityReport.mockReturnValue(null);
|
||||
|
||||
// Mock findProjectRoot to return consistent path for complexity report
|
||||
findProjectRoot.mockReturnValue('/mock/project/root');
|
||||
|
||||
writeJSON.mockResolvedValue();
|
||||
generateTaskFiles.mockResolvedValue();
|
||||
log.mockImplementation(() => {});
|
||||
|
||||
// Mock console.log to avoid output during tests
|
||||
jest.spyOn(console, 'log').mockImplementation(() => {});
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
console.log.mockRestore();
|
||||
});
|
||||
|
||||
	describe('Basic Functionality', () => {
		test('should expand a task with AI-generated subtasks', async () => {
			// Arrange
			const tasksPath = 'tasks/tasks.json';
			const taskId = '2';
			const numSubtasks = 3;
			const context = {
				mcpLog: createMcpLogMock(),
				projectRoot: '/mock/project/root'
			};

			// Act
			const result = await expandTask(
				tasksPath,
				taskId,
				numSubtasks,
				false,
				'',
				context,
				false
			);

			// Assert
			expect(readJSON).toHaveBeenCalledWith(
				tasksPath,
				'/mock/project/root',
				undefined
			);
			expect(generateTextService).toHaveBeenCalledWith(expect.any(Object));
			expect(writeJSON).toHaveBeenCalledWith(
				tasksPath,
				expect.objectContaining({
					tasks: expect.arrayContaining([
						expect.objectContaining({
							id: 2,
							subtasks: expect.arrayContaining([
								expect.objectContaining({
									id: 1,
									title: 'Set up project structure',
									status: 'pending'
								}),
								expect.objectContaining({
									id: 2,
									title: 'Implement core functionality',
									status: 'pending'
								}),
								expect.objectContaining({
									id: 3,
									title: 'Add user interface',
									status: 'pending'
								})
							])
						})
					]),
					tag: 'master',
					_rawTaggedData: expect.objectContaining({
						master: expect.objectContaining({
							tasks: expect.any(Array)
						})
					})
				}),
				'/mock/project/root',
				undefined
			);
			expect(result).toEqual(
				expect.objectContaining({
					task: expect.objectContaining({
						id: 2,
						subtasks: expect.arrayContaining([
							expect.objectContaining({
								id: 1,
								title: 'Set up project structure',
								status: 'pending'
							}),
							expect.objectContaining({
								id: 2,
								title: 'Implement core functionality',
								status: 'pending'
							}),
							expect.objectContaining({
								id: 3,
								title: 'Add user interface',
								status: 'pending'
							})
						])
					}),
					telemetryData: expect.any(Object)
				})
			);
		});

		test('should handle research flag correctly', async () => {
			// Arrange
			const tasksPath = 'tasks/tasks.json';
			const taskId = '2';
			const numSubtasks = 3;
			const context = {
				mcpLog: createMcpLogMock(),
				projectRoot: '/mock/project/root'
			};

			// Act
			await expandTask(
				tasksPath,
				taskId,
				numSubtasks,
				true, // useResearch = true
				'Additional context for research',
				context,
				false
			);

			// Assert
			expect(generateTextService).toHaveBeenCalledWith(
				expect.objectContaining({
					role: 'research',
					commandName: expect.any(String)
				})
			);
		});

		test('should handle complexity report integration without errors', async () => {
			// Arrange
			const tasksPath = 'tasks/tasks.json';
			const taskId = '2';
			const context = {
				mcpLog: createMcpLogMock(),
				projectRoot: '/mock/project/root'
			};

			// Act & Assert - Should complete without errors
			const result = await expandTask(
				tasksPath,
				taskId,
				undefined, // numSubtasks not specified
				false,
				'',
				context,
				false
			);

			// Assert - Should successfully expand and return expected structure
			expect(result).toEqual(
				expect.objectContaining({
					task: expect.objectContaining({
						id: 2,
						subtasks: expect.any(Array)
					}),
					telemetryData: expect.any(Object)
				})
			);
			expect(generateTextService).toHaveBeenCalled();
		});
	});

	describe('Tag Handling (The Critical Bug Fix)', () => {
		test('should preserve tagged structure when expanding with default tag', async () => {
			// Arrange
			const tasksPath = 'tasks/tasks.json';
			const taskId = '2';
			const context = {
				mcpLog: createMcpLogMock(),
				projectRoot: '/mock/project/root',
				tag: 'master' // Explicit tag context
			};

			// Act
			await expandTask(tasksPath, taskId, 3, false, '', context, false);

			// Assert - CRITICAL: Check tag is passed to readJSON and writeJSON
			expect(readJSON).toHaveBeenCalledWith(
				tasksPath,
				'/mock/project/root',
				'master'
			);
			expect(writeJSON).toHaveBeenCalledWith(
				tasksPath,
				expect.objectContaining({
					tag: 'master',
					_rawTaggedData: expect.objectContaining({
						master: expect.any(Object),
						'feature-branch': expect.any(Object)
					})
				}),
				'/mock/project/root',
				'master' // CRITICAL: Tag must be passed to writeJSON
			);
		});

		test('should preserve tagged structure when expanding with non-default tag', async () => {
			// Arrange
			const tasksPath = 'tasks/tasks.json';
			const taskId = '1'; // Task in feature-branch
			const context = {
				mcpLog: createMcpLogMock(),
				projectRoot: '/mock/project/root',
				tag: 'feature-branch' // Different tag context
			};

			// Configure readJSON to return feature-branch data
			readJSON.mockImplementation((tasksPath, projectRoot, tag) => {
				const sampleTasksCopy = JSON.parse(JSON.stringify(sampleTasks));
				return {
					...sampleTasksCopy['feature-branch'],
					tag: 'feature-branch',
					_rawTaggedData: sampleTasksCopy
				};
			});

			// Act
			await expandTask(tasksPath, taskId, 3, false, '', context, false);

			// Assert - CRITICAL: Check tag preservation for non-default tag
			expect(readJSON).toHaveBeenCalledWith(
				tasksPath,
				'/mock/project/root',
				'feature-branch'
			);
			expect(writeJSON).toHaveBeenCalledWith(
				tasksPath,
				expect.objectContaining({
					tag: 'feature-branch',
					_rawTaggedData: expect.objectContaining({
						master: expect.any(Object),
						'feature-branch': expect.any(Object)
					})
				}),
				'/mock/project/root',
				'feature-branch' // CRITICAL: Correct tag passed to writeJSON
			);
		});

		test('should NOT corrupt tagged structure when tag is undefined', async () => {
			// Arrange
			const tasksPath = 'tasks/tasks.json';
			const taskId = '2';
			const context = {
				mcpLog: createMcpLogMock(),
				projectRoot: '/mock/project/root'
				// No tag specified - should default gracefully
			};

			// Act
			await expandTask(tasksPath, taskId, 3, false, '', context, false);

			// Assert - Should still preserve structure with undefined tag
			expect(readJSON).toHaveBeenCalledWith(
				tasksPath,
				'/mock/project/root',
				undefined
			);
			expect(writeJSON).toHaveBeenCalledWith(
				tasksPath,
				expect.objectContaining({
					_rawTaggedData: expect.objectContaining({
						master: expect.any(Object)
					})
				}),
				'/mock/project/root',
				undefined
			);

			// CRITICAL: Verify structure is NOT flattened to old format
			const writeCallArgs = writeJSON.mock.calls[0][1];
			expect(writeCallArgs).toHaveProperty('tasks'); // Should have tasks property from readJSON mock
			expect(writeCallArgs).toHaveProperty('_rawTaggedData'); // Should preserve tagged structure
		});
	});

	describe('Force Flag Handling', () => {
		test('should replace existing subtasks when force=true', async () => {
			// Arrange
			const tasksPath = 'tasks/tasks.json';
			const taskId = '4'; // Task with existing subtasks
			const context = {
				mcpLog: createMcpLogMock(),
				projectRoot: '/mock/project/root'
			};

			// Act
			await expandTask(tasksPath, taskId, 3, false, '', context, true);

			// Assert - Should replace existing subtasks
			expect(writeJSON).toHaveBeenCalledWith(
				tasksPath,
				expect.objectContaining({
					tasks: expect.arrayContaining([
						expect.objectContaining({
							id: 4,
							subtasks: expect.arrayContaining([
								expect.objectContaining({
									id: 1,
									title: 'Set up project structure'
								})
							])
						})
					])
				}),
				'/mock/project/root',
				undefined
			);
		});

		test('should append to existing subtasks when force=false', async () => {
			// Arrange
			const tasksPath = 'tasks/tasks.json';
			const taskId = '4'; // Task with existing subtasks
			const context = {
				mcpLog: createMcpLogMock(),
				projectRoot: '/mock/project/root'
			};

			// Act
			await expandTask(tasksPath, taskId, 3, false, '', context, false);

			// Assert - Should append to existing subtasks with proper ID increments
			expect(writeJSON).toHaveBeenCalledWith(
				tasksPath,
				expect.objectContaining({
					tasks: expect.arrayContaining([
						expect.objectContaining({
							id: 4,
							subtasks: expect.arrayContaining([
								// Should contain both existing and new subtasks
								expect.any(Object),
								expect.any(Object),
								expect.any(Object),
								expect.any(Object) // 1 existing + 3 new = 4 total
							])
						})
					])
				}),
				'/mock/project/root',
				undefined
			);
		});
	});

	describe('Error Handling', () => {
		test('should handle non-existent task ID', async () => {
			// Arrange
			const tasksPath = 'tasks/tasks.json';
			const taskId = '999'; // Non-existent task
			const context = {
				mcpLog: createMcpLogMock(),
				projectRoot: '/mock/project/root'
			};

			findTaskById.mockReturnValue(null);

			// Act & Assert
			await expect(
				expandTask(tasksPath, taskId, 3, false, '', context, false)
			).rejects.toThrow('Task 999 not found');

			expect(writeJSON).not.toHaveBeenCalled();
		});

		test('should expand tasks regardless of status (including done tasks)', async () => {
			// Arrange
			const tasksPath = 'tasks/tasks.json';
			const taskId = '1'; // Task with 'done' status
			const context = {
				mcpLog: createMcpLogMock(),
				projectRoot: '/mock/project/root'
			};

			// Act
			const result = await expandTask(
				tasksPath,
				taskId,
				3,
				false,
				'',
				context,
				false
			);

			// Assert - Should successfully expand even 'done' tasks
			expect(writeJSON).toHaveBeenCalled();
			expect(result).toEqual(
				expect.objectContaining({
					task: expect.objectContaining({
						id: 1,
						status: 'done', // Status unchanged
						subtasks: expect.arrayContaining([
							expect.objectContaining({
								id: 1,
								title: 'Set up project structure',
								status: 'pending'
							})
						])
					}),
					telemetryData: expect.any(Object)
				})
			);
		});

		test('should handle AI service failures', async () => {
			// Arrange
			const tasksPath = 'tasks/tasks.json';
			const taskId = '2';
			const context = {
				mcpLog: createMcpLogMock(),
				projectRoot: '/mock/project/root'
			};

			generateTextService.mockRejectedValueOnce(new Error('AI service error'));

			// Act & Assert
			await expect(
				expandTask(tasksPath, taskId, 3, false, '', context, false)
			).rejects.toThrow('AI service error');

			expect(writeJSON).not.toHaveBeenCalled();
		});

		test('should handle file read errors', async () => {
			// Arrange
			const tasksPath = 'tasks/tasks.json';
			const taskId = '2';
			const context = {
				mcpLog: createMcpLogMock(),
				projectRoot: '/mock/project/root'
			};

			readJSON.mockImplementation(() => {
				throw new Error('File read failed');
			});

			// Act & Assert
			await expect(
				expandTask(tasksPath, taskId, 3, false, '', context, false)
			).rejects.toThrow('File read failed');

			expect(writeJSON).not.toHaveBeenCalled();
		});

		test('should handle invalid tasks data', async () => {
			// Arrange
			const tasksPath = 'tasks/tasks.json';
			const taskId = '2';
			const context = {
				mcpLog: createMcpLogMock(),
				projectRoot: '/mock/project/root'
			};

			readJSON.mockReturnValue(null);

			// Act & Assert
			await expect(
				expandTask(tasksPath, taskId, 3, false, '', context, false)
			).rejects.toThrow();
		});
	});

	describe('Output Format Handling', () => {
		test('should display telemetry for CLI output format', async () => {
			// Arrange
			const { displayAiUsageSummary } = await import(
				'../../../../../scripts/modules/ui.js'
			);
			const tasksPath = 'tasks/tasks.json';
			const taskId = '2';
			const context = {
				projectRoot: '/mock/project/root'
				// No mcpLog - should trigger CLI mode
			};

			// Act
			await expandTask(tasksPath, taskId, 3, false, '', context, false);

			// Assert - Should display telemetry for CLI users
			expect(displayAiUsageSummary).toHaveBeenCalledWith(
				expect.objectContaining({
					commandName: 'expand-task',
					modelUsed: 'claude-3-5-sonnet',
					totalCost: 0.012414
				}),
				'cli'
			);
		});

		test('should not display telemetry for MCP output format', async () => {
			// Arrange
			const { displayAiUsageSummary } = await import(
				'../../../../../scripts/modules/ui.js'
			);
			const tasksPath = 'tasks/tasks.json';
			const taskId = '2';
			const context = {
				mcpLog: createMcpLogMock(),
				projectRoot: '/mock/project/root'
			};

			// Act
			await expandTask(tasksPath, taskId, 3, false, '', context, false);

			// Assert - Should NOT display telemetry for MCP (handled at higher level)
			expect(displayAiUsageSummary).not.toHaveBeenCalled();
		});
	});

	describe('Edge Cases', () => {
		test('should handle empty additional context', async () => {
			// Arrange
			const tasksPath = 'tasks/tasks.json';
			const taskId = '2';
			const context = {
				mcpLog: createMcpLogMock(),
				projectRoot: '/mock/project/root'
			};

			// Act
			await expandTask(tasksPath, taskId, 3, false, '', context, false);

			// Assert - Should work with empty context (but may include project context)
			expect(generateTextService).toHaveBeenCalledWith(
				expect.objectContaining({
					prompt: expect.stringMatching(/.*/) // Just ensure prompt exists
				})
			);
		});

		test('should handle additional context correctly', async () => {
			// Arrange
			const tasksPath = 'tasks/tasks.json';
			const taskId = '2';
			const additionalContext = 'Use React hooks and TypeScript';
			const context = {
				mcpLog: createMcpLogMock(),
				projectRoot: '/mock/project/root'
			};

			// Act
			await expandTask(
				tasksPath,
				taskId,
				3,
				false,
				additionalContext,
				context,
				false
			);

			// Assert - Should include additional context in prompt
			expect(generateTextService).toHaveBeenCalledWith(
				expect.objectContaining({
					prompt: expect.stringContaining('Use React hooks and TypeScript')
				})
			);
		});

		test('should handle missing project root in context', async () => {
			// Arrange
			const tasksPath = 'tasks/tasks.json';
			const taskId = '2';
			const context = {
				mcpLog: createMcpLogMock()
				// No projectRoot in context
			};

			// Act
			await expandTask(tasksPath, taskId, 3, false, '', context, false);

			// Assert - Should derive project root from tasksPath
			expect(findProjectRoot).toHaveBeenCalledWith(tasksPath);
			expect(readJSON).toHaveBeenCalledWith(
				tasksPath,
				'/mock/project/root',
				undefined
			);
		});
	});
});