Compare commits: v0.15.0...feat/add-n (12 commits)
| Author | SHA1 | Date |
| --- | --- | --- |
|  | 5958a272ce |  |
|  | 72a2154b01 |  |
|  | 471f152c1c |  |
|  | f6142e56ca |  |
|  | f058543888 |  |
|  | acd5c1ea3d |  |
|  | 682b54e103 |  |
|  | 6a8a68e1a3 |  |
|  | 80735f9e60 |  |
|  | 48732d5423 |  |
|  | 2d520de269 |  |
|  | b60e1cf835 |  |
5  .changeset/hungry-geese-work.md  Normal file

@@ -0,0 +1,5 @@
---
'task-master-ai': minor
---

Add AWS bedrock support

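For illustration, a rough sketch of the credentials the new Bedrock support relies on. The CLI changes later in this compare check `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` before accepting a custom Bedrock model, so a minimal `.env` would look something like this (placeholder values):

```bash
# Standard AWS credential variables checked for the Bedrock provider (placeholders)
AWS_ACCESS_KEY_ID=YOUR_AWS_ACCESS_KEY_ID
AWS_SECRET_ACCESS_KEY=YOUR_AWS_SECRET_ACCESS_KEY
```
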
13  .changeset/itchy-taxes-sip.md  Normal file

@@ -0,0 +1,13 @@
---
'task-master-ai': minor
---

# Add Google Vertex AI Provider Integration

- Implemented `VertexAIProvider` class extending BaseAIProvider
- Added authentication and configuration handling for Vertex AI
- Updated configuration manager with Vertex-specific getters
- Modified AI services unified system to integrate the provider
- Added documentation for Vertex AI setup and configuration
- Updated environment variable examples for Vertex AI support
- Implemented specialized error handling for Vertex-specific issues

5  .changeset/lemon-apes-sort.md  Normal file

@@ -0,0 +1,5 @@
---
'task-master-ai': minor
---

Add support for Azure

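For illustration, and going by the `.env.example` and configuration updates elsewhere in this compare, enabling the Azure support roughly amounts to providing a key plus an endpoint (placeholder values):

```bash
# The endpoint can alternatively be set as the Azure model role's baseURL
AZURE_OPENAI_API_KEY=YOUR_AZURE_KEY_HERE
AZURE_OPENAI_ENDPOINT=https://your-endpoint.openai.azure.com/
```
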
5  .changeset/new-colts-flow.md  Normal file

@@ -0,0 +1,5 @@
---
"task-master-ai": minor
---

Increased minimum required node version to > 18 (was > 14)

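A quick way to check a local environment against the new floor (the `engines` field in package.json moves to `>=18.0.0` later in this compare):

```bash
node --version   # should print v18.x or newer
```
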
5  .changeset/plain-bottles-stand.md  Normal file

@@ -0,0 +1,5 @@
---
'task-master-ai': minor
---

Renamed baseUrl to baseURL

5  .changeset/shaggy-rice-exist.md  Normal file

@@ -0,0 +1,5 @@
---
'task-master-ai': patch
---

Fix max_tokens error when trying to use claude-sonnet-4 and claude-opus-4

7  .changeset/sharp-flies-call.md  Normal file

@@ -0,0 +1,7 @@
---
'task-master-ai': minor
---

Add TASK_MASTER_PROJECT_ROOT env variable supported in mcp.json and .env for project root resolution

- Some users were having issues where the MCP wasn't able to detect the location of their project root, you can now set the `TASK_MASTER_PROJECT_ROOT` environment variable to the root of your project.

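For illustration only: the variable can live in the project's `.env` or in the `env` block of the `mcp.json` entries shown later in this compare; the path below is a placeholder:

```bash
# .env (or the "env" object of the taskmaster-ai server entry in mcp.json)
TASK_MASTER_PROJECT_ROOT=/absolute/path/to/your/project
```
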
5  .changeset/tidy-seals-rule.md  Normal file

@@ -0,0 +1,5 @@
---
'task-master-ai': patch
---

Fix add-task MCP command causing an error

@@ -7,3 +7,9 @@ MISTRAL_API_KEY=YOUR_MISTRAL_KEY_HERE
OPENROUTER_API_KEY=YOUR_OPENROUTER_KEY_HERE
XAI_API_KEY=YOUR_XAI_KEY_HERE
AZURE_OPENAI_API_KEY=YOUR_AZURE_KEY_HERE

# Google Vertex AI Configuration
VERTEX_PROJECT_ID=your-gcp-project-id
VERTEX_LOCATION=us-central1
# Optional: Path to service account credentials JSON file (alternative to API key)
GOOGLE_APPLICATION_CREDENTIALS=/path/to/service-account-credentials.json

40  .github/workflows/update-models-md.yml  vendored  Normal file

@@ -0,0 +1,40 @@
name: Update models.md from supported-models.json

on:
  push:
    branches:
      - main
      - next
    paths:
      - 'scripts/modules/supported-models.json'
      - 'docs/scripts/models-json-to-markdown.js'

jobs:
  update_markdown:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Set up Node.js
        uses: actions/setup-node@v4
        with:
          node-version: 20

      - name: Run transformation script
        run: node docs/scripts/models-json-to-markdown.js

      - name: Format Markdown with Prettier
        run: npx prettier --write docs/models.md

      - name: Stage docs/models.md
        run: git add docs/models.md

      - name: Commit & Push docs/models.md
        uses: actions-js/push@master
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
          branch: ${{ github.ref_name }}
          message: 'docs: Auto-update and format models.md'
          author_name: 'github-actions[bot]'
          author_email: 'github-actions[bot]@users.noreply.github.com'

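The workflow's two substantive steps can be reproduced locally with the same commands it runs:

```bash
node docs/scripts/models-json-to-markdown.js
npx prettier --write docs/models.md
```
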
@@ -1,7 +0,0 @@
# Ignore artifacts:
build
coverage
.changeset
tasks
package-lock.json
tests/fixture/*.json

11  .prettierrc

@@ -1,11 +0,0 @@
{
  "printWidth": 80,
  "tabWidth": 2,
  "useTabs": true,
  "semi": true,
  "singleQuote": true,
  "trailingComma": "none",
  "bracketSpacing": true,
  "arrowParens": "always",
  "endOfLine": "lf"
}

@@ -25,8 +25,8 @@
"defaultSubtasks": 5,
"defaultPriority": "medium",
"projectName": "Taskmaster",
"ollamaBaseUrl": "http://localhost:11434/api",
"ollamaBaseURL": "http://localhost:11434/api",
"userId": "1234567890",
"azureOpenaiBaseUrl": "https://your-endpoint.openai.azure.com/"
"azureBaseURL": "https://your-endpoint.azure.com/"
}
}

335  CONTRIBUTING.md  Normal file

@@ -0,0 +1,335 @@
# Contributing to Task Master

Thank you for your interest in contributing to Task Master! We're excited to work with you and appreciate your help in making this project better. 🚀

## 🤝 Our Collaborative Approach

We're a **PR-friendly team** that values collaboration:

- ✅ **We review PRs quickly** - Usually within hours, not days
- ✅ **We're super reactive** - Expect fast feedback and engagement
- ✅ **We sometimes take over PRs** - If your contribution is valuable but needs cleanup, we might jump in to help finish it
- ✅ **We're open to all contributions** - From bug fixes to major features

**We don't mind AI-generated code**, but we do expect you to:

- ✅ **Review and understand** what the AI generated
- ✅ **Test the code thoroughly** before submitting
- ✅ **Ensure it's well-written** and follows our patterns
- ❌ **Don't submit "AI slop"** - untested, unreviewed AI output

> **Why this matters**: We spend significant time reviewing PRs. Help us help you by submitting quality contributions that save everyone time!

## 🚀 Quick Start for Contributors

### 1. Fork and Clone

```bash
git clone https://github.com/YOUR_USERNAME/claude-task-master.git
cd claude-task-master
npm install
```

### 2. Create a Feature Branch

**Important**: Always target the `next` branch, not `main`:

```bash
git checkout next
git pull origin next
git checkout -b feature/your-feature-name
```

### 3. Make Your Changes

Follow our development guidelines below.

### 4. Test Everything Yourself

**Before submitting your PR**, ensure:

```bash
# Run all tests
npm test

# Check formatting
npm run format-check

# Fix formatting if needed
npm run format
```

### 5. Create a Changeset

**Required for most changes**:

```bash
npm run changeset
```

See the [Changeset Guidelines](#changeset-guidelines) below for details.

### 6. Submit Your PR

- Target the `next` branch
- Write a clear description
- Reference any related issues

## 📋 Development Guidelines

### Branch Strategy

- **`main`**: Production-ready code
- **`next`**: Development branch - **target this for PRs**
- **Feature branches**: `feature/description` or `fix/description`

### Code Quality Standards

1. **Write tests** for new functionality
2. **Follow existing patterns** in the codebase
3. **Add JSDoc comments** for functions
4. **Keep functions focused** and single-purpose

### Testing Requirements

Your PR **must pass all CI checks**:

- ✅ **Unit tests**: `npm test`
- ✅ **Format check**: `npm run format-check`

**Test your changes locally first** - this saves review time and shows you care about quality.

## 📦 Changeset Guidelines

We use [Changesets](https://github.com/changesets/changesets) to manage versioning and generate changelogs.

### When to Create a Changeset

**Always create a changeset for**:

- ✅ New features
- ✅ Bug fixes
- ✅ Breaking changes
- ✅ Performance improvements
- ✅ User-facing documentation updates
- ✅ Dependency updates that affect functionality

**Skip changesets for**:

- ❌ Internal documentation only
- ❌ Test-only changes
- ❌ Code formatting/linting
- ❌ Development tooling that doesn't affect users

### How to Create a Changeset

1. **After making your changes**:

```bash
npm run changeset
```

2. **Choose the bump type**:

- **Major**: Breaking changes
- **Minor**: New features
- **Patch**: Bug fixes, docs, performance improvements

3. **Write a clear summary**:

```
Add support for custom AI models in MCP configuration
```

4. **Commit the changeset file** with your changes:
```bash
git add .changeset/*.md
git commit -m "feat: add custom AI model support"
```
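
For reference, `npm run changeset` writes a small markdown file with YAML front matter, like the `.changeset/*.md` entries added earlier in this compare. A sketch of what such a file contains (the filename is auto-generated; this one is made up):

```bash
# Hypothetical example of the file the changeset CLI produces
cat > .changeset/example-changeset.md <<'EOF'
---
'task-master-ai': minor
---

Add support for custom AI models in MCP configuration
EOF
```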

### Changeset vs Git Commit Messages

- **Changeset summary**: User-facing, goes in CHANGELOG.md
- **Git commit**: Developer-facing, explains the technical change

Example:

```bash
# Changeset summary (user-facing)
"Add support for custom Ollama models"

# Git commit message (developer-facing)
"feat(models): implement custom Ollama model validation

- Add model validation for custom Ollama endpoints
- Update configuration schema to support custom models
- Add tests for new validation logic"
```

## 🔧 Development Setup

### Prerequisites

- Node.js 14+
- npm or yarn

### Environment Setup

1. **Copy environment template**:

```bash
cp .env.example .env
```

2. **Add your API keys** (for testing AI features):
```bash
ANTHROPIC_API_KEY=your_key_here
OPENAI_API_KEY=your_key_here
# Add others as needed
```

### Running Tests

```bash
# Run all tests
npm test

# Run tests in watch mode
npm run test:watch

# Run with coverage
npm run test:coverage

# Run E2E tests
npm run test:e2e
```

### Code Formatting

We use Prettier for consistent formatting:

```bash
# Check formatting
npm run format-check

# Fix formatting
npm run format
```

## 📝 PR Guidelines

### Before Submitting

- [ ] **Target the `next` branch**
- [ ] **Test everything locally**
- [ ] **Run the full test suite**
- [ ] **Check code formatting**
- [ ] **Create a changeset** (if needed)
- [ ] **Re-read your changes** - ensure they're clean and well-thought-out

### PR Description Template

```markdown
## Description

Brief description of what this PR does.

## Type of Change

- [ ] Bug fix
- [ ] New feature
- [ ] Breaking change
- [ ] Documentation update

## Testing

- [ ] I have tested this locally
- [ ] All existing tests pass
- [ ] I have added tests for new functionality

## Changeset

- [ ] I have created a changeset (or this change doesn't need one)

## Additional Notes

Any additional context or notes for reviewers.
```

### What We Look For

✅ **Good PRs**:

- Clear, focused changes
- Comprehensive testing
- Good commit messages
- Proper changeset (when needed)
- Self-reviewed code

❌ **Avoid**:

- Massive PRs that change everything
- Untested code
- Formatting issues
- Missing changesets for user-facing changes
- AI-generated code that wasn't reviewed

## 🏗️ Project Structure

```
claude-task-master/
├── bin/ # CLI executables
├── mcp-server/ # MCP server implementation
├── scripts/ # Core task management logic
├── src/ # Shared utilities and providers and well refactored code (we are slowly moving everything here)
├── tests/ # Test files
├── docs/ # Documentation
└── .cursor/ # Cursor IDE rules and configuration
└── assets/ # Assets like rules and configuration for all IDEs
```

### Key Areas for Contribution

- **CLI Commands**: `scripts/modules/commands.js`
- **MCP Tools**: `mcp-server/src/tools/`
- **Core Logic**: `scripts/modules/task-manager/`
- **AI Providers**: `src/ai-providers/`
- **Tests**: `tests/`

## 🐛 Reporting Issues

### Bug Reports

Include:

- Task Master version
- Node.js version
- Operating system
- Steps to reproduce
- Expected vs actual behavior
- Error messages/logs

### Feature Requests

Include:

- Clear description of the feature
- Use case/motivation
- Proposed implementation (if you have ideas)
- Willingness to contribute

## 💬 Getting Help

- **Discord**: [Join our community](https://discord.gg/taskmasterai)
- **Issues**: [GitHub Issues](https://github.com/eyaltoledano/claude-task-master/issues)
- **Discussions**: [GitHub Discussions](https://github.com/eyaltoledano/claude-task-master/discussions)

## 📄 License

By contributing, you agree that your contributions will be licensed under the same license as the project (MIT with Commons Clause).

---

**Thank you for contributing to Task Master!** 🎉

Your contributions help make AI-driven development more accessible and efficient for everyone.

87  README.md

@@ -28,13 +28,22 @@ Using the research model is optional but highly recommended. You will need at le

## Quick Start

### Option 1 | MCP (Recommended):
### Option 1: MCP (Recommended)

MCP (Model Control Protocol) provides the easiest way to get started with Task Master directly in your editor.
MCP (Model Control Protocol) lets you run Task Master directly from your editor.

1. **Add the MCP config to your editor** (Cursor recommended, but it works with other text editors):
#### 1. Add your MCP config at the following path depending on your editor

```json
| Editor | Scope | Linux/macOS Path | Windows Path | Key |
| ------------ | ------- | ------------------------------------- | ------------------------------------------------- | ------------ |
| **Cursor** | Global | `~/.cursor/mcp.json` | `%USERPROFILE%\.cursor\mcp.json` | `mcpServers` |
| | Project | `<project_folder>/.cursor/mcp.json` | `<project_folder>\.cursor\mcp.json` | `mcpServers` |
| **Windsurf** | Global | `~/.codeium/windsurf/mcp_config.json` | `%USERPROFILE%\.codeium\windsurf\mcp_config.json` | `mcpServers` |
| **VS Code** | Project | `<project_folder>/.vscode/mcp.json` | `<project_folder>\.vscode\mcp.json` | `servers` |

##### Cursor & Windsurf (`mcpServers`)

```jsonc
{
"mcpServers": {
"taskmaster-ai": {
@@ -56,23 +65,75 @@ MCP (Model Control Protocol) provides the easiest way to get started with Task M
}
```

2. **Enable the MCP** in your editor
> 🔑 Replace `YOUR_…_KEY_HERE` with your real API keys. You can remove keys you don't use.

3. **Prompt the AI** to initialize Task Master:
##### VS Code (`servers` + `type`)

```
Can you please initialize taskmaster-ai into my project?
```jsonc
{
"servers": {
"taskmaster-ai": {
"command": "npx",
"args": ["-y", "--package=task-master-ai", "task-master-ai"],
"env": {
"ANTHROPIC_API_KEY": "YOUR_ANTHROPIC_API_KEY_HERE",
"PERPLEXITY_API_KEY": "YOUR_PERPLEXITY_API_KEY_HERE",
"OPENAI_API_KEY": "YOUR_OPENAI_KEY_HERE",
"GOOGLE_API_KEY": "YOUR_GOOGLE_KEY_HERE",
"MISTRAL_API_KEY": "YOUR_MISTRAL_KEY_HERE",
"OPENROUTER_API_KEY": "YOUR_OPENROUTER_KEY_HERE",
"XAI_API_KEY": "YOUR_XAI_KEY_HERE",
"AZURE_OPENAI_API_KEY": "YOUR_AZURE_KEY_HERE"
},
"type": "stdio"
}
}
}
```

4. **Use common commands** directly through your AI assistant:
> 🔑 Replace `YOUR_…_KEY_HERE` with your real API keys. You can remove keys you don't use.

#### 2. (Cursor-only) Enable Taskmaster MCP

Open Cursor Settings (Ctrl+Shift+J) ➡ Click on MCP tab on the left ➡ Enable task-master-ai with the toggle

#### 3. (Optional) Configure the models you want to use

In your editor’s AI chat pane, say:

```txt
Can you parse my PRD at scripts/prd.txt?
What's the next task I should work on?
Can you help me implement task 3?
Can you help me expand task 4?
Change the main, research and fallback models to <model_name>, <model_name> and <model_name> respectively.
```

[Table of available models](docs/models.md)

#### 4. Initialize Task Master

In your editor’s AI chat pane, say:

```txt
Initialize taskmaster-ai in my project
```

#### 5. Make sure you have a PRD in `<project_folder>/scripts/prd.txt`

An example of a PRD is located into `<project_folder>/scripts/example_prd.txt`.

**Always start with a detailed PRD.**

The more detailed your PRD, the better the generated tasks will be.

#### 6. Common Commands

Use your AI assistant to:

- Parse requirements: `Can you parse my PRD at scripts/prd.txt?`
- Plan next step: `What’s the next task I should work on?`
- Implement a task: `Can you help me implement task 3?`
- Expand a task: `Can you help me expand task 4?`

[More examples on how to use Task Master in chat](docs/examples.md)

### Option 2: Using Command Line

#### Installation

@@ -25,7 +25,7 @@
"defaultSubtasks": 5,
"defaultPriority": "medium",
"projectName": "Taskmaster",
"ollamaBaseUrl": "http://localhost:11434/api",
"azureOpenaiBaseUrl": "https://your-endpoint.openai.azure.com/"
"ollamaBaseURL": "http://localhost:11434/api",
"azureOpenaiBaseURL": "https://your-endpoint.openai.azure.com/"
}
}

47  biome.json  Normal file

@@ -0,0 +1,47 @@
{
  "files": {
    "ignore": [
      "build",
      "coverage",
      ".changeset",
      "tasks",
      "package-lock.json",
      "tests/fixture/*.json"
    ]
  },
  "formatter": {
    "bracketSpacing": true,
    "enabled": true,
    "indentStyle": "tab",
    "lineWidth": 80
  },
  "javascript": {
    "formatter": {
      "arrowParentheses": "always",
      "quoteStyle": "single",
      "trailingCommas": "none"
    }
  },
  "linter": {
    "rules": {
      "complexity": {
        "noForEach": "off",
        "useOptionalChain": "off"
      },
      "correctness": {
        "noConstantCondition": "off",
        "noUnreachable": "off"
      },
      "suspicious": {
        "noDuplicateTestHooks": "off",
        "noPrototypeBuiltins": "off"
      },
      "style": {
        "noUselessElse": "off",
        "useNodejsImportProtocol": "off",
        "useNumberNamespace": "off",
        "noParameterAssign": "off"
      }
    }
  }
}

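With this configuration in place, the reworked npm scripts in the package.json hunk later in this compare route formatting through Biome instead of Prettier:

```bash
npm run format-check   # runs: biome format .
npm run format         # runs: biome format . --write
```
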
@@ -16,14 +16,14 @@ Taskmaster uses two primary methods for configuration:
"modelId": "claude-3-7-sonnet-20250219",
"maxTokens": 64000,
"temperature": 0.2,
"baseUrl": "https://api.anthropic.com/v1"
"baseURL": "https://api.anthropic.com/v1"
},
"research": {
"provider": "perplexity",
"modelId": "sonar-pro",
"maxTokens": 8700,
"temperature": 0.1,
"baseUrl": "https://api.perplexity.ai/v1"
"baseURL": "https://api.perplexity.ai/v1"
},
"fallback": {
"provider": "anthropic",
@@ -38,8 +38,10 @@ Taskmaster uses two primary methods for configuration:
"defaultSubtasks": 5,
"defaultPriority": "medium",
"projectName": "Your Project Name",
"ollamaBaseUrl": "http://localhost:11434/api",
"azureOpenaiBaseUrl": "https://your-endpoint.openai.azure.com/"
"ollamaBaseURL": "http://localhost:11434/api",
"azureBaseURL": "https://your-endpoint.azure.com/",
"vertexProjectId": "your-gcp-project-id",
"vertexLocation": "us-central1"
}
}
```
@@ -53,15 +55,18 @@ Taskmaster uses two primary methods for configuration:
- `ANTHROPIC_API_KEY`: Your Anthropic API key.
- `PERPLEXITY_API_KEY`: Your Perplexity API key.
- `OPENAI_API_KEY`: Your OpenAI API key.
- `GOOGLE_API_KEY`: Your Google API key.
- `GOOGLE_API_KEY`: Your Google API key (also used for Vertex AI provider).
- `MISTRAL_API_KEY`: Your Mistral API key.
- `AZURE_OPENAI_API_KEY`: Your Azure OpenAI API key (also requires `AZURE_OPENAI_ENDPOINT`).
- `OPENROUTER_API_KEY`: Your OpenRouter API key.
- `XAI_API_KEY`: Your X-AI API key.
- **Optional Endpoint Overrides:**
- **Per-role `baseUrl` in `.taskmasterconfig`:** You can add a `baseUrl` property to any model role (`main`, `research`, `fallback`) to override the default API endpoint for that provider. If omitted, the provider's standard endpoint is used.
- `AZURE_OPENAI_ENDPOINT`: Required if using Azure OpenAI key (can also be set as `baseUrl` for the Azure model role).
- **Per-role `baseURL` in `.taskmasterconfig`:** You can add a `baseURL` property to any model role (`main`, `research`, `fallback`) to override the default API endpoint for that provider. If omitted, the provider's standard endpoint is used.
- `AZURE_OPENAI_ENDPOINT`: Required if using Azure OpenAI key (can also be set as `baseURL` for the Azure model role).
- `OLLAMA_BASE_URL`: Override the default Ollama API URL (Default: `http://localhost:11434/api`).
- `VERTEX_PROJECT_ID`: Your Google Cloud project ID for Vertex AI. Required when using the 'vertex' provider.
- `VERTEX_LOCATION`: Google Cloud region for Vertex AI (e.g., 'us-central1'). Default is 'us-central1'.
- `GOOGLE_APPLICATION_CREDENTIALS`: Path to service account credentials JSON file for Google Cloud auth (alternative to API key for Vertex AI).

**Important:** Settings like model ID selections (`main`, `research`, `fallback`), `maxTokens`, `temperature`, `logLevel`, `defaultSubtasks`, `defaultPriority`, and `projectName` are **managed in `.taskmasterconfig`**, not environment variables.

@@ -78,6 +83,11 @@ PERPLEXITY_API_KEY=pplx-your-key-here
# Optional Endpoint Overrides
# AZURE_OPENAI_ENDPOINT=https://your-azure-endpoint.openai.azure.com/
# OLLAMA_BASE_URL=http://custom-ollama-host:11434/api

# Google Vertex AI Configuration (Required if using 'vertex' provider)
# VERTEX_PROJECT_ID=your-gcp-project-id
# VERTEX_LOCATION=us-central1
# GOOGLE_APPLICATION_CREDENTIALS=/path/to/service-account-credentials.json
```

## Troubleshooting
@@ -102,3 +112,45 @@ git clone https://github.com/eyaltoledano/claude-task-master.git
cd claude-task-master
node scripts/init.js
```

## Provider-Specific Configuration

### Google Vertex AI Configuration

Google Vertex AI is Google Cloud's enterprise AI platform and requires specific configuration:

1. **Prerequisites**:
- A Google Cloud account with Vertex AI API enabled
- Either a Google API key with Vertex AI permissions OR a service account with appropriate roles
- A Google Cloud project ID
2. **Authentication Options**:
- **API Key**: Set the `GOOGLE_API_KEY` environment variable
- **Service Account**: Set `GOOGLE_APPLICATION_CREDENTIALS` to point to your service account JSON file
3. **Required Configuration**:
- Set `VERTEX_PROJECT_ID` to your Google Cloud project ID
- Set `VERTEX_LOCATION` to your preferred Google Cloud region (default: us-central1)
4. **Example Setup**:

```bash
# In .env file
GOOGLE_API_KEY=AIzaSyXXXXXXXXXXXXXXXXXXXXXXXXX
VERTEX_PROJECT_ID=my-gcp-project-123
VERTEX_LOCATION=us-central1
```

Or using service account:

```bash
# In .env file
GOOGLE_APPLICATION_CREDENTIALS=/path/to/service-account.json
VERTEX_PROJECT_ID=my-gcp-project-123
VERTEX_LOCATION=us-central1
```

5. **In .taskmasterconfig**:
```json
"global": {
"vertexProjectId": "my-gcp-project-123",
"vertexLocation": "us-central1"
}
```

125
docs/models.md
Normal file
125
docs/models.md
Normal file
@@ -0,0 +1,125 @@
|
||||
# Available Models as of May 27, 2025
|
||||
|
||||
## Main Models
|
||||
|
||||
| Provider | Model Name | SWE Score | Input Cost | Output Cost |
|
||||
| ---------- | ---------------------------------------------- | --------- | ---------- | ----------- |
|
||||
| anthropic | claude-sonnet-4-20250514 | 0.727 | 3 | 15 |
|
||||
| anthropic | claude-opus-4-20250514 | 0.725 | 15 | 75 |
|
||||
| anthropic | claude-3-7-sonnet-20250219 | 0.623 | 3 | 15 |
|
||||
| anthropic | claude-3-5-sonnet-20241022 | 0.49 | 3 | 15 |
|
||||
| openai | gpt-4o | 0.332 | 2.5 | 10 |
|
||||
| openai | o1 | 0.489 | 15 | 60 |
|
||||
| openai | o3 | 0.5 | 10 | 40 |
|
||||
| openai | o3-mini | 0.493 | 1.1 | 4.4 |
|
||||
| openai | o4-mini | 0.45 | 1.1 | 4.4 |
|
||||
| openai | o1-mini | 0.4 | 1.1 | 4.4 |
|
||||
| openai | o1-pro | — | 150 | 600 |
|
||||
| openai | gpt-4-5-preview | 0.38 | 75 | 150 |
|
||||
| openai | gpt-4-1-mini | — | 0.4 | 1.6 |
|
||||
| openai | gpt-4-1-nano | — | 0.1 | 0.4 |
|
||||
| openai | gpt-4o-mini | 0.3 | 0.15 | 0.6 |
|
||||
| google | gemini-2.5-pro-preview-05-06 | 0.638 | — | — |
|
||||
| google | gemini-2.5-pro-preview-03-25 | 0.638 | — | — |
|
||||
| google | gemini-2.5-flash-preview-04-17 | — | — | — |
|
||||
| google | gemini-2.0-flash | 0.754 | 0.15 | 0.6 |
|
||||
| google | gemini-2.0-flash-lite | — | — | — |
|
||||
| perplexity | sonar-reasoning-pro | 0.211 | 2 | 8 |
|
||||
| perplexity | sonar-reasoning | 0.211 | 1 | 5 |
|
||||
| xai | grok-3 | — | 3 | 15 |
|
||||
| xai | grok-3-fast | — | 5 | 25 |
|
||||
| ollama | devstral:latest | — | 0 | 0 |
|
||||
| ollama | qwen3:latest | — | 0 | 0 |
|
||||
| ollama | qwen3:14b | — | 0 | 0 |
|
||||
| ollama | qwen3:32b | — | 0 | 0 |
|
||||
| ollama | mistral-small3.1:latest | — | 0 | 0 |
|
||||
| ollama | llama3.3:latest | — | 0 | 0 |
|
||||
| ollama | phi4:latest | — | 0 | 0 |
|
||||
| openrouter | google/gemini-2.5-flash-preview-05-20 | — | 0.15 | 0.6 |
|
||||
| openrouter | google/gemini-2.5-flash-preview-05-20:thinking | — | 0.15 | 3.5 |
|
||||
| openrouter | google/gemini-2.5-pro-exp-03-25 | — | 0 | 0 |
|
||||
| openrouter | deepseek/deepseek-chat-v3-0324:free | — | 0 | 0 |
|
||||
| openrouter | deepseek/deepseek-chat-v3-0324 | — | 0.27 | 1.1 |
|
||||
| openrouter | openai/gpt-4.1 | — | 2 | 8 |
|
||||
| openrouter | openai/gpt-4.1-mini | — | 0.4 | 1.6 |
|
||||
| openrouter | openai/gpt-4.1-nano | — | 0.1 | 0.4 |
|
||||
| openrouter | openai/o3 | — | 10 | 40 |
|
||||
| openrouter | openai/codex-mini | — | 1.5 | 6 |
|
||||
| openrouter | openai/gpt-4o-mini | — | 0.15 | 0.6 |
|
||||
| openrouter | openai/o4-mini | 0.45 | 1.1 | 4.4 |
|
||||
| openrouter | openai/o4-mini-high | — | 1.1 | 4.4 |
|
||||
| openrouter | openai/o1-pro | — | 150 | 600 |
|
||||
| openrouter | meta-llama/llama-3.3-70b-instruct | — | 120 | 600 |
|
||||
| openrouter | meta-llama/llama-4-maverick | — | 0.18 | 0.6 |
|
||||
| openrouter | meta-llama/llama-4-scout | — | 0.08 | 0.3 |
|
||||
| openrouter | qwen/qwen-max | — | 1.6 | 6.4 |
|
||||
| openrouter | qwen/qwen-turbo | — | 0.05 | 0.2 |
|
||||
| openrouter | qwen/qwen3-235b-a22b | — | 0.14 | 2 |
|
||||
| openrouter | mistralai/mistral-small-3.1-24b-instruct:free | — | 0 | 0 |
|
||||
| openrouter | mistralai/mistral-small-3.1-24b-instruct | — | 0.1 | 0.3 |
|
||||
| openrouter | mistralai/devstral-small | — | 0.1 | 0.3 |
|
||||
| openrouter | mistralai/mistral-nemo | — | 0.03 | 0.07 |
|
||||
| openrouter | thudm/glm-4-32b:free | — | 0 | 0 |
|
||||
|
||||
## Research Models
|
||||
|
||||
| Provider | Model Name | SWE Score | Input Cost | Output Cost |
|
||||
| ---------- | -------------------------- | --------- | ---------- | ----------- |
|
||||
| openai | gpt-4o-search-preview | 0.33 | 2.5 | 10 |
|
||||
| openai | gpt-4o-mini-search-preview | 0.3 | 0.15 | 0.6 |
|
||||
| perplexity | sonar-pro | — | 3 | 15 |
|
||||
| perplexity | sonar | — | 1 | 1 |
|
||||
| perplexity | deep-research | 0.211 | 2 | 8 |
|
||||
| xai | grok-3 | — | 3 | 15 |
|
||||
| xai | grok-3-fast | — | 5 | 25 |
|
||||
|
||||
## Fallback Models
|
||||
|
||||
| Provider | Model Name | SWE Score | Input Cost | Output Cost |
|
||||
| ---------- | ---------------------------------------------- | --------- | ---------- | ----------- |
|
||||
| anthropic | claude-sonnet-4-20250514 | 0.727 | 3 | 15 |
|
||||
| anthropic | claude-opus-4-20250514 | 0.725 | 15 | 75 |
|
||||
| anthropic | claude-3-7-sonnet-20250219 | 0.623 | 3 | 15 |
|
||||
| anthropic | claude-3-5-sonnet-20241022 | 0.49 | 3 | 15 |
|
||||
| openai | gpt-4o | 0.332 | 2.5 | 10 |
|
||||
| openai | o3 | 0.5 | 10 | 40 |
|
||||
| openai | o4-mini | 0.45 | 1.1 | 4.4 |
|
||||
| google | gemini-2.5-pro-preview-05-06 | 0.638 | — | — |
|
||||
| google | gemini-2.5-pro-preview-03-25 | 0.638 | — | — |
|
||||
| google | gemini-2.5-flash-preview-04-17 | — | — | — |
|
||||
| google | gemini-2.0-flash | 0.754 | 0.15 | 0.6 |
|
||||
| google | gemini-2.0-flash-lite | — | — | — |
|
||||
| perplexity | sonar-reasoning-pro | 0.211 | 2 | 8 |
|
||||
| perplexity | sonar-reasoning | 0.211 | 1 | 5 |
|
||||
| xai | grok-3 | — | 3 | 15 |
|
||||
| xai | grok-3-fast | — | 5 | 25 |
|
||||
| ollama | devstral:latest | — | 0 | 0 |
|
||||
| ollama | qwen3:latest | — | 0 | 0 |
|
||||
| ollama | qwen3:14b | — | 0 | 0 |
|
||||
| ollama | qwen3:32b | — | 0 | 0 |
|
||||
| ollama | mistral-small3.1:latest | — | 0 | 0 |
|
||||
| ollama | llama3.3:latest | — | 0 | 0 |
|
||||
| ollama | phi4:latest | — | 0 | 0 |
|
||||
| openrouter | google/gemini-2.5-flash-preview-05-20 | — | 0.15 | 0.6 |
|
||||
| openrouter | google/gemini-2.5-flash-preview-05-20:thinking | — | 0.15 | 3.5 |
|
||||
| openrouter | google/gemini-2.5-pro-exp-03-25 | — | 0 | 0 |
|
||||
| openrouter | deepseek/deepseek-chat-v3-0324:free | — | 0 | 0 |
|
||||
| openrouter | openai/gpt-4.1 | — | 2 | 8 |
|
||||
| openrouter | openai/gpt-4.1-mini | — | 0.4 | 1.6 |
|
||||
| openrouter | openai/gpt-4.1-nano | — | 0.1 | 0.4 |
|
||||
| openrouter | openai/o3 | — | 10 | 40 |
|
||||
| openrouter | openai/codex-mini | — | 1.5 | 6 |
|
||||
| openrouter | openai/gpt-4o-mini | — | 0.15 | 0.6 |
|
||||
| openrouter | openai/o4-mini | 0.45 | 1.1 | 4.4 |
|
||||
| openrouter | openai/o4-mini-high | — | 1.1 | 4.4 |
|
||||
| openrouter | openai/o1-pro | — | 150 | 600 |
|
||||
| openrouter | meta-llama/llama-3.3-70b-instruct | — | 120 | 600 |
|
||||
| openrouter | meta-llama/llama-4-maverick | — | 0.18 | 0.6 |
|
||||
| openrouter | meta-llama/llama-4-scout | — | 0.08 | 0.3 |
|
||||
| openrouter | qwen/qwen-max | — | 1.6 | 6.4 |
|
||||
| openrouter | qwen/qwen-turbo | — | 0.05 | 0.2 |
|
||||
| openrouter | qwen/qwen3-235b-a22b | — | 0.14 | 2 |
|
||||
| openrouter | mistralai/mistral-small-3.1-24b-instruct:free | — | 0 | 0 |
|
||||
| openrouter | mistralai/mistral-small-3.1-24b-instruct | — | 0.1 | 0.3 |
|
||||
| openrouter | mistralai/mistral-nemo | — | 0.03 | 0.07 |
|
||||
| openrouter | thudm/glm-4-32b:free | — | 0 | 0 |
|
||||
131
docs/scripts/models-json-to-markdown.js
Normal file
131
docs/scripts/models-json-to-markdown.js
Normal file
@@ -0,0 +1,131 @@
|
||||
import fs from 'fs';
|
||||
import path from 'path';
|
||||
import { fileURLToPath } from 'url';
|
||||
|
||||
const __filename = fileURLToPath(import.meta.url);
|
||||
const __dirname = path.dirname(__filename);
|
||||
|
||||
const supportedModelsPath = path.join(
|
||||
__dirname,
|
||||
'..',
|
||||
'modules',
|
||||
'supported-models.json'
|
||||
);
|
||||
const outputMarkdownPath = path.join(
|
||||
__dirname,
|
||||
'..',
|
||||
'..',
|
||||
'docs',
|
||||
'models.md'
|
||||
);
|
||||
|
||||
function formatCost(cost) {
|
||||
if (cost === null || cost === undefined) {
|
||||
return '—';
|
||||
}
|
||||
return cost;
|
||||
}
|
||||
|
||||
function formatSweScore(score) {
|
||||
if (score === null || score === undefined || score === 0) {
|
||||
return '—';
|
||||
}
|
||||
return score.toString();
|
||||
}
|
||||
|
||||
function generateMarkdownTable(title, models) {
|
||||
if (!models || models.length === 0) {
|
||||
return `## ${title}\n\nNo models in this category.\n\n`;
|
||||
}
|
||||
let table = `## ${title}\n\n`;
|
||||
table += '| Provider | Model Name | SWE Score | Input Cost | Output Cost |\n';
|
||||
table += '|---|---|---|---|---|\n';
|
||||
models.forEach((model) => {
|
||||
table += `| ${model.provider} | ${model.modelName} | ${formatSweScore(model.sweScore)} | ${formatCost(model.inputCost)} | ${formatCost(model.outputCost)} |\n`;
|
||||
});
|
||||
table += '\n';
|
||||
return table;
|
||||
}
|
||||
|
||||
function main() {
|
||||
try {
|
||||
const correctSupportedModelsPath = path.join(
|
||||
__dirname,
|
||||
'..',
|
||||
'..',
|
||||
'scripts',
|
||||
'modules',
|
||||
'supported-models.json'
|
||||
);
|
||||
const correctOutputMarkdownPath = path.join(__dirname, '..', 'models.md');
|
||||
|
||||
const supportedModelsContent = fs.readFileSync(
|
||||
correctSupportedModelsPath,
|
||||
'utf8'
|
||||
);
|
||||
const supportedModels = JSON.parse(supportedModelsContent);
|
||||
|
||||
const mainModels = [];
|
||||
const researchModels = [];
|
||||
const fallbackModels = [];
|
||||
|
||||
for (const provider in supportedModels) {
|
||||
if (Object.hasOwnProperty.call(supportedModels, provider)) {
|
||||
const models = supportedModels[provider];
|
||||
models.forEach((model) => {
|
||||
const modelEntry = {
|
||||
provider: provider,
|
||||
modelName: model.id,
|
||||
sweScore: model.swe_score,
|
||||
inputCost: model.cost_per_1m_tokens
|
||||
? model.cost_per_1m_tokens.input
|
||||
: null,
|
||||
outputCost: model.cost_per_1m_tokens
|
||||
? model.cost_per_1m_tokens.output
|
||||
: null
|
||||
};
|
||||
|
||||
if (model.allowed_roles.includes('main')) {
|
||||
mainModels.push(modelEntry);
|
||||
}
|
||||
if (model.allowed_roles.includes('research')) {
|
||||
researchModels.push(modelEntry);
|
||||
}
|
||||
if (model.allowed_roles.includes('fallback')) {
|
||||
fallbackModels.push(modelEntry);
|
||||
}
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
const date = new Date();
|
||||
const monthNames = [
|
||||
'January',
|
||||
'February',
|
||||
'March',
|
||||
'April',
|
||||
'May',
|
||||
'June',
|
||||
'July',
|
||||
'August',
|
||||
'September',
|
||||
'October',
|
||||
'November',
|
||||
'December'
|
||||
];
|
||||
const formattedDate = `${monthNames[date.getMonth()]} ${date.getDate()}, ${date.getFullYear()}`;
|
||||
|
||||
let markdownContent = `# Available Models as of ${formattedDate}\n\n`;
|
||||
markdownContent += generateMarkdownTable('Main Models', mainModels);
|
||||
markdownContent += generateMarkdownTable('Research Models', researchModels);
|
||||
markdownContent += generateMarkdownTable('Fallback Models', fallbackModels);
|
||||
|
||||
fs.writeFileSync(correctOutputMarkdownPath, markdownContent, 'utf8');
|
||||
console.log(`Successfully updated ${correctOutputMarkdownPath}`);
|
||||
} catch (error) {
|
||||
console.error('Error transforming models.json to models.md:', error);
|
||||
process.exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
main();
|
||||
@@ -22,7 +22,7 @@ import {
|
||||
*/
|
||||
function getProjectRoot(projectRootRaw, log) {
|
||||
// PRECEDENCE ORDER:
|
||||
// 1. Environment variable override
|
||||
// 1. Environment variable override (TASK_MASTER_PROJECT_ROOT)
|
||||
// 2. Explicitly provided projectRoot in args
|
||||
// 3. Previously found/cached project root
|
||||
// 4. Current directory if it has project markers
|
||||
@@ -578,6 +578,7 @@ function getRawProjectRootFromSession(session, log) {
|
||||
/**
|
||||
* Higher-order function to wrap MCP tool execute methods.
|
||||
* Ensures args.projectRoot is present and normalized before execution.
|
||||
* Uses TASK_MASTER_PROJECT_ROOT environment variable with proper precedence.
|
||||
* @param {Function} executeFn - The original async execute(args, context) function.
|
||||
* @returns {Function} The wrapped async execute function.
|
||||
*/
|
||||
@@ -588,31 +589,52 @@ function withNormalizedProjectRoot(executeFn) {
|
||||
let rootSource = 'unknown';
|
||||
|
||||
try {
|
||||
// Determine raw root: prioritize args, then session
|
||||
let rawRoot = args.projectRoot;
|
||||
if (!rawRoot) {
|
||||
rawRoot = getRawProjectRootFromSession(session, log);
|
||||
rootSource = 'session';
|
||||
} else {
|
||||
rootSource = 'args';
|
||||
}
|
||||
// PRECEDENCE ORDER:
|
||||
// 1. TASK_MASTER_PROJECT_ROOT environment variable (from process.env or session)
|
||||
// 2. args.projectRoot (explicitly provided)
|
||||
// 3. Session-based project root resolution
|
||||
// 4. Current directory fallback
|
||||
|
||||
if (!rawRoot) {
|
||||
log.error('Could not determine project root from args or session.');
|
||||
return createErrorResponse(
|
||||
'Could not determine project root. Please provide projectRoot argument or ensure session contains root info.'
|
||||
);
|
||||
// 1. Check for TASK_MASTER_PROJECT_ROOT environment variable first
|
||||
if (process.env.TASK_MASTER_PROJECT_ROOT) {
|
||||
const envRoot = process.env.TASK_MASTER_PROJECT_ROOT;
|
||||
normalizedRoot = path.isAbsolute(envRoot)
|
||||
? envRoot
|
||||
: path.resolve(process.cwd(), envRoot);
|
||||
rootSource = 'TASK_MASTER_PROJECT_ROOT environment variable';
|
||||
log.info(`Using project root from ${rootSource}: ${normalizedRoot}`);
|
||||
}
|
||||
// Also check session environment variables for TASK_MASTER_PROJECT_ROOT
|
||||
else if (session?.env?.TASK_MASTER_PROJECT_ROOT) {
|
||||
const envRoot = session.env.TASK_MASTER_PROJECT_ROOT;
|
||||
normalizedRoot = path.isAbsolute(envRoot)
|
||||
? envRoot
|
||||
: path.resolve(process.cwd(), envRoot);
|
||||
rootSource = 'TASK_MASTER_PROJECT_ROOT session environment variable';
|
||||
log.info(`Using project root from ${rootSource}: ${normalizedRoot}`);
|
||||
}
|
||||
// 2. If no environment variable, try args.projectRoot
|
||||
else if (args.projectRoot) {
|
||||
normalizedRoot = normalizeProjectRoot(args.projectRoot, log);
|
||||
rootSource = 'args.projectRoot';
|
||||
log.info(`Using project root from ${rootSource}: ${normalizedRoot}`);
|
||||
}
|
||||
// 3. If no args.projectRoot, try session-based resolution
|
||||
else {
|
||||
const sessionRoot = getProjectRootFromSession(session, log);
|
||||
if (sessionRoot) {
|
||||
normalizedRoot = sessionRoot; // getProjectRootFromSession already normalizes
|
||||
rootSource = 'session';
|
||||
log.info(`Using project root from ${rootSource}: ${normalizedRoot}`);
|
||||
}
|
||||
}
|
||||
|
||||
// Normalize the determined raw root
|
||||
normalizedRoot = normalizeProjectRoot(rawRoot, log);
|
||||
|
||||
if (!normalizedRoot) {
|
||||
log.error(
|
||||
`Failed to normalize project root obtained from ${rootSource}: ${rawRoot}`
|
||||
'Could not determine project root from environment, args, or session.'
|
||||
);
|
||||
return createErrorResponse(
|
||||
`Invalid project root provided or derived from ${rootSource}: ${rawRoot}`
|
||||
'Could not determine project root. Please provide projectRoot argument or ensure TASK_MASTER_PROJECT_ROOT environment variable is set.'
|
||||
);
|
||||
}
|
||||
|
||||
|
||||
21673  package-lock.json  generated  (file diff suppressed because it is too large)
13  package.json
@@ -21,8 +21,8 @@
|
||||
"release": "changeset publish",
|
||||
"inspector": "npx @modelcontextprotocol/inspector node mcp-server/server.js",
|
||||
"mcp-server": "node mcp-server/server.js",
|
||||
"format-check": "prettier --check .",
|
||||
"format": "prettier --write ."
|
||||
"format-check": "biome format .",
|
||||
"format": "biome format . --write"
|
||||
},
|
||||
"keywords": [
|
||||
"claude",
|
||||
@@ -39,14 +39,17 @@
|
||||
"author": "Eyal Toledano",
|
||||
"license": "MIT WITH Commons-Clause",
|
||||
"dependencies": {
|
||||
"@ai-sdk/amazon-bedrock": "^2.2.9",
|
||||
"@ai-sdk/anthropic": "^1.2.10",
|
||||
"@ai-sdk/azure": "^1.3.17",
|
||||
"@ai-sdk/google": "^1.2.13",
|
||||
"@ai-sdk/google-vertex": "^2.2.23",
|
||||
"@ai-sdk/mistral": "^1.2.7",
|
||||
"@ai-sdk/openai": "^1.3.20",
|
||||
"@ai-sdk/perplexity": "^1.1.7",
|
||||
"@ai-sdk/xai": "^1.2.15",
|
||||
"@anthropic-ai/sdk": "^0.39.0",
|
||||
"@aws-sdk/credential-providers": "^3.817.0",
|
||||
"@openrouter/ai-sdk-provider": "^0.4.5",
|
||||
"ai": "^4.3.10",
|
||||
"boxen": "^8.0.1",
|
||||
@@ -71,7 +74,7 @@
|
||||
"zod": "^3.23.8"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=14.0.0"
|
||||
"node": ">=18.0.0"
|
||||
},
|
||||
"repository": {
|
||||
"type": "git",
|
||||
@@ -92,10 +95,11 @@
|
||||
"src/**"
|
||||
],
|
||||
"overrides": {
|
||||
"node-fetch": "^3.3.2",
|
||||
"node-fetch": "^2.6.12",
|
||||
"whatwg-url": "^11.0.0"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@biomejs/biome": "^1.9.4",
|
||||
"@changesets/changelog-github": "^0.5.1",
|
||||
"@changesets/cli": "^2.28.1",
|
||||
"@types/jest": "^29.5.14",
|
||||
@@ -104,7 +108,6 @@
|
||||
"jest": "^29.7.0",
|
||||
"jest-environment-node": "^29.7.0",
|
||||
"mock-fs": "^5.5.0",
|
||||
"node-fetch": "^3.3.2",
|
||||
"prettier": "^3.5.3",
|
||||
"react": "^18.3.1",
|
||||
"supertest": "^7.1.0",
|
||||
|
||||
@@ -19,18 +19,41 @@ import {
|
||||
MODEL_MAP,
|
||||
getDebugFlag,
|
||||
getBaseUrlForRole,
|
||||
isApiKeySet
|
||||
isApiKeySet,
|
||||
getOllamaBaseURL,
|
||||
getAzureBaseURL,
|
||||
getVertexProjectId,
|
||||
getVertexLocation
|
||||
} from './config-manager.js';
|
||||
import { log, resolveEnvVariable, findProjectRoot } from './utils.js';
|
||||
import { log, findProjectRoot, resolveEnvVariable } from './utils.js';
|
||||
|
||||
import * as anthropic from '../../src/ai-providers/anthropic.js';
|
||||
import * as perplexity from '../../src/ai-providers/perplexity.js';
|
||||
import * as google from '../../src/ai-providers/google.js';
|
||||
import * as openai from '../../src/ai-providers/openai.js';
|
||||
import * as xai from '../../src/ai-providers/xai.js';
|
||||
import * as openrouter from '../../src/ai-providers/openrouter.js';
|
||||
import * as ollama from '../../src/ai-providers/ollama.js';
|
||||
// TODO: Import other provider modules when implemented (ollama, etc.)
|
||||
// Import provider classes
|
||||
import {
|
||||
AnthropicAIProvider,
|
||||
PerplexityAIProvider,
|
||||
GoogleAIProvider,
|
||||
OpenAIProvider,
|
||||
XAIProvider,
|
||||
OpenRouterAIProvider,
|
||||
OllamaAIProvider,
|
||||
BedrockAIProvider,
|
||||
AzureProvider,
|
||||
VertexAIProvider
|
||||
} from '../../src/ai-providers/index.js';
|
||||
|
||||
// Create provider instances
|
||||
const PROVIDERS = {
|
||||
anthropic: new AnthropicAIProvider(),
|
||||
perplexity: new PerplexityAIProvider(),
|
||||
google: new GoogleAIProvider(),
|
||||
openai: new OpenAIProvider(),
|
||||
xai: new XAIProvider(),
|
||||
openrouter: new OpenRouterAIProvider(),
|
||||
ollama: new OllamaAIProvider(),
|
||||
bedrock: new BedrockAIProvider(),
|
||||
azure: new AzureProvider(),
|
||||
vertex: new VertexAIProvider()
|
||||
};
|
||||
|
||||
// Helper function to get cost for a specific model
|
||||
function _getCostForModel(providerName, modelId) {
|
||||
@@ -62,51 +85,6 @@ function _getCostForModel(providerName, modelId) {
|
||||
};
|
||||
}
|
||||
|
||||
// --- Provider Function Map ---
|
||||
// Maps provider names (lowercase) to their respective service functions
|
||||
const PROVIDER_FUNCTIONS = {
|
||||
anthropic: {
|
||||
generateText: anthropic.generateAnthropicText,
|
||||
streamText: anthropic.streamAnthropicText,
|
||||
generateObject: anthropic.generateAnthropicObject
|
||||
},
|
||||
perplexity: {
|
||||
generateText: perplexity.generatePerplexityText,
|
||||
streamText: perplexity.streamPerplexityText,
|
||||
generateObject: perplexity.generatePerplexityObject
|
||||
},
|
||||
google: {
|
||||
// Add Google entry
|
||||
generateText: google.generateGoogleText,
|
||||
streamText: google.streamGoogleText,
|
||||
generateObject: google.generateGoogleObject
|
||||
},
|
||||
openai: {
|
||||
// ADD: OpenAI entry
|
||||
generateText: openai.generateOpenAIText,
|
||||
streamText: openai.streamOpenAIText,
|
||||
generateObject: openai.generateOpenAIObject
|
||||
},
|
||||
xai: {
|
||||
// ADD: xAI entry
|
||||
generateText: xai.generateXaiText,
|
||||
streamText: xai.streamXaiText,
|
||||
generateObject: xai.generateXaiObject // Note: Object generation might be unsupported
|
||||
},
|
||||
openrouter: {
|
||||
// ADD: OpenRouter entry
|
||||
generateText: openrouter.generateOpenRouterText,
|
||||
streamText: openrouter.streamOpenRouterText,
|
||||
generateObject: openrouter.generateOpenRouterObject
|
||||
},
|
||||
ollama: {
|
||||
generateText: ollama.generateOllamaText,
|
||||
streamText: ollama.streamOllamaText,
|
||||
generateObject: ollama.generateOllamaObject
|
||||
}
|
||||
// TODO: Add entries for ollama, etc. when implemented
|
||||
};
|
||||
|
||||
// --- Configuration for Retries ---
|
||||
const MAX_RETRIES = 2;
|
||||
const INITIAL_RETRY_DELAY_MS = 1000;
|
||||
@@ -191,7 +169,9 @@ function _resolveApiKey(providerName, session, projectRoot = null) {
|
||||
azure: 'AZURE_OPENAI_API_KEY',
|
||||
openrouter: 'OPENROUTER_API_KEY',
|
||||
xai: 'XAI_API_KEY',
|
||||
ollama: 'OLLAMA_API_KEY'
|
||||
ollama: 'OLLAMA_API_KEY',
|
||||
bedrock: 'AWS_ACCESS_KEY_ID',
|
||||
vertex: 'GOOGLE_API_KEY'
|
||||
};
|
||||
|
||||
const envVarName = keyMap[providerName];
|
||||
@@ -203,12 +183,11 @@ function _resolveApiKey(providerName, session, projectRoot = null) {
|
||||
|
||||
const apiKey = resolveEnvVariable(envVarName, session, projectRoot);
|
||||
|
||||
// Special handling for Ollama - API key is optional
|
||||
if (providerName === 'ollama') {
|
||||
// Special handling for providers that can use alternative auth
|
||||
if (providerName === 'ollama' || providerName === 'bedrock') {
|
||||
return apiKey || null;
|
||||
}
|
||||
|
||||
// For all other providers, API key is required
|
||||
if (!apiKey) {
|
||||
throw new Error(
|
||||
`Required API key ${envVarName} for provider '${providerName}' is not set in environment, session, or .env file.`
|
||||
@@ -229,14 +208,15 @@ function _resolveApiKey(providerName, session, projectRoot = null) {
|
||||
* @throws {Error} If the call fails after all retries.
|
||||
*/
|
||||
async function _attemptProviderCallWithRetries(
|
||||
providerApiFn,
|
||||
provider,
|
||||
serviceType,
|
||||
callParams,
|
||||
providerName,
|
||||
modelId,
|
||||
attemptRole
|
||||
) {
|
||||
let retries = 0;
|
||||
const fnName = providerApiFn.name;
|
||||
const fnName = serviceType;
|
||||
|
||||
while (retries <= MAX_RETRIES) {
|
||||
try {
|
||||
@@ -247,8 +227,8 @@ async function _attemptProviderCallWithRetries(
|
||||
);
|
||||
}
|
||||
|
||||
// Call the specific provider function directly
|
||||
const result = await providerApiFn(callParams);
|
||||
// Call the appropriate method on the provider instance
|
||||
const result = await provider[serviceType](callParams);
|
||||
|
||||
if (getDebugFlag()) {
|
||||
log(
|
||||
@@ -350,9 +330,8 @@ async function _unifiedServiceRunner(serviceType, params) {
|
||||
modelId,
|
||||
apiKey,
|
||||
roleParams,
|
||||
providerFnSet,
|
||||
providerApiFn,
|
||||
baseUrl,
|
||||
provider,
|
||||
baseURL,
|
||||
providerResponse,
|
||||
telemetryData = null;
|
||||
|
||||
@@ -391,7 +370,20 @@ async function _unifiedServiceRunner(serviceType, params) {
|
||||
continue;
|
||||
}
|
||||
|
||||
// Check if API key is set for the current provider and role (excluding 'ollama')
|
||||
// Get provider instance
|
||||
provider = PROVIDERS[providerName?.toLowerCase()];
|
||||
if (!provider) {
|
||||
log(
|
||||
'warn',
|
||||
`Skipping role '${currentRole}': Provider '${providerName}' not supported.`
|
||||
);
|
||||
lastError =
|
||||
lastError ||
|
||||
new Error(`Unsupported provider configured: ${providerName}`);
|
||||
continue;
|
||||
}
|
||||
|
||||
// Check API key if needed
|
||||
if (providerName?.toLowerCase() !== 'ollama') {
|
||||
if (!isApiKeySet(providerName, session, effectiveProjectRoot)) {
|
||||
log(
|
||||
@@ -407,40 +399,70 @@ async function _unifiedServiceRunner(serviceType, params) {
|
||||
}
|
||||
}
|
||||
|
||||
// Get base URL if configured (optional for most providers)
|
||||
baseURL = getBaseUrlForRole(currentRole, effectiveProjectRoot);
|
||||
|
||||
// For Azure, use the global Azure base URL if role-specific URL is not configured
|
||||
if (providerName?.toLowerCase() === 'azure' && !baseURL) {
|
||||
baseURL = getAzureBaseURL(effectiveProjectRoot);
|
||||
log('debug', `Using global Azure base URL: ${baseURL}`);
|
||||
} else if (providerName?.toLowerCase() === 'ollama' && !baseURL) {
|
||||
// For Ollama, use the global Ollama base URL if role-specific URL is not configured
|
||||
baseURL = getOllamaBaseURL(effectiveProjectRoot);
|
||||
log('debug', `Using global Ollama base URL: ${baseURL}`);
|
||||
}
|
||||
|
||||
// Get AI parameters for the current role
|
||||
roleParams = getParametersForRole(currentRole, effectiveProjectRoot);
|
||||
baseUrl = getBaseUrlForRole(currentRole, effectiveProjectRoot);
|
||||
providerFnSet = PROVIDER_FUNCTIONS[providerName?.toLowerCase()];
|
||||
if (!providerFnSet) {
|
||||
log(
|
||||
'warn',
|
||||
`Skipping role '${currentRole}': Provider '${providerName}' not supported or map entry missing.`
|
||||
);
|
||||
lastError =
|
||||
lastError ||
|
||||
new Error(`Unsupported provider configured: ${providerName}`);
|
||||
continue;
|
||||
}
|
||||
|
||||
providerApiFn = providerFnSet[serviceType];
|
||||
if (typeof providerApiFn !== 'function') {
|
||||
log(
|
||||
'warn',
|
||||
`Skipping role '${currentRole}': Service type '${serviceType}' not implemented for provider '${providerName}'.`
|
||||
);
|
||||
lastError =
|
||||
lastError ||
|
||||
new Error(
|
||||
`Service '${serviceType}' not implemented for provider ${providerName}`
|
||||
);
|
||||
continue;
|
||||
}
|
||||
|
||||
apiKey = _resolveApiKey(
|
||||
providerName?.toLowerCase(),
|
||||
session,
|
||||
effectiveProjectRoot
|
||||
);
|
||||
|
||||
// Prepare provider-specific configuration
|
||||
let providerSpecificParams = {};
|
||||
|
||||
// Handle Vertex AI specific configuration
|
||||
if (providerName?.toLowerCase() === 'vertex') {
|
||||
// Get Vertex project ID and location
|
||||
const projectId =
|
||||
getVertexProjectId(effectiveProjectRoot) ||
|
||||
resolveEnvVariable(
|
||||
'VERTEX_PROJECT_ID',
|
||||
session,
|
||||
effectiveProjectRoot
|
||||
);
|
||||
|
||||
const location =
|
||||
getVertexLocation(effectiveProjectRoot) ||
|
||||
resolveEnvVariable(
|
||||
'VERTEX_LOCATION',
|
||||
session,
|
||||
effectiveProjectRoot
|
||||
) ||
|
||||
'us-central1';
|
||||
|
||||
// Get credentials path if available
|
||||
const credentialsPath = resolveEnvVariable(
|
||||
'GOOGLE_APPLICATION_CREDENTIALS',
|
||||
session,
|
||||
effectiveProjectRoot
|
||||
);
|
||||
|
||||
// Add Vertex-specific parameters
|
||||
providerSpecificParams = {
|
||||
projectId,
|
||||
location,
|
||||
...(credentialsPath && { credentials: { credentialsFromEnv: true } })
|
||||
};
|
||||
|
||||
log(
|
||||
'debug',
|
||||
`Using Vertex AI configuration: Project ID=${projectId}, Location=${location}`
|
||||
);
|
||||
}
|
||||
|
||||
const messages = [];
|
||||
if (systemPrompt) {
|
||||
messages.push({ role: 'system', content: systemPrompt });
|
||||
@@ -476,13 +498,15 @@ async function _unifiedServiceRunner(serviceType, params) {
|
||||
maxTokens: roleParams.maxTokens,
|
||||
temperature: roleParams.temperature,
|
||||
messages,
|
||||
baseUrl,
|
||||
...(baseURL && { baseURL }),
|
||||
...(serviceType === 'generateObject' && { schema, objectName }),
|
||||
...providerSpecificParams,
|
||||
...restApiParams
|
||||
};
|
||||
|
||||
providerResponse = await _attemptProviderCallWithRetries(
|
||||
providerApiFn,
|
||||
provider,
|
||||
serviceType,
|
||||
callParams,
|
||||
providerName,
|
||||
modelId,
|
||||
|
||||
@@ -13,7 +13,7 @@ import http from 'http';
|
||||
import inquirer from 'inquirer';
|
||||
import ora from 'ora'; // Import ora
|
||||
|
||||
import { log, readJSON } from './utils.js';
|
||||
import { log, readJSON, findProjectRoot } from './utils.js';
|
||||
import {
|
||||
parsePRD,
|
||||
updateTasks,
|
||||
@@ -76,7 +76,6 @@ import {
|
||||
setModel,
|
||||
getApiKeyStatusReport
|
||||
} from './task-manager/models.js';
|
||||
import { findProjectRoot } from './utils.js';
|
||||
import {
|
||||
isValidTaskStatus,
|
||||
TASK_STATUS_OPTIONS
|
||||
@@ -156,11 +155,11 @@ async function runInteractiveSetup(projectRoot) {
|
||||
}
|
||||
|
||||
// Helper function to fetch Ollama models (duplicated for CLI context)
|
||||
function fetchOllamaModelsCLI(baseUrl = 'http://localhost:11434/api') {
|
||||
function fetchOllamaModelsCLI(baseURL = 'http://localhost:11434/api') {
|
||||
return new Promise((resolve) => {
|
||||
try {
|
||||
// Parse the base URL to extract hostname, port, and base path
|
||||
const url = new URL(baseUrl);
|
||||
const url = new URL(baseURL);
|
||||
const isHttps = url.protocol === 'https:';
|
||||
const port = url.port || (isHttps ? 443 : 80);
|
||||
const basePath = url.pathname.endsWith('/')
|
||||
@@ -245,6 +244,11 @@ async function runInteractiveSetup(projectRoot) {
|
||||
value: '__CUSTOM_OLLAMA__'
|
||||
};
|
||||
|
||||
const customBedrockOption = {
|
||||
name: '* Custom Bedrock model', // Add Bedrock custom option
|
||||
value: '__CUSTOM_BEDROCK__'
|
||||
};
|
||||
|
||||
let choices = [];
|
||||
let defaultIndex = 0; // Default to 'Cancel'
|
||||
|
||||
@@ -291,6 +295,7 @@ async function runInteractiveSetup(projectRoot) {
|
||||
commonPrefix.push(cancelOption);
|
||||
commonPrefix.push(customOpenRouterOption);
|
||||
commonPrefix.push(customOllamaOption);
|
||||
commonPrefix.push(customBedrockOption);
|
||||
|
||||
let prefixLength = commonPrefix.length; // Initial prefix length
|
||||
|
||||
@@ -437,13 +442,13 @@ async function runInteractiveSetup(projectRoot) {
|
||||
modelIdToSet = customId;
|
||||
providerHint = 'ollama';
|
||||
// Get the Ollama base URL from config for this role
|
||||
const ollamaBaseUrl = getBaseUrlForRole(role, projectRoot);
|
||||
const ollamaBaseURL = getBaseUrlForRole(role, projectRoot);
|
||||
// Validate against live Ollama list
|
||||
const ollamaModels = await fetchOllamaModelsCLI(ollamaBaseUrl);
|
||||
const ollamaModels = await fetchOllamaModelsCLI(ollamaBaseURL);
|
||||
if (ollamaModels === null) {
|
||||
console.error(
|
||||
chalk.red(
|
||||
`Error: Unable to connect to Ollama server at ${ollamaBaseUrl}. Please ensure Ollama is running and try again.`
|
||||
`Error: Unable to connect to Ollama server at ${ollamaBaseURL}. Please ensure Ollama is running and try again.`
|
||||
)
|
||||
);
|
||||
setupSuccess = false;
|
||||
@@ -456,12 +461,47 @@ async function runInteractiveSetup(projectRoot) {
|
||||
);
|
||||
console.log(
|
||||
chalk.yellow(
|
||||
`You can check available models with: curl ${ollamaBaseUrl}/tags`
|
||||
`You can check available models with: curl ${ollamaBaseURL}/tags`
|
||||
)
|
||||
);
|
||||
setupSuccess = false;
|
||||
return true; // Continue setup, but mark as failed
|
||||
}
|
||||
} else if (selectedValue === '__CUSTOM_BEDROCK__') {
|
||||
isCustomSelection = true;
|
||||
const { customId } = await inquirer.prompt([
|
||||
{
|
||||
type: 'input',
|
||||
name: 'customId',
|
||||
message: `Enter the custom Bedrock Model ID for the ${role} role (e.g., anthropic.claude-3-sonnet-20240229-v1:0):`
|
||||
}
|
||||
]);
|
||||
if (!customId) {
|
||||
console.log(chalk.yellow('No custom ID entered. Skipping role.'));
|
||||
return true; // Continue setup, but don't set this role
|
||||
}
|
||||
modelIdToSet = customId;
|
||||
providerHint = 'bedrock';
|
||||
|
||||
// Check if AWS environment variables exist
|
||||
if (
|
||||
!process.env.AWS_ACCESS_KEY_ID ||
|
||||
!process.env.AWS_SECRET_ACCESS_KEY
|
||||
) {
|
||||
console.error(
|
||||
chalk.red(
|
||||
`Error: AWS_ACCESS_KEY_ID and/or AWS_SECRET_ACCESS_KEY environment variables are missing. Please set them before using custom Bedrock models.`
|
||||
)
|
||||
);
|
||||
setupSuccess = false;
|
||||
return true; // Continue setup, but mark as failed
|
||||
}
|
||||
|
||||
console.log(
|
||||
chalk.blue(
|
||||
`Custom Bedrock model "${modelIdToSet}" will be used. No validation performed.`
|
||||
)
|
||||
);
|
||||
} else if (
|
||||
selectedValue &&
|
||||
typeof selectedValue === 'object' &&
|
||||
@@ -2294,6 +2334,10 @@ function registerCommands(programInstance) {
|
||||
'--ollama',
|
||||
'Allow setting a custom Ollama model ID (use with --set-*) '
|
||||
)
|
||||
.option(
|
||||
'--bedrock',
|
||||
'Allow setting a custom Bedrock model ID (use with --set-*) '
|
||||
)
|
||||
.addHelpText(
|
||||
'after',
|
||||
`
|
||||
@@ -2303,17 +2347,26 @@ Examples:
|
||||
$ task-master models --set-research sonar-pro # Set research model
|
||||
$ task-master models --set-fallback claude-3-5-sonnet-20241022 # Set fallback
|
||||
$ task-master models --set-main my-custom-model --ollama # Set custom Ollama model for main role
|
||||
$ task-master models --set-main anthropic.claude-3-sonnet-20240229-v1:0 --bedrock # Set custom Bedrock model for main role
|
||||
$ task-master models --set-main some/other-model --openrouter # Set custom OpenRouter model for main role
|
||||
$ task-master models --setup # Run interactive setup`
|
||||
)
|
||||
.action(async (options) => {
|
||||
const projectRoot = findProjectRoot(); // Find project root for context
|
||||
|
||||
// Validate flags: cannot use both --openrouter and --ollama simultaneously
|
||||
if (options.openrouter && options.ollama) {
|
||||
const projectRoot = findProjectRoot();
|
||||
if (!projectRoot) {
|
||||
console.error(chalk.red('Error: Could not find project root.'));
|
||||
process.exit(1);
|
||||
}
|
||||
// Validate flags: cannot use multiple provider flags simultaneously
|
||||
const providerFlags = [
|
||||
options.openrouter,
|
||||
options.ollama,
|
||||
options.bedrock
|
||||
].filter(Boolean).length;
|
||||
if (providerFlags > 1) {
|
||||
console.error(
|
||||
chalk.red(
|
||||
'Error: Cannot use both --openrouter and --ollama flags simultaneously.'
|
||||
'Error: Cannot use multiple provider flags (--openrouter, --ollama, --bedrock) simultaneously.'
|
||||
)
|
||||
);
|
||||
process.exit(1);
|
||||
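The new flag validation replaces a pairwise check with a count of truthy provider flags. A small sketch of the idiom, assuming an `options` object shaped like Commander's parsed flags:

```js
// Sketch: at most one of --openrouter / --ollama / --bedrock may be set.
const options = { openrouter: false, ollama: true, bedrock: false }; // example input
const providerFlags = [options.openrouter, options.ollama, options.bedrock].filter(
	Boolean
).length;
if (providerFlags > 1) {
	console.error(
		'Error: Cannot use multiple provider flags (--openrouter, --ollama, --bedrock) simultaneously.'
	);
	process.exit(1);
}
```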
@@ -2353,7 +2406,9 @@ Examples:
|
||||
? 'openrouter'
|
||||
: options.ollama
|
||||
? 'ollama'
|
||||
: undefined
|
||||
: options.bedrock
|
||||
? 'bedrock'
|
||||
: undefined
|
||||
});
|
||||
if (result.success) {
|
||||
console.log(chalk.green(`✅ ${result.data.message}`));
|
||||
@@ -2373,7 +2428,9 @@ Examples:
|
||||
? 'openrouter'
|
||||
: options.ollama
|
||||
? 'ollama'
|
||||
: undefined
|
||||
: options.bedrock
|
||||
? 'bedrock'
|
||||
: undefined
|
||||
});
|
||||
if (result.success) {
|
||||
console.log(chalk.green(`✅ ${result.data.message}`));
|
||||
@@ -2395,7 +2452,9 @@ Examples:
|
||||
? 'openrouter'
|
||||
: options.ollama
|
||||
? 'ollama'
|
||||
: undefined
|
||||
: options.bedrock
|
||||
? 'bedrock'
|
||||
: undefined
|
||||
});
|
||||
if (result.success) {
|
||||
console.log(chalk.green(`✅ ${result.data.message}`));
|
||||
|
||||
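The same provider-hint resolution is repeated in each `--set-*` handler above; pulled out on its own it reads like this (a sketch, not a refactor present in the diff):

```js
// Sketch: map mutually exclusive CLI flags to a provider hint, or undefined if none set.
const providerHint = options.openrouter
	? 'openrouter'
	: options.ollama
		? 'ollama'
		: options.bedrock
			? 'bedrock'
			: undefined;
```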
@@ -2,7 +2,7 @@ import fs from 'fs';
|
||||
import path from 'path';
|
||||
import chalk from 'chalk';
|
||||
import { fileURLToPath } from 'url';
|
||||
import { log, resolveEnvVariable, findProjectRoot } from './utils.js';
|
||||
import { log, findProjectRoot, resolveEnvVariable } from './utils.js';
|
||||
|
||||
// Calculate __dirname in ESM
|
||||
const __filename = fileURLToPath(import.meta.url);
|
||||
@@ -61,7 +61,7 @@ const DEFAULTS = {
|
||||
defaultSubtasks: 5,
|
||||
defaultPriority: 'medium',
|
||||
projectName: 'Task Master',
|
||||
ollamaBaseUrl: 'http://localhost:11434/api'
|
||||
ollamaBaseURL: 'http://localhost:11434/api'
|
||||
}
|
||||
};
|
||||
|
||||
@@ -361,9 +361,34 @@ function getProjectName(explicitRoot = null) {
|
||||
return getGlobalConfig(explicitRoot).projectName;
|
||||
}
|
||||
|
||||
function getOllamaBaseUrl(explicitRoot = null) {
|
||||
function getOllamaBaseURL(explicitRoot = null) {
|
||||
// Directly return value from config
|
||||
return getGlobalConfig(explicitRoot).ollamaBaseUrl;
|
||||
return getGlobalConfig(explicitRoot).ollamaBaseURL;
|
||||
}
|
||||
|
||||
function getAzureBaseURL(explicitRoot = null) {
|
||||
// Directly return value from config
|
||||
return getGlobalConfig(explicitRoot).azureBaseURL;
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets the Google Cloud project ID for Vertex AI from configuration
|
||||
* @param {string|null} explicitRoot - Optional explicit path to the project root.
|
||||
* @returns {string|null} The project ID or null if not configured
|
||||
*/
|
||||
function getVertexProjectId(explicitRoot = null) {
|
||||
// Return value from config
|
||||
return getGlobalConfig(explicitRoot).vertexProjectId;
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets the Google Cloud location for Vertex AI from configuration
|
||||
* @param {string|null} explicitRoot - Optional explicit path to the project root.
|
||||
* @returns {string} The location or default value of "us-central1"
|
||||
*/
|
||||
function getVertexLocation(explicitRoot = null) {
|
||||
// Return value from config or default
|
||||
return getGlobalConfig(explicitRoot).vertexLocation || 'us-central1';
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -450,7 +475,8 @@ function isApiKeySet(providerName, session = null, projectRoot = null) {
|
||||
mistral: 'MISTRAL_API_KEY',
|
||||
azure: 'AZURE_OPENAI_API_KEY',
|
||||
openrouter: 'OPENROUTER_API_KEY',
|
||||
xai: 'XAI_API_KEY'
|
||||
xai: 'XAI_API_KEY',
|
||||
vertex: 'GOOGLE_API_KEY' // Vertex uses the same key as Google
|
||||
// Add other providers as needed
|
||||
};
|
||||
|
||||
@@ -542,6 +568,10 @@ function getMcpApiKeyStatus(providerName, projectRoot = null) {
|
||||
apiKeyToCheck = mcpEnv.AZURE_OPENAI_API_KEY;
|
||||
placeholderValue = 'YOUR_AZURE_OPENAI_API_KEY_HERE';
|
||||
break;
|
||||
case 'vertex':
|
||||
apiKeyToCheck = mcpEnv.GOOGLE_API_KEY; // Vertex uses Google API key
|
||||
placeholderValue = 'YOUR_GOOGLE_API_KEY_HERE';
|
||||
break;
|
||||
default:
|
||||
return false; // Unknown provider
|
||||
}
|
||||
@@ -707,8 +737,8 @@ function getAllProviders() {
|
||||
|
||||
function getBaseUrlForRole(role, explicitRoot = null) {
|
||||
const roleConfig = getModelConfigForRole(role, explicitRoot);
|
||||
return roleConfig && typeof roleConfig.baseUrl === 'string'
|
||||
? roleConfig.baseUrl
|
||||
return roleConfig && typeof roleConfig.baseURL === 'string'
|
||||
? roleConfig.baseURL
|
||||
: undefined;
|
||||
}
|
||||
|
||||
@@ -718,14 +748,12 @@ export {
|
||||
writeConfig,
|
||||
ConfigurationError,
|
||||
isConfigFilePresent,
|
||||
|
||||
// Validation
|
||||
validateProvider,
|
||||
validateProviderModelCombination,
|
||||
VALID_PROVIDERS,
|
||||
MODEL_MAP,
|
||||
getAvailableModels,
|
||||
|
||||
// Role-specific getters (No env var overrides)
|
||||
getMainProvider,
|
||||
getMainModelId,
|
||||
@@ -740,7 +768,6 @@ export {
|
||||
getFallbackMaxTokens,
|
||||
getFallbackTemperature,
|
||||
getBaseUrlForRole,
|
||||
|
||||
// Global setting getters (No env var overrides)
|
||||
getLogLevel,
|
||||
getDebugFlag,
|
||||
@@ -748,13 +775,15 @@ export {
|
||||
getDefaultSubtasks,
|
||||
getDefaultPriority,
|
||||
getProjectName,
|
||||
getOllamaBaseUrl,
|
||||
getOllamaBaseURL,
|
||||
getAzureBaseURL,
|
||||
getParametersForRole,
|
||||
getUserId,
|
||||
// API Key Checkers (still relevant)
|
||||
isApiKeySet,
|
||||
getMcpApiKeyStatus,
|
||||
|
||||
// ADD: Function to get all provider names
|
||||
getAllProviders
|
||||
getAllProviders,
|
||||
getVertexProjectId,
|
||||
getVertexLocation
|
||||
};
|
||||
|
||||
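A rough usage sketch of the getters added to config-manager.js above; the import path assumes the repo layout shown in this diff, and the project root argument is optional:

```js
// Sketch: reading the new Vertex/Azure/Ollama settings via the exported getters.
import {
	getVertexProjectId,
	getVertexLocation,
	getAzureBaseURL,
	getOllamaBaseURL
} from './scripts/modules/config-manager.js';

const projectRoot = '/path/to/project'; // hypothetical explicit root
console.log(getVertexProjectId(projectRoot)); // e.g. 'my-gcp-project' or undefined
console.log(getVertexLocation(projectRoot)); // falls back to 'us-central1'
console.log(getAzureBaseURL(projectRoot)); // from the global azureBaseURL setting, if present
console.log(getOllamaBaseURL(projectRoot)); // 'http://localhost:11434/api' unless overridden
```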
@@ -5,14 +5,14 @@
|
||||
"swe_score": 0.727,
|
||||
"cost_per_1m_tokens": { "input": 3.0, "output": 15.0 },
|
||||
"allowed_roles": ["main", "fallback"],
|
||||
"max_tokens": 120000
|
||||
"max_tokens": 64000
|
||||
},
|
||||
{
|
||||
"id": "claude-opus-4-20250514",
|
||||
"swe_score": 0.725,
|
||||
"cost_per_1m_tokens": { "input": 15.0, "output": 75.0 },
|
||||
"allowed_roles": ["main", "fallback"],
|
||||
"max_tokens": 120000
|
||||
"max_tokens": 32000
|
||||
},
|
||||
{
|
||||
"id": "claude-3-7-sonnet-20250219",
|
||||
|
||||
@@ -852,30 +852,32 @@ async function addTask(
|
||||
: 0;
|
||||
}
|
||||
|
||||
// Add a visual transition to show we're moving to AI generation
|
||||
console.log(
|
||||
boxen(
|
||||
chalk.white.bold('AI Task Generation') +
|
||||
`\n\n${chalk.gray('Analyzing context and generating task details using AI...')}` +
|
||||
`\n${chalk.cyan('Context size: ')}${chalk.yellow(contextTasks.length.toLocaleString())} characters` +
|
||||
`\n${chalk.cyan('Dependency detection: ')}${chalk.yellow(numericDependencies.length > 0 ? 'Explicit dependencies' : 'Auto-discovery mode')}` +
|
||||
`\n${chalk.cyan('Detailed tasks: ')}${chalk.yellow(
|
||||
numericDependencies.length > 0
|
||||
? dependentTasks.length // Use length of tasks from explicit dependency path
|
||||
: uniqueDetailedTasks.length // Use length of tasks from fuzzy search path
|
||||
)}` +
|
||||
(promptCategory
|
||||
? `\n${chalk.cyan('Category detected: ')}${chalk.yellow(promptCategory.label)}`
|
||||
: ''),
|
||||
{
|
||||
padding: { top: 0, bottom: 1, left: 1, right: 1 },
|
||||
margin: { top: 1, bottom: 0 },
|
||||
borderColor: 'white',
|
||||
borderStyle: 'round'
|
||||
}
|
||||
)
|
||||
);
|
||||
console.log(); // Add spacing
|
||||
// Add a visual transition to show we're moving to AI generation - only for CLI
|
||||
if (outputFormat === 'text') {
|
||||
console.log(
|
||||
boxen(
|
||||
chalk.white.bold('AI Task Generation') +
|
||||
`\n\n${chalk.gray('Analyzing context and generating task details using AI...')}` +
|
||||
`\n${chalk.cyan('Context size: ')}${chalk.yellow(contextTasks.length.toLocaleString())} characters` +
|
||||
`\n${chalk.cyan('Dependency detection: ')}${chalk.yellow(numericDependencies.length > 0 ? 'Explicit dependencies' : 'Auto-discovery mode')}` +
|
||||
`\n${chalk.cyan('Detailed tasks: ')}${chalk.yellow(
|
||||
numericDependencies.length > 0
|
||||
? dependentTasks.length // Use length of tasks from explicit dependency path
|
||||
: uniqueDetailedTasks.length // Use length of tasks from fuzzy search path
|
||||
)}` +
|
||||
(promptCategory
|
||||
? `\n${chalk.cyan('Category detected: ')}${chalk.yellow(promptCategory.label)}`
|
||||
: ''),
|
||||
{
|
||||
padding: { top: 0, bottom: 1, left: 1, right: 1 },
|
||||
margin: { top: 1, bottom: 0 },
|
||||
borderColor: 'white',
|
||||
borderStyle: 'round'
|
||||
}
|
||||
)
|
||||
);
|
||||
console.log(); // Add spacing
|
||||
}
|
||||
|
||||
// System Prompt - Enhanced for dependency awareness
|
||||
const systemPrompt =
|
||||
|
||||
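The change above gates the decorative boxen output on the output format, so MCP callers that request JSON are not polluted with CLI banners. A trivial sketch of the guard:

```js
// Sketch: CLI-only console output is skipped for structured (JSON/MCP) consumers.
const outputFormat = 'json'; // 'text' for the CLI, 'json' for MCP
if (outputFormat === 'text') {
	console.log('AI Task Generation banner (CLI only)');
}
```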
@@ -308,7 +308,8 @@ function parseSubtasksFromText(
|
||||
logger.error(
|
||||
`Advanced extraction: Problematic JSON string for parse (first 500 chars): ${jsonToParse.substring(0, 500)}`
|
||||
);
|
||||
throw new Error( // Re-throw a more specific error if advanced also fails
|
||||
throw new Error(
|
||||
// Re-throw a more specific error if advanced also fails
|
||||
`Failed to parse JSON response object after both simple and advanced attempts: ${parseError.message}`
|
||||
);
|
||||
}
|
||||
|
||||
@@ -72,14 +72,14 @@ function fetchOpenRouterModels() {
|
||||
|
||||
/**
|
||||
* Fetches the list of models from Ollama instance.
|
||||
* @param {string} baseUrl - The base URL for the Ollama API (e.g., "http://localhost:11434/api")
|
||||
* @param {string} baseURL - The base URL for the Ollama API (e.g., "http://localhost:11434/api")
|
||||
* @returns {Promise<Array|null>} A promise that resolves with the list of model objects or null if fetch fails.
|
||||
*/
|
||||
function fetchOllamaModels(baseUrl = 'http://localhost:11434/api') {
|
||||
function fetchOllamaModels(baseURL = 'http://localhost:11434/api') {
|
||||
return new Promise((resolve) => {
|
||||
try {
|
||||
// Parse the base URL to extract hostname, port, and base path
|
||||
const url = new URL(baseUrl);
|
||||
const url = new URL(baseURL);
|
||||
const isHttps = url.protocol === 'https:';
|
||||
const port = url.port || (isHttps ? 443 : 80);
|
||||
const basePath = url.pathname.endsWith('/')
|
||||
@@ -484,13 +484,13 @@ async function setModel(role, modelId, options = {}) {
|
||||
report('info', `Checking Ollama for ${modelId} (as hinted)...`);
|
||||
|
||||
// Get the Ollama base URL from config
|
||||
const ollamaBaseUrl = getBaseUrlForRole(role, projectRoot);
|
||||
const ollamaModels = await fetchOllamaModels(ollamaBaseUrl);
|
||||
const ollamaBaseURL = getBaseUrlForRole(role, projectRoot);
|
||||
const ollamaModels = await fetchOllamaModels(ollamaBaseURL);
|
||||
|
||||
if (ollamaModels === null) {
|
||||
// Connection failed - server probably not running
|
||||
throw new Error(
|
||||
`Unable to connect to Ollama server at ${ollamaBaseUrl}. Please ensure Ollama is running and try again.`
|
||||
`Unable to connect to Ollama server at ${ollamaBaseURL}. Please ensure Ollama is running and try again.`
|
||||
);
|
||||
} else if (ollamaModels.some((m) => m.model === modelId)) {
|
||||
determinedProvider = 'ollama';
|
||||
@@ -498,7 +498,7 @@ async function setModel(role, modelId, options = {}) {
|
||||
report('warn', warningMessage);
|
||||
} else {
|
||||
// Server is running but model not found
|
||||
const tagsUrl = `${ollamaBaseUrl}/tags`;
|
||||
const tagsUrl = `${ollamaBaseURL}/tags`;
|
||||
throw new Error(
|
||||
`Model ID "${modelId}" not found in the Ollama instance. Please verify the model is pulled and available. You can check available models with: curl ${tagsUrl}`
|
||||
);
|
||||
|
||||
@@ -60,8 +60,7 @@ function resolveEnvVariable(key, session = null, projectRoot = null) {
|
||||
|
||||
// --- Project Root Finding Utility ---
|
||||
/**
|
||||
* Finds the project root directory by searching upwards from a given starting point
|
||||
* for a marker file or directory (e.g., 'package.json', '.git').
|
||||
* Finds the project root directory by searching for marker files/directories.
|
||||
* @param {string} [startPath=process.cwd()] - The directory to start searching from.
|
||||
* @param {string[]} [markers=['package.json', '.git', '.taskmasterconfig']] - Marker files/dirs to look for.
|
||||
* @returns {string|null} The path to the project root directory, or null if not found.
|
||||
@@ -71,27 +70,35 @@ function findProjectRoot(
|
||||
markers = ['package.json', '.git', '.taskmasterconfig']
|
||||
) {
|
||||
let currentPath = path.resolve(startPath);
|
||||
while (true) {
|
||||
for (const marker of markers) {
|
||||
if (fs.existsSync(path.join(currentPath, marker))) {
|
||||
return currentPath;
|
||||
}
|
||||
const rootPath = path.parse(currentPath).root;
|
||||
|
||||
while (currentPath !== rootPath) {
|
||||
// Check if any marker exists in the current directory
|
||||
const hasMarker = markers.some((marker) => {
|
||||
const markerPath = path.join(currentPath, marker);
|
||||
return fs.existsSync(markerPath);
|
||||
});
|
||||
|
||||
if (hasMarker) {
|
||||
return currentPath;
|
||||
}
|
||||
const parentPath = path.dirname(currentPath);
|
||||
if (parentPath === currentPath) {
|
||||
// Reached the filesystem root
|
||||
return null;
|
||||
}
|
||||
currentPath = parentPath;
|
||||
|
||||
// Move up one directory
|
||||
currentPath = path.dirname(currentPath);
|
||||
}
|
||||
|
||||
// Check the root directory as well
|
||||
const hasMarkerInRoot = markers.some((marker) => {
|
||||
const markerPath = path.join(rootPath, marker);
|
||||
return fs.existsSync(markerPath);
|
||||
});
|
||||
|
||||
return hasMarkerInRoot ? rootPath : null;
|
||||
}
|
||||
|
||||
// --- Dynamic Configuration Function --- (REMOVED)
|
||||
/*
|
||||
function getConfig(session = null) {
|
||||
// ... implementation removed ...
|
||||
}
|
||||
*/
|
||||
|
||||
// --- Logging and Utility Functions ---
|
||||
|
||||
// Set up logging based on log level
|
||||
const LOG_LEVELS = {
|
||||
|
||||
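A small usage sketch of the reworked `findProjectRoot`; both arguments are optional and default to the values documented above:

```js
// Sketch: walk upward from a starting directory until a marker file/dir is found.
import { findProjectRoot } from './scripts/modules/utils.js';

const root = findProjectRoot(process.cwd(), [
	'package.json',
	'.git',
	'.taskmasterconfig'
]);
console.log(root ?? 'No project root found up to the filesystem root.');
```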
@@ -4,9 +4,9 @@
|
||||
* Implementation for interacting with Anthropic models (e.g., Claude)
|
||||
* using the Vercel AI SDK.
|
||||
*/
|
||||
|
||||
import { createAnthropic } from '@ai-sdk/anthropic';
|
||||
import { generateText, streamText, generateObject } from 'ai';
|
||||
import { log } from '../../scripts/modules/utils.js'; // Assuming utils is accessible
|
||||
import { BaseAIProvider } from './base-provider.js';
|
||||
|
||||
// TODO: Implement standardized functions for generateText, streamText, generateObject
|
||||
|
||||
@@ -17,207 +17,38 @@ import { log } from '../../scripts/modules/utils.js'; // Assuming utils is acces
|
||||
// Remove the global variable and caching logic
|
||||
// let anthropicClient;
|
||||
|
||||
function getClient(apiKey, baseUrl) {
|
||||
if (!apiKey) {
|
||||
// In a real scenario, this would use the config resolver.
|
||||
// Throwing error here if key isn't passed for simplicity.
|
||||
// Keep the error check for the passed key
|
||||
throw new Error('Anthropic API key is required.');
|
||||
export class AnthropicAIProvider extends BaseAIProvider {
|
||||
constructor() {
|
||||
super();
|
||||
this.name = 'Anthropic';
|
||||
}
|
||||
// Remove the check for anthropicClient
|
||||
// if (!anthropicClient) {
|
||||
// TODO: Explore passing options like default headers if needed
|
||||
// Create and return a new instance directly with standard version header
|
||||
return createAnthropic({
|
||||
apiKey: apiKey,
|
||||
...(baseUrl && { baseURL: baseUrl }),
|
||||
// Use standard version header instead of beta
|
||||
headers: {
|
||||
'anthropic-beta': 'output-128k-2025-02-19'
|
||||
|
||||
/**
|
||||
* Creates and returns an Anthropic client instance.
|
||||
* @param {object} params - Parameters for client initialization
|
||||
* @param {string} params.apiKey - Anthropic API key
|
||||
* @param {string} [params.baseURL] - Optional custom API endpoint
|
||||
* @returns {Function} Anthropic client function
|
||||
* @throws {Error} If API key is missing or initialization fails
|
||||
*/
|
||||
getClient(params) {
|
||||
try {
|
||||
const { apiKey, baseURL } = params;
|
||||
|
||||
if (!apiKey) {
|
||||
throw new Error('Anthropic API key is required.');
|
||||
}
|
||||
|
||||
return createAnthropic({
|
||||
apiKey,
|
||||
...(baseURL && { baseURL }),
|
||||
headers: {
|
||||
'anthropic-beta': 'output-128k-2025-02-19'
|
||||
}
|
||||
});
|
||||
} catch (error) {
|
||||
this.handleError('client initialization', error);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
// --- Standardized Service Function Implementations ---
|
||||
|
||||
/**
|
||||
* Generates text using an Anthropic model.
|
||||
*
|
||||
* @param {object} params - Parameters for the text generation.
|
||||
* @param {string} params.apiKey - The Anthropic API key.
|
||||
* @param {string} params.modelId - The specific Anthropic model ID.
|
||||
* @param {Array<object>} params.messages - The messages array (e.g., [{ role: 'user', content: '...' }]).
|
||||
* @param {number} [params.maxTokens] - Maximum tokens for the response.
|
||||
* @param {number} [params.temperature] - Temperature for generation.
|
||||
* @param {string} [params.baseUrl] - The base URL for the Anthropic API.
|
||||
* @returns {Promise<object>} The generated text content and usage.
|
||||
* @throws {Error} If the API call fails.
|
||||
*/
|
||||
export async function generateAnthropicText({
|
||||
apiKey,
|
||||
modelId,
|
||||
messages,
|
||||
maxTokens,
|
||||
temperature,
|
||||
baseUrl
|
||||
}) {
|
||||
log('debug', `Generating Anthropic text with model: ${modelId}`);
|
||||
try {
|
||||
const client = getClient(apiKey, baseUrl);
|
||||
const result = await generateText({
|
||||
model: client(modelId),
|
||||
messages: messages,
|
||||
maxTokens: maxTokens,
|
||||
temperature: temperature
|
||||
// Beta header moved to client initialization
|
||||
// TODO: Add other relevant parameters like topP, topK if needed
|
||||
});
|
||||
log(
|
||||
'debug',
|
||||
`Anthropic generateText result received. Tokens: ${result.usage.completionTokens}/${result.usage.promptTokens}`
|
||||
);
|
||||
// Return both text and usage
|
||||
return {
|
||||
text: result.text,
|
||||
usage: {
|
||||
inputTokens: result.usage.promptTokens,
|
||||
outputTokens: result.usage.completionTokens
|
||||
}
|
||||
};
|
||||
} catch (error) {
|
||||
log('error', `Anthropic generateText failed: ${error.message}`);
|
||||
// Consider more specific error handling or re-throwing a standardized error
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Streams text using an Anthropic model.
|
||||
*
|
||||
* @param {object} params - Parameters for the text streaming.
|
||||
* @param {string} params.apiKey - The Anthropic API key.
|
||||
* @param {string} params.modelId - The specific Anthropic model ID.
|
||||
* @param {Array<object>} params.messages - The messages array.
|
||||
* @param {number} [params.maxTokens] - Maximum tokens for the response.
|
||||
* @param {number} [params.temperature] - Temperature for generation.
|
||||
* @param {string} [params.baseUrl] - The base URL for the Anthropic API.
|
||||
* @returns {Promise<object>} The full stream result object from the Vercel AI SDK.
|
||||
* @throws {Error} If the API call fails to initiate the stream.
|
||||
*/
|
||||
export async function streamAnthropicText({
|
||||
apiKey,
|
||||
modelId,
|
||||
messages,
|
||||
maxTokens,
|
||||
temperature,
|
||||
baseUrl
|
||||
}) {
|
||||
log('debug', `Streaming Anthropic text with model: ${modelId}`);
|
||||
try {
|
||||
const client = getClient(apiKey, baseUrl);
|
||||
|
||||
log(
|
||||
'debug',
|
||||
'[streamAnthropicText] Parameters received by streamText:',
|
||||
JSON.stringify(
|
||||
{
|
||||
modelId: modelId,
|
||||
messages: messages,
|
||||
maxTokens: maxTokens,
|
||||
temperature: temperature
|
||||
},
|
||||
null,
|
||||
2
|
||||
)
|
||||
);
|
||||
|
||||
const stream = await streamText({
|
||||
model: client(modelId),
|
||||
messages: messages,
|
||||
maxTokens: maxTokens,
|
||||
temperature: temperature
|
||||
// TODO: Add other relevant parameters
|
||||
});
|
||||
|
||||
// *** RETURN THE FULL STREAM OBJECT, NOT JUST stream.textStream ***
|
||||
return stream;
|
||||
} catch (error) {
|
||||
log('error', `Anthropic streamText failed: ${error.message}`, error.stack);
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Generates a structured object using an Anthropic model.
|
||||
* NOTE: Anthropic's tool/function calling support might have limitations
|
||||
* compared to OpenAI, especially regarding complex schemas or enforcement.
|
||||
* The Vercel AI SDK attempts to abstract this.
|
||||
*
|
||||
* @param {object} params - Parameters for object generation.
|
||||
* @param {string} params.apiKey - The Anthropic API key.
|
||||
* @param {string} params.modelId - The specific Anthropic model ID.
|
||||
* @param {Array<object>} params.messages - The messages array.
|
||||
* @param {import('zod').ZodSchema} params.schema - The Zod schema for the object.
|
||||
* @param {string} params.objectName - A name for the object/tool.
|
||||
* @param {number} [params.maxTokens] - Maximum tokens for the response.
|
||||
* @param {number} [params.temperature] - Temperature for generation.
|
||||
* @param {number} [params.maxRetries] - Max retries for validation/generation.
|
||||
* @param {string} [params.baseUrl] - The base URL for the Anthropic API.
|
||||
* @returns {Promise<object>} The generated object matching the schema and usage.
|
||||
* @throws {Error} If generation or validation fails.
|
||||
*/
|
||||
export async function generateAnthropicObject({
|
||||
apiKey,
|
||||
modelId,
|
||||
messages,
|
||||
schema,
|
||||
objectName = 'generated_object',
|
||||
maxTokens,
|
||||
temperature,
|
||||
maxRetries = 3,
|
||||
baseUrl
|
||||
}) {
|
||||
log(
|
||||
'debug',
|
||||
`Generating Anthropic object ('${objectName}') with model: ${modelId}`
|
||||
);
|
||||
try {
|
||||
const client = getClient(apiKey, baseUrl);
|
||||
log(
|
||||
'debug',
|
||||
`Using maxTokens: ${maxTokens}, temperature: ${temperature}, model: ${modelId}`
|
||||
);
|
||||
const result = await generateObject({
|
||||
model: client(modelId),
|
||||
mode: 'tool',
|
||||
schema: schema,
|
||||
messages: messages,
|
||||
tool: {
|
||||
name: objectName,
|
||||
description: `Generate a ${objectName} based on the prompt.`
|
||||
},
|
||||
maxTokens: maxTokens,
|
||||
temperature: temperature,
|
||||
maxRetries: maxRetries
|
||||
});
|
||||
log(
|
||||
'debug',
|
||||
`Anthropic generateObject result received. Tokens: ${result.usage.completionTokens}/${result.usage.promptTokens}`
|
||||
);
|
||||
// Return both object and usage
|
||||
return {
|
||||
object: result.object,
|
||||
usage: {
|
||||
inputTokens: result.usage.promptTokens,
|
||||
outputTokens: result.usage.completionTokens
|
||||
}
|
||||
};
|
||||
} catch (error) {
|
||||
log(
|
||||
'error',
|
||||
`Anthropic generateObject ('${objectName}') failed: ${error.message}`
|
||||
);
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
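With the refactor above, Anthropic calls go through a provider instance and the shared base-class methods instead of the removed standalone functions. A hedged sketch; the key and model values are placeholders:

```js
// Sketch: class-based usage replacing generateAnthropicText/streamAnthropicText/generateAnthropicObject.
import { AnthropicAIProvider } from './src/ai-providers/anthropic.js';

const provider = new AnthropicAIProvider();
const { text, usage } = await provider.generateText({
	apiKey: process.env.ANTHROPIC_API_KEY, // placeholder; normally resolved by the config layer
	modelId: 'claude-3-7-sonnet-20250219',
	messages: [{ role: 'user', content: 'Summarize this task in one sentence.' }],
	maxTokens: 256,
	temperature: 0.2
});
console.log(text, usage);
```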
52
src/ai-providers/azure.js
Normal file
@@ -0,0 +1,52 @@
|
||||
/**
|
||||
* azure.js
|
||||
* AI provider implementation for Azure OpenAI models using Vercel AI SDK.
|
||||
*/
|
||||
|
||||
import { createAzure } from '@ai-sdk/azure';
|
||||
import { BaseAIProvider } from './base-provider.js';
|
||||
|
||||
export class AzureProvider extends BaseAIProvider {
|
||||
constructor() {
|
||||
super();
|
||||
this.name = 'Azure OpenAI';
|
||||
}
|
||||
|
||||
/**
|
||||
* Validates Azure-specific authentication parameters
|
||||
* @param {object} params - Parameters to validate
|
||||
* @throws {Error} If required parameters are missing
|
||||
*/
|
||||
validateAuth(params) {
|
||||
if (!params.apiKey) {
|
||||
throw new Error('Azure API key is required');
|
||||
}
|
||||
|
||||
if (!params.baseURL) {
|
||||
throw new Error(
|
||||
'Azure endpoint URL is required. Set it in .taskmasterconfig global.azureBaseURL or models.[role].baseURL'
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates and returns an Azure OpenAI client instance.
|
||||
* @param {object} params - Parameters for client initialization
|
||||
* @param {string} params.apiKey - Azure OpenAI API key
|
||||
* @param {string} params.baseURL - Azure OpenAI endpoint URL (from .taskmasterconfig global.azureBaseURL or models.[role].baseURL)
|
||||
* @returns {Function} Azure OpenAI client function
|
||||
* @throws {Error} If required parameters are missing or initialization fails
|
||||
*/
|
||||
getClient(params) {
|
||||
try {
|
||||
const { apiKey, baseURL } = params;
|
||||
|
||||
return createAzure({
|
||||
apiKey,
|
||||
baseURL
|
||||
});
|
||||
} catch (error) {
|
||||
this.handleError('client initialization', error);
|
||||
}
|
||||
}
|
||||
}
|
||||
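Unlike most providers, Azure requires an endpoint as well as a key; `validateAuth` enforces both before any generation call. A sketch with placeholder values:

```js
// Sketch: AzureProvider needs both apiKey and baseURL (from .taskmasterconfig) to be usable.
import { AzureProvider } from './src/ai-providers/azure.js';

const azure = new AzureProvider();
const client = azure.getClient({
	apiKey: process.env.AZURE_OPENAI_API_KEY, // placeholder
	baseURL: 'https://my-resource.openai.azure.com/openai/deployments' // hypothetical endpoint
});
// generateText/streamText/generateObject throw via validateAuth if either value is missing.
```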
214
src/ai-providers/base-provider.js
Normal file
@@ -0,0 +1,214 @@
|
||||
import { generateText, streamText, generateObject } from 'ai';
|
||||
import { log } from '../../scripts/modules/index.js';
|
||||
|
||||
/**
|
||||
* Base class for all AI providers
|
||||
*/
|
||||
export class BaseAIProvider {
|
||||
constructor() {
|
||||
if (this.constructor === BaseAIProvider) {
|
||||
throw new Error('BaseAIProvider cannot be instantiated directly');
|
||||
}
|
||||
|
||||
// Each provider must set their name
|
||||
this.name = this.constructor.name;
|
||||
}
|
||||
|
||||
/**
|
||||
* Validates authentication parameters - can be overridden by providers
|
||||
* @param {object} params - Parameters to validate
|
||||
*/
|
||||
validateAuth(params) {
|
||||
// Default: require API key (most providers need this)
|
||||
if (!params.apiKey) {
|
||||
throw new Error(`${this.name} API key is required`);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Validates common parameters across all methods
|
||||
* @param {object} params - Parameters to validate
|
||||
*/
|
||||
validateParams(params) {
|
||||
// Validate authentication (can be overridden by providers)
|
||||
this.validateAuth(params);
|
||||
|
||||
// Validate required model ID
|
||||
if (!params.modelId) {
|
||||
throw new Error(`${this.name} Model ID is required`);
|
||||
}
|
||||
|
||||
// Validate optional parameters
|
||||
this.validateOptionalParams(params);
|
||||
}
|
||||
|
||||
/**
|
||||
* Validates optional parameters like temperature and maxTokens
|
||||
* @param {object} params - Parameters to validate
|
||||
*/
|
||||
validateOptionalParams(params) {
|
||||
if (
|
||||
params.temperature !== undefined &&
|
||||
(params.temperature < 0 || params.temperature > 1)
|
||||
) {
|
||||
throw new Error('Temperature must be between 0 and 1');
|
||||
}
|
||||
if (params.maxTokens !== undefined && params.maxTokens <= 0) {
|
||||
throw new Error('maxTokens must be greater than 0');
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Validates message array structure
|
||||
*/
|
||||
validateMessages(messages) {
|
||||
if (!messages || !Array.isArray(messages) || messages.length === 0) {
|
||||
throw new Error('Invalid or empty messages array provided');
|
||||
}
|
||||
|
||||
for (const msg of messages) {
|
||||
if (!msg.role || !msg.content) {
|
||||
throw new Error(
|
||||
'Invalid message format. Each message must have role and content'
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Common error handler
|
||||
*/
|
||||
handleError(operation, error) {
|
||||
const errorMessage = error.message || 'Unknown error occurred';
|
||||
log('error', `${this.name} ${operation} failed: ${errorMessage}`, {
|
||||
error
|
||||
});
|
||||
throw new Error(
|
||||
`${this.name} API error during ${operation}: ${errorMessage}`
|
||||
);
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates and returns a client instance for the provider
|
||||
* @abstract
|
||||
*/
|
||||
getClient(params) {
|
||||
throw new Error('getClient must be implemented by provider');
|
||||
}
|
||||
|
||||
/**
|
||||
* Generates text using the provider's model
|
||||
*/
|
||||
async generateText(params) {
|
||||
try {
|
||||
this.validateParams(params);
|
||||
this.validateMessages(params.messages);
|
||||
|
||||
log(
|
||||
'debug',
|
||||
`Generating ${this.name} text with model: ${params.modelId}`
|
||||
);
|
||||
|
||||
const client = this.getClient(params);
|
||||
const result = await generateText({
|
||||
model: client(params.modelId),
|
||||
messages: params.messages,
|
||||
maxTokens: params.maxTokens,
|
||||
temperature: params.temperature
|
||||
});
|
||||
|
||||
log(
|
||||
'debug',
|
||||
`${this.name} generateText completed successfully for model: ${params.modelId}`
|
||||
);
|
||||
|
||||
return {
|
||||
text: result.text,
|
||||
usage: {
|
||||
inputTokens: result.usage?.promptTokens,
|
||||
outputTokens: result.usage?.completionTokens,
|
||||
totalTokens: result.usage?.totalTokens
|
||||
}
|
||||
};
|
||||
} catch (error) {
|
||||
this.handleError('text generation', error);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Streams text using the provider's model
|
||||
*/
|
||||
async streamText(params) {
|
||||
try {
|
||||
this.validateParams(params);
|
||||
this.validateMessages(params.messages);
|
||||
|
||||
log('debug', `Streaming ${this.name} text with model: ${params.modelId}`);
|
||||
|
||||
const client = this.getClient(params);
|
||||
const stream = await streamText({
|
||||
model: client(params.modelId),
|
||||
messages: params.messages,
|
||||
maxTokens: params.maxTokens,
|
||||
temperature: params.temperature
|
||||
});
|
||||
|
||||
log(
|
||||
'debug',
|
||||
`${this.name} streamText initiated successfully for model: ${params.modelId}`
|
||||
);
|
||||
|
||||
return stream;
|
||||
} catch (error) {
|
||||
this.handleError('text streaming', error);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Generates a structured object using the provider's model
|
||||
*/
|
||||
async generateObject(params) {
|
||||
try {
|
||||
this.validateParams(params);
|
||||
this.validateMessages(params.messages);
|
||||
|
||||
if (!params.schema) {
|
||||
throw new Error('Schema is required for object generation');
|
||||
}
|
||||
if (!params.objectName) {
|
||||
throw new Error('Object name is required for object generation');
|
||||
}
|
||||
|
||||
log(
|
||||
'debug',
|
||||
`Generating ${this.name} object ('${params.objectName}') with model: ${params.modelId}`
|
||||
);
|
||||
|
||||
const client = this.getClient(params);
|
||||
const result = await generateObject({
|
||||
model: client(params.modelId),
|
||||
messages: params.messages,
|
||||
schema: params.schema,
|
||||
mode: 'tool',
|
||||
maxTokens: params.maxTokens,
|
||||
temperature: params.temperature
|
||||
});
|
||||
|
||||
log(
|
||||
'debug',
|
||||
`${this.name} generateObject completed successfully for model: ${params.modelId}`
|
||||
);
|
||||
|
||||
return {
|
||||
object: result.object,
|
||||
usage: {
|
||||
inputTokens: result.usage?.promptTokens,
|
||||
outputTokens: result.usage?.completionTokens,
|
||||
totalTokens: result.usage?.totalTokens
|
||||
}
|
||||
};
|
||||
} catch (error) {
|
||||
this.handleError('object generation', error);
|
||||
}
|
||||
}
|
||||
}
|
||||
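The base class above owns validation, logging, and the Vercel AI SDK calls, so a concrete provider usually only supplies `getClient` (and overrides `validateAuth` when key handling differs). A sketch with a made-up provider:

```js
// Sketch: the minimal surface a BaseAIProvider subclass implements.
import { createOpenAI } from '@ai-sdk/openai';
import { BaseAIProvider } from './src/ai-providers/base-provider.js';

class ExampleProvider extends BaseAIProvider {
	constructor() {
		super();
		this.name = 'Example'; // hypothetical provider name
	}

	// Only the client factory is provider-specific; generateText, streamText and
	// generateObject are inherited from BaseAIProvider.
	getClient({ apiKey, baseURL }) {
		return createOpenAI({ apiKey, ...(baseURL && { baseURL }) });
	}
}
```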
41
src/ai-providers/bedrock.js
Normal file
@@ -0,0 +1,41 @@
|
||||
import { createAmazonBedrock } from '@ai-sdk/amazon-bedrock';
|
||||
import { fromNodeProviderChain } from '@aws-sdk/credential-providers';
|
||||
import { BaseAIProvider } from './base-provider.js';
|
||||
|
||||
export class BedrockAIProvider extends BaseAIProvider {
|
||||
constructor() {
|
||||
super();
|
||||
this.name = 'Bedrock';
|
||||
}
|
||||
|
||||
/**
|
||||
* Override auth validation - Bedrock uses AWS credentials instead of API keys
|
||||
* @param {object} params - Parameters to validate
|
||||
*/
|
||||
validateAuth(params) {}
|
||||
|
||||
/**
|
||||
* Creates and returns a Bedrock client instance.
|
||||
* See https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-envvars.html
|
||||
* for AWS SDK environment variables and configuration options.
|
||||
*/
|
||||
getClient(params) {
|
||||
try {
|
||||
const {
|
||||
profile = process.env.AWS_PROFILE || 'default',
|
||||
region = process.env.AWS_DEFAULT_REGION || 'us-east-1',
|
||||
baseURL
|
||||
} = params;
|
||||
|
||||
const credentialProvider = fromNodeProviderChain({ profile });
|
||||
|
||||
return createAmazonBedrock({
|
||||
region,
|
||||
credentialProvider,
|
||||
...(baseURL && { baseURL })
|
||||
});
|
||||
} catch (error) {
|
||||
this.handleError('client initialization', error);
|
||||
}
|
||||
}
|
||||
}
|
||||
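Bedrock deliberately skips API-key validation and resolves AWS credentials through the Node provider chain (environment variables, shared config, SSO, and so on). A usage sketch with placeholder values:

```js
// Sketch: no apiKey needed; credentials come from the standard AWS chain.
import { BedrockAIProvider } from './src/ai-providers/bedrock.js';

const bedrock = new BedrockAIProvider();
const result = await bedrock.generateText({
	modelId: 'anthropic.claude-3-sonnet-20240229-v1:0', // example ID from the CLI help above
	messages: [{ role: 'user', content: 'Hello from Bedrock' }],
	region: 'us-east-1', // optional, defaults to AWS_DEFAULT_REGION
	profile: 'default' // optional, defaults to AWS_PROFILE
});
console.log(result.text);
```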
150
src/ai-providers/google-vertex.js
Normal file
@@ -0,0 +1,150 @@
|
||||
/**
|
||||
* google-vertex.js
|
||||
* AI provider implementation for Google Vertex AI models using Vercel AI SDK.
|
||||
*/
|
||||
|
||||
import { createVertex } from '@ai-sdk/google-vertex';
|
||||
import { BaseAIProvider } from './base-provider.js';
|
||||
import { resolveEnvVariable } from '../../scripts/modules/utils.js';
|
||||
import { log } from '../../scripts/modules/utils.js';
|
||||
|
||||
// Vertex-specific error classes
|
||||
class VertexAuthError extends Error {
|
||||
constructor(message) {
|
||||
super(message);
|
||||
this.name = 'VertexAuthError';
|
||||
this.code = 'vertex_auth_error';
|
||||
}
|
||||
}
|
||||
|
||||
class VertexConfigError extends Error {
|
||||
constructor(message) {
|
||||
super(message);
|
||||
this.name = 'VertexConfigError';
|
||||
this.code = 'vertex_config_error';
|
||||
}
|
||||
}
|
||||
|
||||
class VertexApiError extends Error {
|
||||
constructor(message, statusCode) {
|
||||
super(message);
|
||||
this.name = 'VertexApiError';
|
||||
this.code = 'vertex_api_error';
|
||||
this.statusCode = statusCode;
|
||||
}
|
||||
}
|
||||
|
||||
export class VertexAIProvider extends BaseAIProvider {
|
||||
constructor() {
|
||||
super();
|
||||
this.name = 'Google Vertex AI';
|
||||
}
|
||||
|
||||
/**
|
||||
* Validates Vertex AI-specific authentication parameters
|
||||
* @param {object} params - Parameters to validate
|
||||
* @throws {Error} If required parameters are missing
|
||||
*/
|
||||
validateAuth(params) {
|
||||
const { apiKey, projectId, location, credentials } = params;
|
||||
|
||||
// Check for API key OR service account credentials
|
||||
if (!apiKey && !credentials) {
|
||||
throw new VertexAuthError(
|
||||
'Either Google API key (GOOGLE_API_KEY) or service account credentials (GOOGLE_APPLICATION_CREDENTIALS) is required for Vertex AI'
|
||||
);
|
||||
}
|
||||
|
||||
// Project ID is required for Vertex AI
|
||||
if (!projectId) {
|
||||
throw new VertexConfigError(
|
||||
'Google Cloud project ID is required for Vertex AI. Set VERTEX_PROJECT_ID environment variable.'
|
||||
);
|
||||
}
|
||||
|
||||
// Location is required for Vertex AI
|
||||
if (!location) {
|
||||
throw new VertexConfigError(
|
||||
'Google Cloud location is required for Vertex AI. Set VERTEX_LOCATION environment variable (e.g., "us-central1").'
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates and returns a Google Vertex AI client instance.
|
||||
* @param {object} params - Parameters for client initialization
|
||||
* @param {string} [params.apiKey] - Google API key
|
||||
* @param {string} params.projectId - Google Cloud project ID
|
||||
* @param {string} params.location - Google Cloud location (e.g., "us-central1")
|
||||
* @param {object} [params.credentials] - Service account credentials object
|
||||
* @param {string} [params.baseURL] - Optional custom API endpoint
|
||||
* @returns {Function} Google Vertex AI client function
|
||||
* @throws {Error} If required parameters are missing or initialization fails
|
||||
*/
|
||||
getClient(params) {
|
||||
try {
|
||||
// Validate required parameters
|
||||
this.validateAuth(params);
|
||||
|
||||
const { apiKey, projectId, location, credentials, baseURL } = params;
|
||||
|
||||
// Configure auth options - either API key or service account
|
||||
const authOptions = {};
|
||||
if (apiKey) {
|
||||
authOptions.apiKey = apiKey;
|
||||
} else if (credentials) {
|
||||
authOptions.googleAuthOptions = credentials;
|
||||
}
|
||||
|
||||
// Return Vertex AI client
|
||||
return createVertex({
|
||||
...authOptions,
|
||||
projectId,
|
||||
location,
|
||||
...(baseURL && { baseURL })
|
||||
});
|
||||
} catch (error) {
|
||||
this.handleError('client initialization', error);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Handle errors from Vertex AI
|
||||
* @param {string} operation - Description of the operation that failed
|
||||
* @param {Error} error - The error object
|
||||
* @throws {Error} Rethrows the error with additional context
|
||||
*/
|
||||
handleError(operation, error) {
|
||||
log('error', `Vertex AI ${operation} error:`, error);
|
||||
|
||||
// Handle known error types
|
||||
if (
|
||||
error.name === 'VertexAuthError' ||
|
||||
error.name === 'VertexConfigError' ||
|
||||
error.name === 'VertexApiError'
|
||||
) {
|
||||
throw error;
|
||||
}
|
||||
|
||||
// Handle network/API errors
|
||||
if (error.response) {
|
||||
const statusCode = error.response.status;
|
||||
const errorMessage = error.response.data?.error?.message || error.message;
|
||||
|
||||
// Categorize by status code
|
||||
if (statusCode === 401 || statusCode === 403) {
|
||||
throw new VertexAuthError(`Authentication failed: ${errorMessage}`);
|
||||
} else if (statusCode === 400) {
|
||||
throw new VertexConfigError(`Invalid request: ${errorMessage}`);
|
||||
} else {
|
||||
throw new VertexApiError(
|
||||
`API error (${statusCode}): ${errorMessage}`,
|
||||
statusCode
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
// Generic error handling
|
||||
throw new Error(`Vertex AI ${operation} failed: ${error.message}`);
|
||||
}
|
||||
}
|
||||
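The Vertex provider accepts either a Google API key or service-account credentials, and always needs a project ID and location. A hedged sketch of both paths; all values are placeholders:

```js
// Sketch: the two authentication paths accepted by validateAuth above.
import { VertexAIProvider } from './src/ai-providers/google-vertex.js';

const vertex = new VertexAIProvider();

// Path 1: API key (GOOGLE_API_KEY)
const clientWithKey = vertex.getClient({
	apiKey: process.env.GOOGLE_API_KEY, // placeholder
	projectId: 'my-gcp-project', // hypothetical VERTEX_PROJECT_ID
	location: 'us-central1' // VERTEX_LOCATION, default shown in the config getter
});

// Path 2: service-account credentials (GOOGLE_APPLICATION_CREDENTIALS)
const clientWithCreds = vertex.getClient({
	credentials: { credentialsFromEnv: true }, // shape used by the unified service layer above
	projectId: 'my-gcp-project',
	location: 'us-central1'
});
```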
@@ -1,181 +1,39 @@
|
||||
/**
|
||||
* google.js
|
||||
* AI provider implementation for Google AI models (e.g., Gemini) using Vercel AI SDK.
|
||||
* AI provider implementation for Google AI models using Vercel AI SDK.
|
||||
*/
|
||||
|
||||
// import { GoogleGenerativeAI } from '@ai-sdk/google'; // Incorrect import
|
||||
import { createGoogleGenerativeAI } from '@ai-sdk/google'; // Correct import for customization
|
||||
import { generateText, streamText, generateObject } from 'ai'; // Import from main 'ai' package
|
||||
import { log } from '../../scripts/modules/utils.js'; // Import logging utility
|
||||
import { createGoogleGenerativeAI } from '@ai-sdk/google';
|
||||
import { BaseAIProvider } from './base-provider.js';
|
||||
|
||||
// Consider making model configurable via config-manager.js later
|
||||
const DEFAULT_MODEL = 'gemini-2.5-pro-exp-03-25'; // Or a suitable default
|
||||
const DEFAULT_TEMPERATURE = 0.2; // Or a suitable default
|
||||
|
||||
function getClient(apiKey, baseUrl) {
|
||||
if (!apiKey) {
|
||||
throw new Error('Google API key is required.');
|
||||
export class GoogleAIProvider extends BaseAIProvider {
|
||||
constructor() {
|
||||
super();
|
||||
this.name = 'Google';
|
||||
}
|
||||
return createGoogleGenerativeAI({
|
||||
apiKey: apiKey,
|
||||
...(baseUrl && { baseURL: baseUrl })
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Generates text using a Google AI model.
|
||||
*
|
||||
* @param {object} params - Parameters for the generation.
|
||||
* @param {string} params.apiKey - Google API Key.
|
||||
* @param {string} params.modelId - Specific model ID to use (overrides default).
|
||||
* @param {number} params.temperature - Generation temperature.
|
||||
* @param {Array<object>} params.messages - The conversation history (system/user prompts).
|
||||
* @param {number} [params.maxTokens] - Optional max tokens.
|
||||
* @returns {Promise<string>} The generated text content.
|
||||
* @throws {Error} If API key is missing or API call fails.
|
||||
*/
|
||||
async function generateGoogleText({
|
||||
apiKey,
|
||||
modelId = DEFAULT_MODEL,
|
||||
temperature = DEFAULT_TEMPERATURE,
|
||||
messages,
|
||||
maxTokens,
|
||||
baseUrl
|
||||
}) {
|
||||
if (!apiKey) {
|
||||
throw new Error('Google API key is required.');
|
||||
}
|
||||
log('info', `Generating text with Google model: ${modelId}`);
|
||||
/**
|
||||
* Creates and returns a Google AI client instance.
|
||||
* @param {object} params - Parameters for client initialization
|
||||
* @param {string} params.apiKey - Google API key
|
||||
* @param {string} [params.baseURL] - Optional custom API endpoint
|
||||
* @returns {Function} Google AI client function
|
||||
* @throws {Error} If API key is missing or initialization fails
|
||||
*/
|
||||
getClient(params) {
|
||||
try {
|
||||
const { apiKey, baseURL } = params;
|
||||
|
||||
try {
|
||||
const googleProvider = getClient(apiKey, baseUrl);
|
||||
const model = googleProvider(modelId);
|
||||
const result = await generateText({
|
||||
model,
|
||||
messages,
|
||||
temperature,
|
||||
maxOutputTokens: maxTokens
|
||||
});
|
||||
|
||||
// Assuming result structure provides text directly or within a property
|
||||
// return result.text; // Adjust based on actual SDK response
|
||||
// Return both text and usage
|
||||
return {
|
||||
text: result.text,
|
||||
usage: {
|
||||
inputTokens: result.usage.promptTokens,
|
||||
outputTokens: result.usage.completionTokens
|
||||
if (!apiKey) {
|
||||
throw new Error('Google API key is required.');
|
||||
}
|
||||
};
|
||||
} catch (error) {
|
||||
log(
|
||||
'error',
|
||||
`Error generating text with Google (${modelId}): ${error.message}`
|
||||
);
|
||||
throw error;
|
||||
|
||||
return createGoogleGenerativeAI({
|
||||
apiKey,
|
||||
...(baseURL && { baseURL })
|
||||
});
|
||||
} catch (error) {
|
||||
this.handleError('client initialization', error);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Streams text using a Google AI model.
|
||||
*
|
||||
* @param {object} params - Parameters for the streaming.
|
||||
* @param {string} params.apiKey - Google API Key.
|
||||
* @param {string} params.modelId - Specific model ID to use (overrides default).
|
||||
* @param {number} params.temperature - Generation temperature.
|
||||
* @param {Array<object>} params.messages - The conversation history.
|
||||
* @param {number} [params.maxTokens] - Optional max tokens.
|
||||
* @returns {Promise<ReadableStream>} A readable stream of text deltas.
|
||||
* @throws {Error} If API key is missing or API call fails.
|
||||
*/
|
||||
async function streamGoogleText({
|
||||
apiKey,
|
||||
modelId = DEFAULT_MODEL,
|
||||
temperature = DEFAULT_TEMPERATURE,
|
||||
messages,
|
||||
maxTokens,
|
||||
baseUrl
|
||||
}) {
|
||||
if (!apiKey) {
|
||||
throw new Error('Google API key is required.');
|
||||
}
|
||||
log('info', `Streaming text with Google model: ${modelId}`);
|
||||
|
||||
try {
|
||||
const googleProvider = getClient(apiKey, baseUrl);
|
||||
const model = googleProvider(modelId);
|
||||
const stream = await streamText({
|
||||
model,
|
||||
messages,
|
||||
temperature,
|
||||
maxOutputTokens: maxTokens
|
||||
});
|
||||
return stream;
|
||||
} catch (error) {
|
||||
log(
|
||||
'error',
|
||||
`Error streaming text with Google (${modelId}): ${error.message}`
|
||||
);
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Generates a structured object using a Google AI model.
|
||||
*
|
||||
* @param {object} params - Parameters for the object generation.
|
||||
* @param {string} params.apiKey - Google API Key.
|
||||
* @param {string} params.modelId - Specific model ID to use (overrides default).
|
||||
* @param {number} params.temperature - Generation temperature.
|
||||
* @param {Array<object>} params.messages - The conversation history.
|
||||
* @param {import('zod').ZodSchema} params.schema - Zod schema for the expected object.
|
||||
* @param {string} params.objectName - Name for the object generation context.
|
||||
* @param {number} [params.maxTokens] - Optional max tokens.
|
||||
* @returns {Promise<object>} The generated object matching the schema.
|
||||
* @throws {Error} If API key is missing or API call fails.
|
||||
*/
|
||||
async function generateGoogleObject({
|
||||
apiKey,
|
||||
modelId = DEFAULT_MODEL,
|
||||
temperature = DEFAULT_TEMPERATURE,
|
||||
messages,
|
||||
schema,
|
||||
objectName, // Note: Vercel SDK might use this differently or not at all
|
||||
maxTokens,
|
||||
baseUrl
|
||||
}) {
|
||||
if (!apiKey) {
|
||||
throw new Error('Google API key is required.');
|
||||
}
|
||||
log('info', `Generating object with Google model: ${modelId}`);
|
||||
|
||||
try {
|
||||
const googleProvider = getClient(apiKey, baseUrl);
|
||||
const model = googleProvider(modelId);
|
||||
const result = await generateObject({
|
||||
model,
|
||||
schema,
|
||||
messages,
|
||||
temperature,
|
||||
maxOutputTokens: maxTokens
|
||||
});
|
||||
|
||||
// return object; // Return the parsed object
|
||||
// Return both object and usage
|
||||
return {
|
||||
object: result.object,
|
||||
usage: {
|
||||
inputTokens: result.usage.promptTokens,
|
||||
outputTokens: result.usage.completionTokens
|
||||
}
|
||||
};
|
||||
} catch (error) {
|
||||
log(
|
||||
'error',
|
||||
`Error generating object with Google (${modelId}): ${error.message}`
|
||||
);
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||
export { generateGoogleText, streamGoogleText, generateGoogleObject };
|
||||
|
||||
15
src/ai-providers/index.js
Normal file
@@ -0,0 +1,15 @@
|
||||
/**
|
||||
* src/ai-providers/index.js
|
||||
* Central export point for all AI provider classes
|
||||
*/
|
||||
|
||||
export { AnthropicAIProvider } from './anthropic.js';
|
||||
export { PerplexityAIProvider } from './perplexity.js';
|
||||
export { GoogleAIProvider } from './google.js';
|
||||
export { OpenAIProvider } from './openai.js';
|
||||
export { XAIProvider } from './xai.js';
|
||||
export { OpenRouterAIProvider } from './openrouter.js';
|
||||
export { OllamaAIProvider } from './ollama.js';
|
||||
export { BedrockAIProvider } from './bedrock.js';
|
||||
export { AzureProvider } from './azure.js';
|
||||
export { VertexAIProvider } from './google-vertex.js';
|
||||
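One common way a central export point like this is consumed is a name-keyed lookup of provider instances; this is an illustrative sketch, not code from this changeset:

```js
// Sketch: building a provider registry from the new index.js exports.
import {
	AnthropicAIProvider,
	OpenAIProvider,
	BedrockAIProvider,
	AzureProvider,
	VertexAIProvider
} from './src/ai-providers/index.js';

const PROVIDERS = {
	anthropic: new AnthropicAIProvider(),
	openai: new OpenAIProvider(),
	bedrock: new BedrockAIProvider(),
	azure: new AzureProvider(),
	vertex: new VertexAIProvider()
};

console.log(PROVIDERS.vertex.name); // 'Google Vertex AI'
```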
@@ -4,160 +4,39 @@
|
||||
*/
|
||||
|
||||
import { createOllama } from 'ollama-ai-provider';
|
||||
import { log } from '../../scripts/modules/utils.js'; // Import logging utility
|
||||
import { generateObject, generateText, streamText } from 'ai';
|
||||
import { BaseAIProvider } from './base-provider.js';
|
||||
|
||||
// Consider making model configurable via config-manager.js later
|
||||
const DEFAULT_MODEL = 'llama3'; // Or a suitable default for Ollama
|
||||
const DEFAULT_TEMPERATURE = 0.2;
|
||||
export class OllamaAIProvider extends BaseAIProvider {
|
||||
constructor() {
|
||||
super();
|
||||
this.name = 'Ollama';
|
||||
}
|
||||
|
||||
function getClient(baseUrl) {
|
||||
// baseUrl is optional, defaults to http://localhost:11434
|
||||
return createOllama({
|
||||
baseUrl: baseUrl || undefined
|
||||
});
|
||||
}
|
||||
/**
|
||||
* Override auth validation - Ollama doesn't require API keys
|
||||
* @param {object} params - Parameters to validate
|
||||
*/
|
||||
validateAuth(_params) {
|
||||
// Ollama runs locally and doesn't require API keys
|
||||
// No authentication validation needed
|
||||
}
|
||||
|
||||
/**
|
||||
* Generates text using an Ollama model.
|
||||
*
|
||||
* @param {object} params - Parameters for the generation.
|
||||
* @param {string} params.modelId - Specific model ID to use (overrides default).
|
||||
* @param {number} params.temperature - Generation temperature.
|
||||
* @param {Array<object>} params.messages - The conversation history (system/user prompts).
|
||||
* @param {number} [params.maxTokens] - Optional max tokens.
|
||||
* @param {string} [params.baseUrl] - Optional Ollama base URL.
|
||||
* @returns {Promise<string>} The generated text content.
|
||||
* @throws {Error} If API call fails.
|
||||
*/
|
||||
async function generateOllamaText({
|
||||
modelId = DEFAULT_MODEL,
|
||||
messages,
|
||||
maxTokens,
|
||||
temperature = DEFAULT_TEMPERATURE,
|
||||
baseUrl
|
||||
}) {
|
||||
log('info', `Generating text with Ollama model: ${modelId}`);
|
||||
/**
|
||||
* Creates and returns an Ollama client instance.
|
||||
* @param {object} params - Parameters for client initialization
|
||||
* @param {string} [params.baseURL] - Optional Ollama base URL (defaults to http://localhost:11434)
|
||||
* @returns {Function} Ollama client function
|
||||
* @throws {Error} If initialization fails
|
||||
*/
|
||||
getClient(params) {
|
||||
try {
|
||||
const { baseURL } = params;
|
||||
|
||||
try {
|
||||
const client = getClient(baseUrl);
|
||||
const result = await generateText({
|
||||
model: client(modelId),
|
||||
messages,
|
||||
maxTokens,
|
||||
temperature
|
||||
});
|
||||
log('debug', `Ollama generated text: ${result.text}`);
|
||||
return {
|
||||
text: result.text,
|
||||
usage: {
|
||||
inputTokens: result.usage.promptTokens,
|
||||
outputTokens: result.usage.completionTokens
|
||||
}
|
||||
};
|
||||
} catch (error) {
|
||||
log(
|
||||
'error',
|
||||
`Error generating text with Ollama (${modelId}): ${error.message}`
|
||||
);
|
||||
throw error;
|
||||
return createOllama({
|
||||
...(baseURL && { baseURL })
|
||||
});
|
||||
} catch (error) {
|
||||
this.handleError('client initialization', error);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Streams text using an Ollama model.
|
||||
*
|
||||
* @param {object} params - Parameters for the streaming.
|
||||
* @param {string} params.modelId - Specific model ID to use (overrides default).
|
||||
* @param {number} params.temperature - Generation temperature.
|
||||
* @param {Array<object>} params.messages - The conversation history.
|
||||
* @param {number} [params.maxTokens] - Optional max tokens.
|
||||
* @param {string} [params.baseUrl] - Optional Ollama base URL.
|
||||
* @returns {Promise<ReadableStream>} A readable stream of text deltas.
|
||||
* @throws {Error} If API call fails.
|
||||
*/
|
||||
async function streamOllamaText({
|
||||
modelId = DEFAULT_MODEL,
|
||||
temperature = DEFAULT_TEMPERATURE,
|
||||
messages,
|
||||
maxTokens,
|
||||
baseUrl
|
||||
}) {
|
||||
log('info', `Streaming text with Ollama model: ${modelId}`);
|
||||
|
||||
try {
|
||||
const ollama = getClient(baseUrl);
|
||||
const stream = await streamText({
|
||||
model: modelId,
|
||||
messages,
|
||||
temperature,
|
||||
maxTokens
|
||||
});
|
||||
return stream;
|
||||
} catch (error) {
|
||||
log(
|
||||
'error',
|
||||
`Error streaming text with Ollama (${modelId}): ${error.message}`
|
||||
);
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Generates a structured object using an Ollama model using the Vercel AI SDK's generateObject.
*
* @param {object} params - Parameters for the object generation.
* @param {string} params.modelId - Specific model ID to use (overrides default).
* @param {number} params.temperature - Generation temperature.
* @param {Array<object>} params.messages - The conversation history.
* @param {import('zod').ZodSchema} params.schema - Zod schema for the expected object.
* @param {string} params.objectName - Name for the object generation context.
* @param {number} [params.maxTokens] - Optional max tokens.
* @param {number} [params.maxRetries] - Max retries for validation/generation.
* @param {string} [params.baseUrl] - Optional Ollama base URL.
* @returns {Promise<object>} The generated object matching the schema.
* @throws {Error} If generation or validation fails.
*/
async function generateOllamaObject({
modelId = DEFAULT_MODEL,
temperature = DEFAULT_TEMPERATURE,
messages,
schema,
objectName = 'generated_object',
maxTokens,
maxRetries = 3,
baseUrl
}) {
log('info', `Generating object with Ollama model: ${modelId}`);
try {
const ollama = getClient(baseUrl);
const result = await generateObject({
model: ollama(modelId),
mode: 'tool',
schema: schema,
messages: messages,
tool: {
name: objectName,
description: `Generate a ${objectName} based on the prompt.`
},
maxOutputTokens: maxTokens,
temperature: temperature,
maxRetries: maxRetries
});
return {
object: result.object,
usage: {
inputTokens: result.usage.promptTokens,
outputTokens: result.usage.completionTokens
}
};
} catch (error) {
log(
'error',
`Ollama generateObject ('${objectName}') failed: ${error.message}`
);
throw error;
}
}

export { generateOllamaText, streamOllamaText, generateOllamaObject };
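For reference, a minimal usage sketch of the Ollama helpers exported above, assuming a locally running Ollama server, that the installed model name is valid, and that `generateOllamaText` mirrors the parameter shape of its siblings shown here; the import path, model ID, and base URL are illustrative, not taken from this diff:

```js
// Hypothetical caller of the exported Ollama helpers (not part of this PR).
import { generateOllamaText, generateOllamaObject } from './src/ai-providers/ollama.js';
import { z } from 'zod';

const { text, usage } = await generateOllamaText({
	modelId: 'llama3', // assumes this model has been pulled locally
	messages: [{ role: 'user', content: 'Summarize the project root change in one sentence.' }],
	maxTokens: 256,
	baseUrl: 'http://localhost:11434/api' // optional; omit to use the provider default
});
console.log(text, usage);

// Structured output goes through generateOllamaObject with a Zod schema.
const { object } = await generateOllamaObject({
	modelId: 'llama3',
	schema: z.object({ title: z.string(), priority: z.enum(['low', 'medium', 'high']) }),
	objectName: 'task_summary',
	messages: [{ role: 'user', content: 'Propose a follow-up task.' }]
});
console.log(object);
```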
@@ -1,199 +1,39 @@
import { createOpenAI } from '@ai-sdk/openai'; // Using openai provider from Vercel AI SDK
import { generateObject, generateText } from 'ai'; // Import necessary functions from 'ai'
import { log } from '../../scripts/modules/utils.js';

function getClient(apiKey, baseUrl) {
if (!apiKey) {
throw new Error('OpenAI API key is required.');
}
return createOpenAI({
apiKey: apiKey,
...(baseUrl && { baseURL: baseUrl })
});
}

/**
* Generates text using OpenAI models via Vercel AI SDK.
*
* @param {object} params - Parameters including apiKey, modelId, messages, maxTokens, temperature, baseUrl.
* @returns {Promise<object>} The generated text content and usage.
* @throws {Error} If API call fails.
* openai.js
* AI provider implementation for OpenAI models using Vercel AI SDK.
*/
export async function generateOpenAIText(params) {
const { apiKey, modelId, messages, maxTokens, temperature, baseUrl } = params;
log('debug', `generateOpenAIText called with model: ${modelId}`);

if (!apiKey) {
throw new Error('OpenAI API key is required.');
}
if (!modelId) {
throw new Error('OpenAI Model ID is required.');
}
if (!messages || !Array.isArray(messages) || messages.length === 0) {
throw new Error('Invalid or empty messages array provided for OpenAI.');
import { createOpenAI } from '@ai-sdk/openai';
import { BaseAIProvider } from './base-provider.js';

export class OpenAIProvider extends BaseAIProvider {
constructor() {
super();
this.name = 'OpenAI';
}

const openaiClient = getClient(apiKey, baseUrl);
/**
* Creates and returns an OpenAI client instance.
* @param {object} params - Parameters for client initialization
* @param {string} params.apiKey - OpenAI API key
* @param {string} [params.baseURL] - Optional custom API endpoint
* @returns {Function} OpenAI client function
* @throws {Error} If API key is missing or initialization fails
*/
getClient(params) {
try {
const { apiKey, baseURL } = params;

try {
const result = await generateText({
model: openaiClient(modelId),
messages,
maxTokens,
temperature
});

if (!result || !result.text) {
log(
'warn',
'OpenAI generateText response did not contain expected content.',
{ result }
);
throw new Error('Failed to extract content from OpenAI response.');
}
log(
'debug',
`OpenAI generateText completed successfully for model: ${modelId}`
);
return {
text: result.text.trim(),
usage: {
inputTokens: result.usage.promptTokens,
outputTokens: result.usage.completionTokens
if (!apiKey) {
throw new Error('OpenAI API key is required.');
}
};
} catch (error) {
log(
'error',
`Error in generateOpenAIText (Model: ${modelId}): ${error.message}`,
{ error }
);
throw new Error(
`OpenAI API error during text generation: ${error.message}`
);
}
}

/**
* Streams text using OpenAI models via Vercel AI SDK.
*
* @param {object} params - Parameters including apiKey, modelId, messages, maxTokens, temperature, baseUrl.
* @returns {Promise<ReadableStream>} A readable stream of text deltas.
* @throws {Error} If API call fails.
*/
export async function streamOpenAIText(params) {
const { apiKey, modelId, messages, maxTokens, temperature, baseUrl } = params;
log('debug', `streamOpenAIText called with model: ${modelId}`);

if (!apiKey) {
throw new Error('OpenAI API key is required.');
}
if (!modelId) {
throw new Error('OpenAI Model ID is required.');
}
if (!messages || !Array.isArray(messages) || messages.length === 0) {
throw new Error(
'Invalid or empty messages array provided for OpenAI streaming.'
);
}

const openaiClient = getClient(apiKey, baseUrl);

try {
const stream = await openaiClient.chat.stream(messages, {
model: modelId,
max_tokens: maxTokens,
temperature
});

log(
'debug',
`OpenAI streamText initiated successfully for model: ${modelId}`
);
return stream;
} catch (error) {
log(
'error',
`Error initiating OpenAI stream (Model: ${modelId}): ${error.message}`,
{ error }
);
throw new Error(
`OpenAI API error during streaming initiation: ${error.message}`
);
}
}

/**
* Generates structured objects using OpenAI models via Vercel AI SDK.
*
* @param {object} params - Parameters including apiKey, modelId, messages, schema, objectName, maxTokens, temperature, baseUrl.
* @returns {Promise<object>} The generated object matching the schema and usage.
* @throws {Error} If API call fails or object generation fails.
*/
export async function generateOpenAIObject(params) {
const {
apiKey,
modelId,
messages,
schema,
objectName,
maxTokens,
temperature,
baseUrl
} = params;
log(
'debug',
`generateOpenAIObject called with model: ${modelId}, object: ${objectName}`
);

if (!apiKey) throw new Error('OpenAI API key is required.');
if (!modelId) throw new Error('OpenAI Model ID is required.');
if (!messages || !Array.isArray(messages) || messages.length === 0)
throw new Error('Invalid messages array for OpenAI object generation.');
if (!schema)
throw new Error('Schema is required for OpenAI object generation.');
if (!objectName)
throw new Error('Object name is required for OpenAI object generation.');

const openaiClient = getClient(apiKey, baseUrl);

try {
const result = await generateObject({
model: openaiClient(modelId),
schema: schema,
messages: messages,
mode: 'tool',
maxTokens: maxTokens,
temperature: temperature
});

log(
'debug',
`OpenAI generateObject completed successfully for model: ${modelId}`
);
if (!result || typeof result.object === 'undefined') {
log(
'warn',
'OpenAI generateObject response did not contain expected object.',
{ result }
);
throw new Error('Failed to extract object from OpenAI response.');
return createOpenAI({
apiKey,
...(baseURL && { baseURL })
});
} catch (error) {
this.handleError('client initialization', error);
}
return {
object: result.object,
usage: {
inputTokens: result.usage.promptTokens,
outputTokens: result.usage.completionTokens
}
};
} catch (error) {
log(
'error',
`Error in generateOpenAIObject (Model: ${modelId}, Object: ${objectName}): ${error.message}`,
{ error }
);
throw new Error(
`OpenAI API error during object generation: ${error.message}`
);
}
}
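The refactor above moves client construction behind `getClient(params)` on the new `OpenAIProvider` class. A minimal sketch of how the refactored provider would be used, assuming the surrounding unified AI service passes the resolved API key and that generation still goes through the Vercel AI SDK's `generateText`; the import path and model ID are illustrative:

```js
import { generateText } from 'ai';
import { OpenAIProvider } from './src/ai-providers/openai.js'; // path as used in this PR

const provider = new OpenAIProvider();
// getClient() only builds the SDK client; the model is chosen at call time.
// Note the baseURL spelling used by the new providers.
const openai = provider.getClient({
	apiKey: process.env.OPENAI_API_KEY,
	baseURL: process.env.OPENAI_BASE_URL // optional custom endpoint
});

const { text, usage } = await generateText({
	model: openai('gpt-4o-mini'), // illustrative model ID
	messages: [{ role: 'user', content: 'Say hello.' }],
	maxTokens: 64,
	temperature: 0.2
});
console.log(text, usage);
```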
@@ -1,246 +1,39 @@
|
||||
/**
|
||||
* openrouter.js
|
||||
* AI provider implementation for OpenRouter models using Vercel AI SDK.
|
||||
*/
|
||||
|
||||
import { createOpenRouter } from '@openrouter/ai-sdk-provider';
|
||||
import { generateText, streamText, generateObject } from 'ai';
|
||||
import { log } from '../../scripts/modules/utils.js'; // Assuming utils.js is in scripts/modules
|
||||
import { BaseAIProvider } from './base-provider.js';
|
||||
|
||||
function getClient(apiKey, baseUrl) {
|
||||
if (!apiKey) throw new Error('OpenRouter API key is required.');
|
||||
return createOpenRouter({
|
||||
apiKey,
|
||||
...(baseUrl && { baseURL: baseUrl })
|
||||
});
|
||||
}
|
||||
export class OpenRouterAIProvider extends BaseAIProvider {
|
||||
constructor() {
|
||||
super();
|
||||
this.name = 'OpenRouter';
|
||||
}
|
||||
|
||||
/**
|
||||
* Generates text using an OpenRouter chat model.
|
||||
*
|
||||
* @param {object} params - Parameters for the text generation.
|
||||
* @param {string} params.apiKey - OpenRouter API key.
|
||||
* @param {string} params.modelId - The OpenRouter model ID (e.g., 'anthropic/claude-3.5-sonnet').
|
||||
* @param {Array<object>} params.messages - Array of message objects (system, user, assistant).
|
||||
* @param {number} [params.maxTokens] - Maximum tokens to generate.
|
||||
* @param {number} [params.temperature] - Sampling temperature.
|
||||
* @param {string} [params.baseUrl] - Base URL for the OpenRouter API.
|
||||
* @returns {Promise<string>} The generated text content.
|
||||
* @throws {Error} If the API call fails.
|
||||
*/
|
||||
async function generateOpenRouterText({
|
||||
apiKey,
|
||||
modelId,
|
||||
messages,
|
||||
maxTokens,
|
||||
temperature,
|
||||
baseUrl,
|
||||
...rest // Capture any other Vercel AI SDK compatible parameters
|
||||
}) {
|
||||
if (!apiKey) throw new Error('OpenRouter API key is required.');
|
||||
if (!modelId) throw new Error('OpenRouter model ID is required.');
|
||||
if (!messages || messages.length === 0)
|
||||
throw new Error('Messages array cannot be empty.');
|
||||
/**
|
||||
* Creates and returns an OpenRouter client instance.
|
||||
* @param {object} params - Parameters for client initialization
|
||||
* @param {string} params.apiKey - OpenRouter API key
|
||||
* @param {string} [params.baseURL] - Optional custom API endpoint
|
||||
* @returns {Function} OpenRouter client function
|
||||
* @throws {Error} If API key is missing or initialization fails
|
||||
*/
|
||||
getClient(params) {
|
||||
try {
|
||||
const { apiKey, baseURL } = params;
|
||||
|
||||
try {
|
||||
const openrouter = getClient(apiKey, baseUrl);
|
||||
const model = openrouter.chat(modelId); // Assuming chat model
|
||||
|
||||
// Capture the full result from generateText
|
||||
const result = await generateText({
|
||||
model,
|
||||
messages,
|
||||
maxTokens,
|
||||
temperature,
|
||||
...rest // Pass any additional parameters
|
||||
});
|
||||
|
||||
// Check if text and usage are present
|
||||
if (!result || typeof result.text !== 'string') {
|
||||
log(
|
||||
'warn',
|
||||
`OpenRouter generateText for model ${modelId} did not return expected text.`,
|
||||
{ result }
|
||||
);
|
||||
throw new Error('Failed to extract text from OpenRouter response.');
|
||||
}
|
||||
if (!result.usage) {
|
||||
log(
|
||||
'warn',
|
||||
`OpenRouter generateText for model ${modelId} did not return usage data.`,
|
||||
{ result }
|
||||
);
|
||||
// Decide if this is critical. For now, let it pass but telemetry will be incomplete.
|
||||
}
|
||||
|
||||
log('debug', `OpenRouter generateText completed for model ${modelId}`);
|
||||
// Return text and usage
|
||||
return {
|
||||
text: result.text,
|
||||
usage: {
|
||||
inputTokens: result.usage.promptTokens,
|
||||
outputTokens: result.usage.completionTokens
|
||||
if (!apiKey) {
|
||||
throw new Error('OpenRouter API key is required.');
|
||||
}
|
||||
};
|
||||
} catch (error) {
|
||||
let detailedMessage = `OpenRouter generateText failed for model ${modelId}: ${error.message}`;
|
||||
if (error.cause) {
|
||||
detailedMessage += `\n\nCause:\n\n ${typeof error.cause === 'string' ? error.cause : JSON.stringify(error.cause)}`;
|
||||
|
||||
return createOpenRouter({
|
||||
apiKey,
|
||||
...(baseURL && { baseURL })
|
||||
});
|
||||
} catch (error) {
|
||||
this.handleError('client initialization', error);
|
||||
}
|
||||
// Vercel AI SDK sometimes wraps the actual API error response in error.data
|
||||
if (error.data) {
|
||||
detailedMessage += `\n\nData:\n\n ${JSON.stringify(error.data)}`;
|
||||
}
|
||||
// Log the original error object for full context if needed for deeper debugging
|
||||
log('error', detailedMessage, { originalErrorObject: error });
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Streams text using an OpenRouter chat model.
|
||||
*
|
||||
* @param {object} params - Parameters for the text streaming.
|
||||
* @param {string} params.apiKey - OpenRouter API key.
|
||||
* @param {string} params.modelId - The OpenRouter model ID (e.g., 'anthropic/claude-3.5-sonnet').
|
||||
* @param {Array<object>} params.messages - Array of message objects (system, user, assistant).
|
||||
* @param {number} [params.maxTokens] - Maximum tokens to generate.
|
||||
* @param {number} [params.temperature] - Sampling temperature.
|
||||
* @param {string} [params.baseUrl] - Base URL for the OpenRouter API.
|
||||
* @returns {Promise<ReadableStream<string>>} A readable stream of text deltas.
|
||||
* @throws {Error} If the API call fails.
|
||||
*/
|
||||
async function streamOpenRouterText({
|
||||
apiKey,
|
||||
modelId,
|
||||
messages,
|
||||
maxTokens,
|
||||
temperature,
|
||||
baseUrl,
|
||||
...rest
|
||||
}) {
|
||||
if (!apiKey) throw new Error('OpenRouter API key is required.');
|
||||
if (!modelId) throw new Error('OpenRouter model ID is required.');
|
||||
if (!messages || messages.length === 0)
|
||||
throw new Error('Messages array cannot be empty.');
|
||||
|
||||
try {
|
||||
const openrouter = getClient(apiKey, baseUrl);
|
||||
const model = openrouter.chat(modelId);
|
||||
|
||||
// Directly return the stream from the Vercel AI SDK function
|
||||
const stream = await streamText({
|
||||
model,
|
||||
messages,
|
||||
maxTokens,
|
||||
temperature,
|
||||
...rest
|
||||
});
|
||||
return stream;
|
||||
} catch (error) {
|
||||
let detailedMessage = `OpenRouter streamText failed for model ${modelId}: ${error.message}`;
|
||||
if (error.cause) {
|
||||
detailedMessage += `\n\nCause:\n\n ${typeof error.cause === 'string' ? error.cause : JSON.stringify(error.cause)}`;
|
||||
}
|
||||
if (error.data) {
|
||||
detailedMessage += `\n\nData:\n\n ${JSON.stringify(error.data)}`;
|
||||
}
|
||||
log('error', detailedMessage, { originalErrorObject: error });
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Generates a structured object using an OpenRouter chat model.
|
||||
*
|
||||
* @param {object} params - Parameters for object generation.
|
||||
* @param {string} params.apiKey - OpenRouter API key.
|
||||
* @param {string} params.modelId - The OpenRouter model ID.
|
||||
* @param {import('zod').ZodSchema} params.schema - The Zod schema for the expected object.
|
||||
* @param {Array<object>} params.messages - Array of message objects.
|
||||
* @param {string} [params.objectName='generated_object'] - Name for object/tool.
|
||||
* @param {number} [params.maxRetries=3] - Max retries for object generation.
|
||||
* @param {number} [params.maxTokens] - Maximum tokens.
|
||||
* @param {number} [params.temperature] - Temperature.
|
||||
* @param {string} [params.baseUrl] - Base URL for the OpenRouter API.
|
||||
* @returns {Promise<object>} The generated object matching the schema.
|
||||
* @throws {Error} If the API call fails or validation fails.
|
||||
*/
|
||||
async function generateOpenRouterObject({
|
||||
apiKey,
|
||||
modelId,
|
||||
schema,
|
||||
messages,
|
||||
objectName = 'generated_object',
|
||||
maxRetries = 3,
|
||||
maxTokens,
|
||||
temperature,
|
||||
baseUrl,
|
||||
...rest
|
||||
}) {
|
||||
if (!apiKey) throw new Error('OpenRouter API key is required.');
|
||||
if (!modelId) throw new Error('OpenRouter model ID is required.');
|
||||
if (!schema) throw new Error('Zod schema is required for object generation.');
|
||||
if (!messages || messages.length === 0)
|
||||
throw new Error('Messages array cannot be empty.');
|
||||
|
||||
try {
|
||||
const openrouter = getClient(apiKey, baseUrl);
|
||||
const model = openrouter.chat(modelId);
|
||||
|
||||
// Capture the full result from generateObject
|
||||
const result = await generateObject({
|
||||
model,
|
||||
schema,
|
||||
mode: 'tool',
|
||||
tool: {
|
||||
name: objectName,
|
||||
description: `Generate an object conforming to the ${objectName} schema.`,
|
||||
parameters: schema
|
||||
},
|
||||
messages,
|
||||
maxTokens,
|
||||
temperature,
|
||||
maxRetries,
|
||||
...rest
|
||||
});
|
||||
|
||||
// Check if object and usage are present
|
||||
if (!result || typeof result.object === 'undefined') {
|
||||
log(
|
||||
'warn',
|
||||
`OpenRouter generateObject for model ${modelId} did not return expected object.`,
|
||||
{ result }
|
||||
);
|
||||
throw new Error('Failed to extract object from OpenRouter response.');
|
||||
}
|
||||
if (!result.usage) {
|
||||
log(
|
||||
'warn',
|
||||
`OpenRouter generateObject for model ${modelId} did not return usage data.`,
|
||||
{ result }
|
||||
);
|
||||
}
|
||||
|
||||
log('debug', `OpenRouter generateObject completed for model ${modelId}`);
|
||||
// Return object and usage
|
||||
return {
|
||||
object: result.object,
|
||||
usage: {
|
||||
inputTokens: result.usage.promptTokens,
|
||||
outputTokens: result.usage.completionTokens
|
||||
}
|
||||
};
|
||||
} catch (error) {
|
||||
let detailedMessage = `OpenRouter generateObject failed for model ${modelId}: ${error.message}`;
|
||||
if (error.cause) {
|
||||
detailedMessage += `\n\nCause:\n\n ${typeof error.cause === 'string' ? error.cause : JSON.stringify(error.cause)}`;
|
||||
}
|
||||
if (error.data) {
|
||||
detailedMessage += `\n\nData:\n\n ${JSON.stringify(error.data)}`;
|
||||
}
|
||||
log('error', detailedMessage, { originalErrorObject: error });
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||
export {
|
||||
generateOpenRouterText,
|
||||
streamOpenRouterText,
|
||||
generateOpenRouterObject
|
||||
};
|
||||
|
||||
@@ -1,181 +1,39 @@
|
||||
/**
|
||||
* src/ai-providers/perplexity.js
|
||||
*
|
||||
* Implementation for interacting with Perplexity models
|
||||
* using the Vercel AI SDK.
|
||||
* perplexity.js
|
||||
* AI provider implementation for Perplexity models using Vercel AI SDK.
|
||||
*/
|
||||
|
||||
import { createPerplexity } from '@ai-sdk/perplexity';
|
||||
import { generateText, streamText, generateObject, streamObject } from 'ai';
|
||||
import { log } from '../../scripts/modules/utils.js';
|
||||
import { BaseAIProvider } from './base-provider.js';
|
||||
|
||||
// --- Client Instantiation ---
|
||||
// Similar to Anthropic, this expects the resolved API key to be passed in.
|
||||
function getClient(apiKey, baseUrl) {
|
||||
if (!apiKey) {
|
||||
throw new Error('Perplexity API key is required.');
|
||||
export class PerplexityAIProvider extends BaseAIProvider {
|
||||
constructor() {
|
||||
super();
|
||||
this.name = 'Perplexity';
|
||||
}
|
||||
return createPerplexity({
|
||||
apiKey: apiKey,
|
||||
...(baseUrl && { baseURL: baseUrl })
|
||||
});
|
||||
}
|
||||
|
||||
// --- Standardized Service Function Implementations ---
|
||||
/**
|
||||
* Creates and returns a Perplexity client instance.
|
||||
* @param {object} params - Parameters for client initialization
|
||||
* @param {string} params.apiKey - Perplexity API key
|
||||
* @param {string} [params.baseURL] - Optional custom API endpoint
|
||||
* @returns {Function} Perplexity client function
|
||||
* @throws {Error} If API key is missing or initialization fails
|
||||
*/
|
||||
getClient(params) {
|
||||
try {
|
||||
const { apiKey, baseURL } = params;
|
||||
|
||||
/**
|
||||
* Generates text using a Perplexity model.
|
||||
*
|
||||
* @param {object} params - Parameters for the text generation.
|
||||
* @param {string} params.apiKey - The Perplexity API key.
|
||||
* @param {string} params.modelId - The specific Perplexity model ID.
|
||||
* @param {Array<object>} params.messages - The messages array.
|
||||
* @param {number} [params.maxTokens] - Maximum tokens for the response.
|
||||
* @param {number} [params.temperature] - Temperature for generation.
|
||||
* @param {string} [params.baseUrl] - Base URL for the Perplexity API.
|
||||
* @returns {Promise<string>} The generated text content.
|
||||
* @throws {Error} If the API call fails.
|
||||
*/
|
||||
export async function generatePerplexityText({
|
||||
apiKey,
|
||||
modelId,
|
||||
messages,
|
||||
maxTokens,
|
||||
temperature,
|
||||
baseUrl
|
||||
}) {
|
||||
log('debug', `Generating Perplexity text with model: ${modelId}`);
|
||||
try {
|
||||
const client = getClient(apiKey, baseUrl);
|
||||
const result = await generateText({
|
||||
model: client(modelId),
|
||||
messages: messages,
|
||||
maxTokens: maxTokens,
|
||||
temperature: temperature
|
||||
});
|
||||
log(
|
||||
'debug',
|
||||
`Perplexity generateText result received. Tokens: ${result.usage.completionTokens}/${result.usage.promptTokens}`
|
||||
);
|
||||
|
||||
return {
|
||||
text: result.text,
|
||||
usage: {
|
||||
inputTokens: result.usage.promptTokens,
|
||||
outputTokens: result.usage.completionTokens
|
||||
if (!apiKey) {
|
||||
throw new Error('Perplexity API key is required.');
|
||||
}
|
||||
};
|
||||
} catch (error) {
|
||||
log('error', `Perplexity generateText failed: ${error.message}`);
|
||||
throw error;
|
||||
|
||||
return createPerplexity({
|
||||
apiKey,
|
||||
baseURL: baseURL || 'https://api.perplexity.ai'
|
||||
});
|
||||
} catch (error) {
|
||||
this.handleError('client initialization', error);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Streams text using a Perplexity model.
|
||||
*
|
||||
* @param {object} params - Parameters for the text streaming.
|
||||
* @param {string} params.apiKey - The Perplexity API key.
|
||||
* @param {string} params.modelId - The specific Perplexity model ID.
|
||||
* @param {Array<object>} params.messages - The messages array.
|
||||
* @param {number} [params.maxTokens] - Maximum tokens for the response.
|
||||
* @param {number} [params.temperature] - Temperature for generation.
|
||||
* @param {string} [params.baseUrl] - Base URL for the Perplexity API.
|
||||
* @returns {Promise<object>} The full stream result object from the Vercel AI SDK.
|
||||
* @throws {Error} If the API call fails to initiate the stream.
|
||||
*/
|
||||
export async function streamPerplexityText({
|
||||
apiKey,
|
||||
modelId,
|
||||
messages,
|
||||
maxTokens,
|
||||
temperature,
|
||||
baseUrl
|
||||
}) {
|
||||
log('debug', `Streaming Perplexity text with model: ${modelId}`);
|
||||
try {
|
||||
const client = getClient(apiKey, baseUrl);
|
||||
const stream = await streamText({
|
||||
model: client(modelId),
|
||||
messages: messages,
|
||||
maxTokens: maxTokens,
|
||||
temperature: temperature
|
||||
});
|
||||
return stream;
|
||||
} catch (error) {
|
||||
log('error', `Perplexity streamText failed: ${error.message}`);
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Generates a structured object using a Perplexity model.
|
||||
* Note: Perplexity API might not directly support structured object generation
|
||||
* in the same way as OpenAI or Anthropic. This function might need
|
||||
* adjustments or might not be feasible depending on the model's capabilities
|
||||
* and the Vercel AI SDK's support for Perplexity in this context.
|
||||
*
|
||||
* @param {object} params - Parameters for object generation.
|
||||
* @param {string} params.apiKey - The Perplexity API key.
|
||||
* @param {string} params.modelId - The specific Perplexity model ID.
|
||||
* @param {Array<object>} params.messages - The messages array.
|
||||
* @param {import('zod').ZodSchema} params.schema - The Zod schema for the object.
|
||||
* @param {string} params.objectName - A name for the object/tool.
|
||||
* @param {number} [params.maxTokens] - Maximum tokens for the response.
|
||||
* @param {number} [params.temperature] - Temperature for generation.
|
||||
* @param {number} [params.maxRetries] - Max retries for validation/generation.
|
||||
* @param {string} [params.baseUrl] - Base URL for the Perplexity API.
|
||||
* @returns {Promise<object>} The generated object matching the schema.
|
||||
* @throws {Error} If generation or validation fails or is unsupported.
|
||||
*/
|
||||
export async function generatePerplexityObject({
|
||||
apiKey,
|
||||
modelId,
|
||||
messages,
|
||||
schema,
|
||||
objectName = 'generated_object',
|
||||
maxTokens,
|
||||
temperature,
|
||||
maxRetries = 1,
|
||||
baseUrl
|
||||
}) {
|
||||
log(
|
||||
'debug',
|
||||
`Attempting to generate Perplexity object ('${objectName}') with model: ${modelId}`
|
||||
);
|
||||
log(
|
||||
'warn',
|
||||
'generateObject support for Perplexity might be limited or experimental.'
|
||||
);
|
||||
try {
|
||||
const client = getClient(apiKey, baseUrl);
|
||||
const result = await generateObject({
|
||||
model: client(modelId),
|
||||
schema: schema,
|
||||
messages: messages,
|
||||
maxTokens: maxTokens,
|
||||
temperature: temperature,
|
||||
maxRetries: maxRetries
|
||||
});
|
||||
log(
|
||||
'debug',
|
||||
`Perplexity generateObject result received. Tokens: ${result.usage.completionTokens}/${result.usage.promptTokens}`
|
||||
);
|
||||
return {
|
||||
object: result.object,
|
||||
usage: {
|
||||
inputTokens: result.usage.promptTokens,
|
||||
outputTokens: result.usage.completionTokens
|
||||
}
|
||||
};
|
||||
} catch (error) {
|
||||
log(
|
||||
'error',
|
||||
`Perplexity generateObject ('${objectName}') failed: ${error.message}`
|
||||
);
|
||||
throw new Error(
|
||||
`Failed to generate object with Perplexity: ${error.message}. Structured output might not be fully supported.`
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: Implement streamPerplexityObject if needed and feasible.
|
||||
|
||||
@@ -1,178 +1,39 @@
|
||||
/**
|
||||
* src/ai-providers/xai.js
|
||||
*
|
||||
* Implementation for interacting with xAI models (e.g., Grok)
|
||||
* using the Vercel AI SDK.
|
||||
* xai.js
|
||||
* AI provider implementation for xAI models using Vercel AI SDK.
|
||||
*/
|
||||
|
||||
import { createXai } from '@ai-sdk/xai';
|
||||
import { generateText, streamText, generateObject } from 'ai'; // Only import what's used
|
||||
import { log } from '../../scripts/modules/utils.js'; // Assuming utils is accessible
|
||||
import { BaseAIProvider } from './base-provider.js';
|
||||
|
||||
// --- Client Instantiation ---
|
||||
function getClient(apiKey, baseUrl) {
|
||||
if (!apiKey) {
|
||||
throw new Error('xAI API key is required.');
|
||||
export class XAIProvider extends BaseAIProvider {
|
||||
constructor() {
|
||||
super();
|
||||
this.name = 'xAI';
|
||||
}
|
||||
return createXai({
|
||||
apiKey: apiKey,
|
||||
...(baseUrl && { baseURL: baseUrl })
|
||||
});
|
||||
}
|
||||
|
||||
// --- Standardized Service Function Implementations ---
|
||||
/**
|
||||
* Creates and returns an xAI client instance.
|
||||
* @param {object} params - Parameters for client initialization
|
||||
* @param {string} params.apiKey - xAI API key
|
||||
* @param {string} [params.baseURL] - Optional custom API endpoint
|
||||
* @returns {Function} xAI client function
|
||||
* @throws {Error} If API key is missing or initialization fails
|
||||
*/
|
||||
getClient(params) {
|
||||
try {
|
||||
const { apiKey, baseURL } = params;
|
||||
|
||||
/**
|
||||
* Generates text using an xAI model.
|
||||
*
|
||||
* @param {object} params - Parameters for the text generation.
|
||||
* @param {string} params.apiKey - The xAI API key.
|
||||
* @param {string} params.modelId - The specific xAI model ID (e.g., 'grok-3').
|
||||
* @param {Array<object>} params.messages - The messages array (e.g., [{ role: 'user', content: '...' }]).
|
||||
* @param {number} [params.maxTokens] - Maximum tokens for the response.
|
||||
* @param {number} [params.temperature] - Temperature for generation.
|
||||
* @param {string} [params.baseUrl] - The base URL for the xAI API.
|
||||
* @returns {Promise<object>} The generated text content and usage.
|
||||
* @throws {Error} If the API call fails.
|
||||
*/
|
||||
export async function generateXaiText({
|
||||
apiKey,
|
||||
modelId,
|
||||
messages,
|
||||
maxTokens,
|
||||
temperature,
|
||||
baseUrl
|
||||
}) {
|
||||
log('debug', `Generating xAI text with model: ${modelId}`);
|
||||
try {
|
||||
const client = getClient(apiKey, baseUrl);
|
||||
const result = await generateText({
|
||||
model: client(modelId),
|
||||
messages: messages,
|
||||
maxTokens: maxTokens,
|
||||
temperature: temperature
|
||||
});
|
||||
log(
|
||||
'debug',
|
||||
`xAI generateText result received. Tokens: ${result.usage.completionTokens}/${result.usage.promptTokens}`
|
||||
);
|
||||
// Return text and usage
|
||||
return {
|
||||
text: result.text,
|
||||
usage: {
|
||||
inputTokens: result.usage.promptTokens,
|
||||
outputTokens: result.usage.completionTokens
|
||||
if (!apiKey) {
|
||||
throw new Error('xAI API key is required.');
|
||||
}
|
||||
};
|
||||
} catch (error) {
|
||||
log('error', `xAI generateText failed: ${error.message}`);
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Streams text using an xAI model.
|
||||
*
|
||||
* @param {object} params - Parameters for the text streaming.
|
||||
* @param {string} params.apiKey - The xAI API key.
|
||||
* @param {string} params.modelId - The specific xAI model ID.
|
||||
* @param {Array<object>} params.messages - The messages array.
|
||||
* @param {number} [params.maxTokens] - Maximum tokens for the response.
|
||||
* @param {number} [params.temperature] - Temperature for generation.
|
||||
* @param {string} [params.baseUrl] - The base URL for the xAI API.
|
||||
* @returns {Promise<object>} The full stream result object from the Vercel AI SDK.
|
||||
* @throws {Error} If the API call fails to initiate the stream.
|
||||
*/
|
||||
export async function streamXaiText({
|
||||
apiKey,
|
||||
modelId,
|
||||
messages,
|
||||
maxTokens,
|
||||
temperature,
|
||||
baseUrl
|
||||
}) {
|
||||
log('debug', `Streaming xAI text with model: ${modelId}`);
|
||||
try {
|
||||
const client = getClient(apiKey, baseUrl);
|
||||
const stream = await streamText({
|
||||
model: client(modelId),
|
||||
messages: messages,
|
||||
maxTokens: maxTokens,
|
||||
temperature: temperature
|
||||
});
|
||||
return stream;
|
||||
} catch (error) {
|
||||
log('error', `xAI streamText failed: ${error.message}`, error.stack);
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Generates a structured object using an xAI model.
|
||||
* Note: Based on search results, xAI models do not currently support Object Generation.
|
||||
* This function is included for structural consistency but will likely fail if called.
|
||||
*
|
||||
* @param {object} params - Parameters for object generation.
|
||||
* @param {string} params.apiKey - The xAI API key.
|
||||
* @param {string} params.modelId - The specific xAI model ID.
|
||||
* @param {Array<object>} params.messages - The messages array.
|
||||
* @param {import('zod').ZodSchema} params.schema - The Zod schema for the object.
|
||||
* @param {string} params.objectName - A name for the object/tool.
|
||||
* @param {number} [params.maxTokens] - Maximum tokens for the response.
|
||||
* @param {number} [params.temperature] - Temperature for generation.
|
||||
* @param {number} [params.maxRetries] - Max retries for validation/generation.
|
||||
* @param {string} [params.baseUrl] - The base URL for the xAI API.
|
||||
* @returns {Promise<object>} The generated object matching the schema and its usage.
|
||||
* @throws {Error} If generation or validation fails.
|
||||
*/
|
||||
export async function generateXaiObject({
|
||||
apiKey,
|
||||
modelId,
|
||||
messages,
|
||||
schema,
|
||||
objectName = 'generated_xai_object',
|
||||
maxTokens,
|
||||
temperature,
|
||||
maxRetries = 3,
|
||||
baseUrl
|
||||
}) {
|
||||
log(
|
||||
'warn',
|
||||
`Attempting to generate xAI object ('${objectName}') with model: ${modelId}. This may not be supported by the provider.`
|
||||
);
|
||||
try {
|
||||
const client = getClient(apiKey, baseUrl);
|
||||
const result = await generateObject({
|
||||
model: client(modelId),
|
||||
// Note: mode might need adjustment if xAI ever supports object generation differently
|
||||
mode: 'tool',
|
||||
schema: schema,
|
||||
messages: messages,
|
||||
tool: {
|
||||
name: objectName,
|
||||
description: `Generate a ${objectName} based on the prompt.`,
|
||||
parameters: schema
|
||||
},
|
||||
maxTokens: maxTokens,
|
||||
temperature: temperature,
|
||||
maxRetries: maxRetries
|
||||
});
|
||||
log(
|
||||
'debug',
|
||||
`xAI generateObject result received. Tokens: ${result.usage.completionTokens}/${result.usage.promptTokens}`
|
||||
);
|
||||
// Return object and usage
|
||||
return {
|
||||
object: result.object,
|
||||
usage: {
|
||||
inputTokens: result.usage.promptTokens,
|
||||
outputTokens: result.usage.completionTokens
|
||||
}
|
||||
};
|
||||
} catch (error) {
|
||||
log(
|
||||
'error',
|
||||
`xAI generateObject ('${objectName}') failed: ${error.message}. (Likely unsupported by provider)`
|
||||
);
|
||||
throw error;
|
||||
return createXai({
|
||||
apiKey,
|
||||
baseURL: baseURL || 'https://api.x.ai/v1'
|
||||
});
|
||||
} catch (error) {
|
||||
this.handleError('client initialization', error);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
94
tasks/task_092.txt
Normal file
@@ -0,0 +1,94 @@
# Task ID: 92
# Title: Implement Project Root Environment Variable Support in MCP Configuration
# Status: in-progress
# Dependencies: 1, 3, 17
# Priority: medium
# Description: Add support for a 'TASK_MASTER_PROJECT_ROOT' environment variable in MCP configuration, allowing it to be set in both mcp.json and .env, with precedence over other methods. This will define the root directory for the MCP server and take precedence over all other project root resolution methods. The implementation should be backward compatible with existing workflows that don't use this variable.
# Details:
Update the MCP server configuration system to support the TASK_MASTER_PROJECT_ROOT environment variable as the standard way to specify the project root directory. This provides better namespacing and avoids conflicts with other tools that might use a generic PROJECT_ROOT variable. Implement a clear precedence order for project root resolution:

1. TASK_MASTER_PROJECT_ROOT environment variable (from shell or .env file)
2. 'projectRoot' key in mcp_config.toml or mcp.json configuration files
3. Existing resolution logic (CLI args, current working directory, etc.)

Modify the configuration loading logic to check for these sources in the specified order, ensuring backward compatibility. All MCP tools and components should use this standardized project root resolution logic. The TASK_MASTER_PROJECT_ROOT environment variable will be required because path resolution is delegated to the MCP client implementation, ensuring consistent behavior across different environments.

Implementation steps:
1. Identify all code locations where project root is determined (initialization, utility functions)
2. Update configuration loaders to check for TASK_MASTER_PROJECT_ROOT in environment variables
3. Add support for 'projectRoot' in configuration files as a fallback
4. Refactor project root resolution logic to follow the new precedence rules
5. Ensure all MCP tools and functions use the updated resolution logic
6. Add comprehensive error handling for cases where TASK_MASTER_PROJECT_ROOT is not set or invalid
7. Implement validation to ensure the specified directory exists and is accessible
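A minimal sketch of the resolution helper described in steps 1-4 and 7, assuming Node 18+ ESM; the file name, function name, and parameter names are illustrative rather than the final implementation:

```js
// scripts/modules/find-project-root.js (illustrative location)
import fs from 'fs';
import path from 'path';

export function resolveProjectRoot({ configRoot, fallbackRoot } = {}) {
	// Precedence: 1) TASK_MASTER_PROJECT_ROOT from the shell or a loaded .env file,
	// 2) 'projectRoot' from mcp_config.toml / mcp.json (passed in as configRoot),
	// 3) the existing logic (CLI arg / cwd), passed in as fallbackRoot.
	const candidate =
		process.env.TASK_MASTER_PROJECT_ROOT || configRoot || fallbackRoot || process.cwd();
	const resolved = path.resolve(candidate);
	if (!fs.existsSync(resolved) || !fs.statSync(resolved).isDirectory()) {
		throw new Error(
			`Project root resolution failed: '${resolved}' does not exist or is not a directory.`
		);
	}
	return resolved;
}
```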
# Test Strategy:
1. Write unit tests to verify that the config loader correctly reads project root from environment variables and configuration files with the expected precedence:
- Test TASK_MASTER_PROJECT_ROOT environment variable takes precedence when set
- Test 'projectRoot' in configuration files is used when environment variable is absent
- Test fallback to existing resolution logic when neither is specified

2. Add integration tests to ensure that the MCP server and all tools use the correct project root:
- Test server startup with TASK_MASTER_PROJECT_ROOT set to various valid and invalid paths
- Test configuration file loading from the specified project root
- Test path resolution for resources relative to the project root

3. Test backward compatibility:
- Verify existing workflows function correctly without the new variables
- Ensure no regression in projects not using the new configuration options

4. Manual testing:
- Set TASK_MASTER_PROJECT_ROOT in shell environment and verify correct behavior
- Set TASK_MASTER_PROJECT_ROOT in .env file and verify it's properly loaded
- Configure 'projectRoot' in configuration files and test precedence
- Test with invalid or non-existent directories to verify error handling
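For the manual checks above, a hedged example of wiring the variable through an MCP client config; the server name, command, and path are placeholders, and the overall shape follows the common MCP client `mcpServers` convention rather than anything prescribed by this task:

```json
{
	"mcpServers": {
		"task-master-ai": {
			"command": "npx",
			"args": ["-y", "task-master-ai"],
			"env": {
				"TASK_MASTER_PROJECT_ROOT": "/absolute/path/to/your/project"
			}
		}
	}
}
```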

# Subtasks:
## 92.1. Update configuration loader to check for TASK_MASTER_PROJECT_ROOT environment variable [pending]
### Dependencies: None
### Description: Modify the configuration loading system to check for the TASK_MASTER_PROJECT_ROOT environment variable as the primary source for project root directory. Ensure proper error handling if the variable is set but points to a non-existent or inaccessible directory.
### Details:

## 92.2. Add support for 'projectRoot' in configuration files [pending]
### Dependencies: None
### Description: Implement support for a 'projectRoot' key in mcp_config.toml and mcp.json configuration files as a fallback when the environment variable is not set. Update the configuration parser to recognize and validate this field.
### Details:

## 92.3. Refactor project root resolution logic with clear precedence rules [pending]
### Dependencies: None
### Description: Create a unified project root resolution function that follows the precedence order: 1) TASK_MASTER_PROJECT_ROOT environment variable, 2) 'projectRoot' in config files, 3) existing resolution methods. Ensure this function is used consistently throughout the codebase.
### Details:

## 92.4. Update all MCP tools to use the new project root resolution [pending]
### Dependencies: None
### Description: Identify all MCP tools and components that need to access the project root and update them to use the new resolution logic. Ensure consistent behavior across all parts of the system.
### Details:

## 92.5. Add comprehensive tests for the new project root resolution [pending]
### Dependencies: None
### Description: Create unit and integration tests to verify the correct behavior of the project root resolution logic under various configurations and edge cases.
### Details:

## 92.6. Update documentation with new configuration options [pending]
### Dependencies: None
### Description: Update the project documentation to clearly explain the new TASK_MASTER_PROJECT_ROOT environment variable, the 'projectRoot' configuration option, and the precedence rules. Include examples of different configuration scenarios.
### Details:

## 92.7. Implement validation for project root directory [pending]
### Dependencies: None
### Description: Add validation to ensure the specified project root directory exists and has the necessary permissions. Provide clear error messages when validation fails.
### Details:

## 92.8. Implement support for loading environment variables from .env files [pending]
### Dependencies: None
### Description: Add functionality to load the TASK_MASTER_PROJECT_ROOT variable from .env files in the workspace, following best practices for environment variable management in MCP servers.
### Details:
55
tasks/task_093.txt
Normal file
@@ -0,0 +1,55 @@
# Task ID: 93
# Title: Implement Google Vertex AI Provider Integration
# Status: pending
# Dependencies: 19, 94
# Priority: medium
# Description: Develop a dedicated Google Vertex AI provider in the codebase, enabling users to leverage Vertex AI models with enterprise-grade configuration and authentication.
# Details:
1. Create a new provider class in `src/ai-providers/google-vertex.js` that extends the existing BaseAIProvider, following the established structure used by other providers (e.g., google.js, openai.js).
2. Integrate the Vercel AI SDK's `@ai-sdk/google-vertex` package. Use the default `vertex` provider for standard usage, and allow for custom configuration via `createVertex` for advanced scenarios (e.g., specifying project ID, location, and credentials).
3. Implement all required interface methods (such as `getClient`, `generateText`, etc.) to ensure compatibility with the provider system. Reference the implementation patterns from other providers for consistency.
4. Handle Vertex AI-specific configuration, including project ID, location, and Google Cloud authentication. Support both environment-based authentication and explicit service account credentials via `googleAuthOptions`.
5. Implement robust error handling for Vertex-specific issues, including authentication failures and API errors, leveraging the system-wide error handling patterns.
6. Update `src/ai-providers/index.js` to export the new provider, and add the 'vertex' entry to the PROVIDERS object in `scripts/modules/ai-services-unified.js`.
7. Update documentation to provide clear setup instructions for Google Vertex AI, including required environment variables, service account setup, and configuration examples.
8. Ensure the implementation is modular and maintainable, supporting future expansion for additional Vertex AI features or models.
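A compact sketch of what points 1-5 could look like, assuming `createVertex` from `@ai-sdk/google-vertex` as named above and the same `getClient(params)` / `handleError` shape used by the refactored providers elsewhere in this PR; parameter names beyond those mentioned in the task are assumptions:

```js
// src/ai-providers/google-vertex.js (sketch, not the merged implementation)
import { createVertex } from '@ai-sdk/google-vertex';
import { BaseAIProvider } from './base-provider.js';

export class VertexAIProvider extends BaseAIProvider {
	constructor() {
		super();
		this.name = 'Google Vertex AI';
	}

	getClient(params) {
		try {
			const { projectId, location, credentials } = params;
			if (!projectId || !location) {
				throw new Error('Vertex AI requires a project ID and location.');
			}
			return createVertex({
				project: projectId,
				location,
				// Explicit service-account credentials are optional; otherwise
				// Application Default Credentials (GOOGLE_APPLICATION_CREDENTIALS) apply.
				...(credentials && { googleAuthOptions: { credentials } })
			});
		} catch (error) {
			this.handleError('client initialization', error);
		}
	}
}
```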
# Test Strategy:
- Write unit tests for the new provider class, covering all interface methods and configuration scenarios (default, custom, error cases).
- Verify that the provider can successfully authenticate using both environment-based and explicit service account credentials.
- Test integration with the provider system by selecting 'vertex' as the provider and generating text using supported Vertex AI models (e.g., Gemini).
- Simulate authentication and API errors to confirm robust error handling and user feedback.
- Confirm that the provider is correctly exported and available in the PROVIDERS object.
- Review and validate the updated documentation for accuracy and completeness.

# Subtasks:
## 1. Create Google Vertex AI Provider Class [pending]
### Dependencies: None
### Description: Develop a new provider class in `src/ai-providers/google-vertex.js` that extends the BaseAIProvider, following the structure of existing providers.
### Details:
Ensure the new class is consistent with the architecture of other providers such as google.js and openai.js, and is ready to integrate with the AI SDK.

## 2. Integrate Vercel AI SDK Google Vertex Package [pending]
### Dependencies: 93.1
### Description: Integrate the `@ai-sdk/google-vertex` package, supporting both the default provider and custom configuration via `createVertex`.
### Details:
Allow for standard usage with the default `vertex` provider and advanced scenarios using `createVertex` for custom project ID, location, and credentials as per SDK documentation.

## 3. Implement Provider Interface Methods [pending]
### Dependencies: 93.2
### Description: Implement all required interface methods (e.g., `getClient`, `generateText`) to ensure compatibility with the provider system.
### Details:
Reference implementation patterns from other providers to maintain consistency and ensure all required methods are present and functional.

## 4. Handle Vertex AI Configuration and Authentication [pending]
### Dependencies: 93.3
### Description: Implement support for Vertex AI-specific configuration, including project ID, location, and authentication via environment variables or explicit service account credentials.
### Details:
Support both environment-based authentication and explicit credentials using `googleAuthOptions`, following Google Cloud and Vertex AI setup best practices.

## 5. Update Exports, Documentation, and Error Handling [pending]
### Dependencies: 93.4
### Description: Export the new provider, update the PROVIDERS object, and document setup instructions, including robust error handling for Vertex-specific issues.
### Details:
Update `src/ai-providers/index.js` and `scripts/modules/ai-services-unified.js`, and provide clear documentation for setup, configuration, and error handling patterns.
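A sketch of that registration step, assuming the PROVIDERS object maps a provider key to a provider instance (the exact shape of the existing map is not shown in this diff, so treat this as illustrative):

```js
// scripts/modules/ai-services-unified.js (excerpt, illustrative)
import { VertexAIProvider } from '../../src/ai-providers/index.js';

const PROVIDERS = {
	// ...existing entries (anthropic, openai, google, azure, ...)
	vertex: new VertexAIProvider()
};
```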
103
tasks/task_094.txt
Normal file
@@ -0,0 +1,103 @@
# Task ID: 94
# Title: Implement Azure OpenAI Provider Integration
# Status: done
# Dependencies: 19, 26
# Priority: medium
# Description: Create a comprehensive Azure OpenAI provider implementation that integrates with the existing AI provider system, enabling users to leverage Azure-hosted OpenAI models through proper authentication and configuration.
# Details:
Implement the Azure OpenAI provider following the established provider pattern:

1. **Create Azure Provider Class** (`src/ai-providers/azure.js`):
- Extend BaseAIProvider class following the same pattern as openai.js and google.js
- Import and use `createAzureOpenAI` from `@ai-sdk/azure` package
- Implement required interface methods: `getClient()`, `validateConfig()`, and any other abstract methods
- Handle Azure-specific configuration: endpoint URL, API key, and deployment name
- Add proper error handling for missing or invalid Azure configuration

2. **Configuration Management**:
- Support environment variables: AZURE_OPENAI_ENDPOINT, AZURE_OPENAI_API_KEY, AZURE_OPENAI_DEPLOYMENT
- Validate that both endpoint and API key are provided
- Provide clear error messages for configuration issues
- Follow the same configuration pattern as other providers

3. **Integration Updates**:
- Update `src/ai-providers/index.js` to export the new AzureProvider
- Add 'azure' entry to the PROVIDERS object in `scripts/modules/ai-services-unified.js`
- Ensure the provider is properly registered and accessible through the unified AI services

4. **Error Handling**:
- Implement Azure-specific error handling for authentication failures
- Handle endpoint connectivity issues with helpful error messages
- Validate deployment name and provide guidance for common configuration mistakes
- Follow the established error handling patterns from Task 19

5. **Documentation Updates**:
- Update any provider documentation to include Azure OpenAI setup instructions
- Add configuration examples for Azure OpenAI environment variables
- Include troubleshooting guidance for common Azure-specific issues

The implementation should maintain consistency with existing provider implementations while handling Azure's unique authentication and endpoint requirements.
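A hedged sketch of the provider described above. Note that the task text refers to `createAzureOpenAI`, while the `@ai-sdk/azure` package exposes its factory as `createAzure`; the sketch uses the latter, and everything beyond the environment variables named in this task (validation details, deployment handling) is an assumption rather than the merged code:

```js
// src/ai-providers/azure.js (sketch, assuming the getClient/handleError pattern from this PR)
import { createAzure } from '@ai-sdk/azure';
import { BaseAIProvider } from './base-provider.js';

export class AzureProvider extends BaseAIProvider {
	constructor() {
		super();
		this.name = 'Azure OpenAI';
	}

	getClient(params) {
		try {
			const { apiKey, baseURL } = params;
			if (!apiKey || !baseURL) {
				throw new Error(
					'Azure OpenAI requires AZURE_OPENAI_API_KEY and AZURE_OPENAI_ENDPOINT to be set.'
				);
			}
			// Model IDs passed to the returned client are expected to be
			// Azure deployment names (AZURE_OPENAI_DEPLOYMENT).
			return createAzure({ apiKey, baseURL });
		} catch (error) {
			this.handleError('client initialization', error);
		}
	}
}
```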
# Test Strategy:
Verify the Azure OpenAI provider implementation through comprehensive testing:

1. **Unit Testing**:
- Test provider class instantiation and configuration validation
- Verify getClient() method returns properly configured Azure OpenAI client
- Test error handling for missing/invalid configuration parameters
- Validate that the provider correctly extends BaseAIProvider

2. **Integration Testing**:
- Test provider registration in the unified AI services system
- Verify the provider appears in the PROVIDERS object and is accessible
- Test end-to-end functionality with valid Azure OpenAI credentials
- Validate that the provider works with existing AI operation workflows

3. **Configuration Testing**:
- Test with various environment variable combinations
- Verify proper error messages for missing endpoint or API key
- Test with invalid endpoint URLs and ensure graceful error handling
- Validate deployment name handling and error reporting

4. **Manual Verification**:
- Set up test Azure OpenAI credentials and verify successful connection
- Test actual AI operations (like task expansion) using the Azure provider
- Verify that the provider selection works correctly in the CLI
- Confirm that error messages are helpful and actionable for users

5. **Documentation Verification**:
- Ensure all configuration examples work as documented
- Verify that setup instructions are complete and accurate
- Test troubleshooting guidance with common error scenarios

# Subtasks:
## 1. Create Azure Provider Class [done]
### Dependencies: None
### Description: Implement the AzureProvider class that extends BaseAIProvider to handle Azure OpenAI integration
### Details:
Create the AzureProvider class in src/ai-providers/azure.js that extends BaseAIProvider. Import createAzureOpenAI from @ai-sdk/azure package. Implement required interface methods including getClient() and validateConfig(). Handle Azure-specific configuration parameters: endpoint URL, API key, and deployment name. Follow the established pattern in openai.js and google.js. Ensure proper error handling for missing or invalid configuration.

## 2. Implement Configuration Management [done]
### Dependencies: 94.1
### Description: Add support for Azure OpenAI environment variables and configuration validation
### Details:
Implement configuration management for Azure OpenAI provider that supports environment variables: AZURE_OPENAI_ENDPOINT, AZURE_OPENAI_API_KEY, and AZURE_OPENAI_DEPLOYMENT. Add validation logic to ensure both endpoint and API key are provided. Create clear error messages for configuration issues. Follow the same configuration pattern as implemented in other providers. Ensure the validateConfig() method properly checks all required Azure configuration parameters.

## 3. Update Provider Integration [done]
### Dependencies: 94.1, 94.2
### Description: Integrate the Azure provider into the existing AI provider system
### Details:
Update src/ai-providers/index.js to export the new AzureProvider class. Add 'azure' entry to the PROVIDERS object in scripts/modules/ai-services-unified.js. Ensure the provider is properly registered and accessible through the unified AI services. Test that the provider can be instantiated and used through the provider selection mechanism. Follow the same integration pattern used for existing providers.

## 4. Implement Azure-Specific Error Handling [done]
### Dependencies: 94.1, 94.2
### Description: Add specialized error handling for Azure OpenAI-specific issues
### Details:
Implement Azure-specific error handling for authentication failures, endpoint connectivity issues, and deployment name validation. Provide helpful error messages that guide users to resolve common configuration mistakes. Follow the established error handling patterns from Task 19. Create custom error classes if needed for Azure-specific errors. Ensure errors are properly propagated and formatted for user display.

## 5. Update Documentation [done]
### Dependencies: 94.1, 94.2, 94.3, 94.4
### Description: Create comprehensive documentation for the Azure OpenAI provider integration
### Details:
Update provider documentation to include Azure OpenAI setup instructions. Add configuration examples for Azure OpenAI environment variables. Include troubleshooting guidance for common Azure-specific issues. Document the required Azure resource creation process with references to Microsoft's documentation. Provide examples of valid configuration settings and explain each required parameter. Include information about Azure OpenAI model deployment requirements.
210
tasks/tasks.json
@@ -5466,6 +5466,216 @@
|
||||
"parentTaskId": 91
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"id": 93,
|
||||
"title": "Implement Google Vertex AI Provider Integration",
|
||||
"description": "Develop a dedicated Google Vertex AI provider in the codebase, enabling users to leverage Vertex AI models with enterprise-grade configuration and authentication.",
|
||||
"details": "1. Create a new provider class in `src/ai-providers/google-vertex.js` that extends the existing BaseAIProvider, following the established structure used by other providers (e.g., google.js, openai.js).\n2. Integrate the Vercel AI SDK's `@ai-sdk/google-vertex` package. Use the default `vertex` provider for standard usage, and allow for custom configuration via `createVertex` for advanced scenarios (e.g., specifying project ID, location, and credentials).\n3. Implement all required interface methods (such as `getClient`, `generateText`, etc.) to ensure compatibility with the provider system. Reference the implementation patterns from other providers for consistency.\n4. Handle Vertex AI-specific configuration, including project ID, location, and Google Cloud authentication. Support both environment-based authentication and explicit service account credentials via `googleAuthOptions`.\n5. Implement robust error handling for Vertex-specific issues, including authentication failures and API errors, leveraging the system-wide error handling patterns.\n6. Update `src/ai-providers/index.js` to export the new provider, and add the 'vertex' entry to the PROVIDERS object in `scripts/modules/ai-services-unified.js`.\n7. Update documentation to provide clear setup instructions for Google Vertex AI, including required environment variables, service account setup, and configuration examples.\n8. Ensure the implementation is modular and maintainable, supporting future expansion for additional Vertex AI features or models.",
|
||||
"testStrategy": "- Write unit tests for the new provider class, covering all interface methods and configuration scenarios (default, custom, error cases).\n- Verify that the provider can successfully authenticate using both environment-based and explicit service account credentials.\n- Test integration with the provider system by selecting 'vertex' as the provider and generating text using supported Vertex AI models (e.g., Gemini).\n- Simulate authentication and API errors to confirm robust error handling and user feedback.\n- Confirm that the provider is correctly exported and available in the PROVIDERS object.\n- Review and validate the updated documentation for accuracy and completeness.",
|
||||
"status": "pending",
|
||||
"dependencies": [
|
||||
19,
|
||||
94
|
||||
],
|
||||
"priority": "medium",
|
||||
"subtasks": [
|
||||
{
|
||||
"id": 1,
|
||||
"title": "Create Google Vertex AI Provider Class",
|
||||
"description": "Develop a new provider class in `src/ai-providers/google-vertex.js` that extends the BaseAIProvider, following the structure of existing providers.",
|
||||
"dependencies": [],
|
||||
"details": "Ensure the new class is consistent with the architecture of other providers such as google.js and openai.js, and is ready to integrate with the AI SDK.",
|
||||
"status": "pending",
|
||||
"testStrategy": "Verify the class structure matches other providers and can be instantiated without errors."
|
||||
},
|
||||
{
|
||||
"id": 2,
|
||||
"title": "Integrate Vercel AI SDK Google Vertex Package",
|
||||
"description": "Integrate the `@ai-sdk/google-vertex` package, supporting both the default provider and custom configuration via `createVertex`.",
|
||||
"dependencies": [
|
||||
1
|
||||
],
|
||||
"details": "Allow for standard usage with the default `vertex` provider and advanced scenarios using `createVertex` for custom project ID, location, and credentials as per SDK documentation.",
|
||||
"status": "pending",
|
||||
"testStrategy": "Write unit tests to ensure both default and custom provider instances can be created and configured."
|
||||
},
|
||||
{
|
||||
"id": 3,
|
||||
"title": "Implement Provider Interface Methods",
|
||||
"description": "Implement all required interface methods (e.g., `getClient`, `generateText`) to ensure compatibility with the provider system.",
|
||||
"dependencies": [
|
||||
2
|
||||
],
|
||||
"details": "Reference implementation patterns from other providers to maintain consistency and ensure all required methods are present and functional.",
|
||||
"status": "pending",
|
||||
"testStrategy": "Run integration tests to confirm the provider responds correctly to all interface method calls."
|
||||
},
|
||||
{
|
||||
"id": 4,
|
||||
"title": "Handle Vertex AI Configuration and Authentication",
|
||||
"description": "Implement support for Vertex AI-specific configuration, including project ID, location, and authentication via environment variables or explicit service account credentials.",
|
||||
"dependencies": [
|
||||
3
|
||||
],
|
||||
"details": "Support both environment-based authentication and explicit credentials using `googleAuthOptions`, following Google Cloud and Vertex AI setup best practices.",
|
||||
"status": "pending",
|
||||
"testStrategy": "Test with both environment variable-based and explicit service account authentication to ensure both methods work as expected."
|
||||
},
|
||||
{
|
||||
"id": 5,
|
||||
"title": "Update Exports, Documentation, and Error Handling",
|
||||
"description": "Export the new provider, update the PROVIDERS object, and document setup instructions, including robust error handling for Vertex-specific issues.",
|
||||
"dependencies": [
|
||||
4
|
||||
],
|
||||
"details": "Update `src/ai-providers/index.js` and `scripts/modules/ai-services-unified.js`, and provide clear documentation for setup, configuration, and error handling patterns.",
|
||||
"status": "pending",
|
||||
"testStrategy": "Verify the provider is available for import, documentation is accurate, and error handling works by simulating common failure scenarios."
|
||||
}
|
||||
]
|
||||
},
|
||||
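To make the Vertex AI task above concrete, here is a minimal sketch of the provider its details describe, assuming the repo's BaseAIProvider lives at `src/ai-providers/base-provider.js` and exposes a `getClient()` hook; only `createVertex` and its `project`/`location`/`googleAuthOptions` options come from the `@ai-sdk/google-vertex` package, everything else is illustrative:

```js
// Illustrative sketch only — not the final src/ai-providers/google-vertex.js
import { createVertex } from '@ai-sdk/google-vertex';
import { BaseAIProvider } from './base-provider.js'; // assumed location of the base class

export class VertexAIProvider extends BaseAIProvider {
	constructor() {
		super();
		this.name = 'Google Vertex AI';
	}

	/**
	 * Builds a Vertex client. Project ID and location come from explicit params
	 * or the VERTEX_PROJECT_ID / VERTEX_LOCATION environment variables; explicit
	 * service-account credentials are optional (Application Default Credentials
	 * are used otherwise).
	 */
	getClient(params = {}) {
		const { projectId, location, credentials } = params;
		return createVertex({
			project: projectId ?? process.env.VERTEX_PROJECT_ID,
			location: location ?? process.env.VERTEX_LOCATION ?? 'us-central1',
			...(credentials ? { googleAuthOptions: { credentials } } : {})
		});
	}
}
```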
{
|
||||
"id": 94,
|
||||
"title": "Implement Azure OpenAI Provider Integration",
|
||||
"description": "Create a comprehensive Azure OpenAI provider implementation that integrates with the existing AI provider system, enabling users to leverage Azure-hosted OpenAI models through proper authentication and configuration.",
|
||||
"details": "Implement the Azure OpenAI provider following the established provider pattern:\n\n1. **Create Azure Provider Class** (`src/ai-providers/azure.js`):\n - Extend BaseAIProvider class following the same pattern as openai.js and google.js\n - Import and use `createAzureOpenAI` from `@ai-sdk/azure` package\n - Implement required interface methods: `getClient()`, `validateConfig()`, and any other abstract methods\n - Handle Azure-specific configuration: endpoint URL, API key, and deployment name\n - Add proper error handling for missing or invalid Azure configuration\n\n2. **Configuration Management**:\n - Support environment variables: AZURE_OPENAI_ENDPOINT, AZURE_OPENAI_API_KEY, AZURE_OPENAI_DEPLOYMENT\n - Validate that both endpoint and API key are provided\n - Provide clear error messages for configuration issues\n - Follow the same configuration pattern as other providers\n\n3. **Integration Updates**:\n - Update `src/ai-providers/index.js` to export the new AzureProvider\n - Add 'azure' entry to the PROVIDERS object in `scripts/modules/ai-services-unified.js`\n - Ensure the provider is properly registered and accessible through the unified AI services\n\n4. **Error Handling**:\n - Implement Azure-specific error handling for authentication failures\n - Handle endpoint connectivity issues with helpful error messages\n - Validate deployment name and provide guidance for common configuration mistakes\n - Follow the established error handling patterns from Task 19\n\n5. **Documentation Updates**:\n - Update any provider documentation to include Azure OpenAI setup instructions\n - Add configuration examples for Azure OpenAI environment variables\n - Include troubleshooting guidance for common Azure-specific issues\n\nThe implementation should maintain consistency with existing provider implementations while handling Azure's unique authentication and endpoint requirements.",
|
||||
"testStrategy": "Verify the Azure OpenAI provider implementation through comprehensive testing:\n\n1. **Unit Testing**:\n - Test provider class instantiation and configuration validation\n - Verify getClient() method returns properly configured Azure OpenAI client\n - Test error handling for missing/invalid configuration parameters\n - Validate that the provider correctly extends BaseAIProvider\n\n2. **Integration Testing**:\n - Test provider registration in the unified AI services system\n - Verify the provider appears in the PROVIDERS object and is accessible\n - Test end-to-end functionality with valid Azure OpenAI credentials\n - Validate that the provider works with existing AI operation workflows\n\n3. **Configuration Testing**:\n - Test with various environment variable combinations\n - Verify proper error messages for missing endpoint or API key\n - Test with invalid endpoint URLs and ensure graceful error handling\n - Validate deployment name handling and error reporting\n\n4. **Manual Verification**:\n - Set up test Azure OpenAI credentials and verify successful connection\n - Test actual AI operations (like task expansion) using the Azure provider\n - Verify that the provider selection works correctly in the CLI\n - Confirm that error messages are helpful and actionable for users\n\n5. **Documentation Verification**:\n - Ensure all configuration examples work as documented\n - Verify that setup instructions are complete and accurate\n - Test troubleshooting guidance with common error scenarios",
|
||||
"status": "done",
|
||||
"dependencies": [
|
||||
19,
|
||||
26
|
||||
],
|
||||
"priority": "medium",
|
||||
"subtasks": [
|
||||
{
|
||||
"id": 1,
|
||||
"title": "Create Azure Provider Class",
|
||||
"description": "Implement the AzureProvider class that extends BaseAIProvider to handle Azure OpenAI integration",
|
||||
"dependencies": [],
|
||||
"details": "Create the AzureProvider class in src/ai-providers/azure.js that extends BaseAIProvider. Import createAzureOpenAI from @ai-sdk/azure package. Implement required interface methods including getClient() and validateConfig(). Handle Azure-specific configuration parameters: endpoint URL, API key, and deployment name. Follow the established pattern in openai.js and google.js. Ensure proper error handling for missing or invalid configuration.",
|
||||
"status": "done",
|
||||
"testStrategy": "Create unit tests that verify the AzureProvider class correctly initializes with valid configuration and throws appropriate errors with invalid configuration. Test the getClient() method returns a properly configured client instance.",
|
||||
"parentTaskId": 94
|
||||
},
|
||||
{
|
||||
"id": 2,
|
||||
"title": "Implement Configuration Management",
|
||||
"description": "Add support for Azure OpenAI environment variables and configuration validation",
|
||||
"dependencies": [
|
||||
1
|
||||
],
|
||||
"details": "Implement configuration management for Azure OpenAI provider that supports environment variables: AZURE_OPENAI_ENDPOINT, AZURE_OPENAI_API_KEY, and AZURE_OPENAI_DEPLOYMENT. Add validation logic to ensure both endpoint and API key are provided. Create clear error messages for configuration issues. Follow the same configuration pattern as implemented in other providers. Ensure the validateConfig() method properly checks all required Azure configuration parameters.",
|
||||
"status": "done",
|
||||
"testStrategy": "Test configuration validation with various combinations of missing or invalid parameters. Verify environment variables are correctly loaded and applied to the provider configuration.",
|
||||
"parentTaskId": 94
|
||||
},
|
||||
{
|
||||
"id": 3,
|
||||
"title": "Update Provider Integration",
|
||||
"description": "Integrate the Azure provider into the existing AI provider system",
|
||||
"dependencies": [
|
||||
1,
|
||||
2
|
||||
],
|
||||
"details": "Update src/ai-providers/index.js to export the new AzureProvider class. Add 'azure' entry to the PROVIDERS object in scripts/modules/ai-services-unified.js. Ensure the provider is properly registered and accessible through the unified AI services. Test that the provider can be instantiated and used through the provider selection mechanism. Follow the same integration pattern used for existing providers.",
|
||||
"status": "done",
|
||||
"testStrategy": "Create integration tests that verify the Azure provider is correctly registered and can be selected through the provider system. Test that the provider is properly initialized when selected.",
|
||||
"parentTaskId": 94
|
||||
},
|
||||
{
|
||||
"id": 4,
|
||||
"title": "Implement Azure-Specific Error Handling",
|
||||
"description": "Add specialized error handling for Azure OpenAI-specific issues",
|
||||
"dependencies": [
|
||||
1,
|
||||
2
|
||||
],
|
||||
"details": "Implement Azure-specific error handling for authentication failures, endpoint connectivity issues, and deployment name validation. Provide helpful error messages that guide users to resolve common configuration mistakes. Follow the established error handling patterns from Task 19. Create custom error classes if needed for Azure-specific errors. Ensure errors are properly propagated and formatted for user display.",
|
||||
"status": "done",
|
||||
"testStrategy": "Test error handling by simulating various failure scenarios including authentication failures, invalid endpoints, and missing deployment names. Verify appropriate error messages are generated.",
|
||||
"parentTaskId": 94
|
||||
},
|
||||
{
|
||||
"id": 5,
|
||||
"title": "Update Documentation",
|
||||
"description": "Create comprehensive documentation for the Azure OpenAI provider integration",
|
||||
"dependencies": [
|
||||
1,
|
||||
2,
|
||||
3,
|
||||
4
|
||||
],
|
||||
"details": "Update provider documentation to include Azure OpenAI setup instructions. Add configuration examples for Azure OpenAI environment variables. Include troubleshooting guidance for common Azure-specific issues. Document the required Azure resource creation process with references to Microsoft's documentation. Provide examples of valid configuration settings and explain each required parameter. Include information about Azure OpenAI model deployment requirements.",
|
||||
"status": "done",
|
||||
"testStrategy": "Review documentation for completeness, accuracy, and clarity. Ensure all configuration options are documented and examples are provided. Verify troubleshooting guidance addresses common issues identified during implementation.",
|
||||
"parentTaskId": 94
|
||||
}
|
||||
]
|
||||
},
|
||||
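As a rough sketch of the configuration handling the Azure task above calls for (the function name and error wording are assumptions; the environment variable names come from the task details):

```js
// Illustrative sketch of Azure OpenAI configuration validation
export function validateAzureConfig(env = process.env) {
	const endpoint = env.AZURE_OPENAI_ENDPOINT;
	const apiKey = env.AZURE_OPENAI_API_KEY;
	const deployment = env.AZURE_OPENAI_DEPLOYMENT;

	const missing = [];
	if (!endpoint) missing.push('AZURE_OPENAI_ENDPOINT');
	if (!apiKey) missing.push('AZURE_OPENAI_API_KEY');
	if (missing.length > 0) {
		throw new Error(
			`Azure OpenAI is not configured. Missing: ${missing.join(', ')}. ` +
				'Set these variables in your shell or .env file.'
		);
	}
	if (!deployment) {
		throw new Error(
			'AZURE_OPENAI_DEPLOYMENT is not set; use the deployment name from your Azure OpenAI resource.'
		);
	}
	return { endpoint, apiKey, deployment };
}
```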
{
|
||||
"id": 92,
|
||||
"title": "Implement Project Root Environment Variable Support in MCP Configuration",
|
||||
"description": "Add support for a 'TASK_MASTER_PROJECT_ROOT' environment variable in MCP configuration, allowing it to be set in both mcp.json and .env, with precedence over other methods. This will define the root directory for the MCP server and take precedence over all other project root resolution methods. The implementation should be backward compatible with existing workflows that don't use this variable.",
|
||||
"status": "in-progress",
|
||||
"dependencies": [
|
||||
1,
|
||||
3,
|
||||
17
|
||||
],
|
||||
"priority": "medium",
|
||||
"details": "Update the MCP server configuration system to support the TASK_MASTER_PROJECT_ROOT environment variable as the standard way to specify the project root directory. This provides better namespacing and avoids conflicts with other tools that might use a generic PROJECT_ROOT variable. Implement a clear precedence order for project root resolution:\n\n1. TASK_MASTER_PROJECT_ROOT environment variable (from shell or .env file)\n2. 'projectRoot' key in mcp_config.toml or mcp.json configuration files\n3. Existing resolution logic (CLI args, current working directory, etc.)\n\nModify the configuration loading logic to check for these sources in the specified order, ensuring backward compatibility. All MCP tools and components should use this standardized project root resolution logic. The TASK_MASTER_PROJECT_ROOT environment variable will be required because path resolution is delegated to the MCP client implementation, ensuring consistent behavior across different environments.\n\nImplementation steps:\n1. Identify all code locations where project root is determined (initialization, utility functions)\n2. Update configuration loaders to check for TASK_MASTER_PROJECT_ROOT in environment variables\n3. Add support for 'projectRoot' in configuration files as a fallback\n4. Refactor project root resolution logic to follow the new precedence rules\n5. Ensure all MCP tools and functions use the updated resolution logic\n6. Add comprehensive error handling for cases where TASK_MASTER_PROJECT_ROOT is not set or invalid\n7. Implement validation to ensure the specified directory exists and is accessible",
|
||||
"testStrategy": "1. Write unit tests to verify that the config loader correctly reads project root from environment variables and configuration files with the expected precedence:\n - Test TASK_MASTER_PROJECT_ROOT environment variable takes precedence when set\n - Test 'projectRoot' in configuration files is used when environment variable is absent\n - Test fallback to existing resolution logic when neither is specified\n\n2. Add integration tests to ensure that the MCP server and all tools use the correct project root:\n - Test server startup with TASK_MASTER_PROJECT_ROOT set to various valid and invalid paths\n - Test configuration file loading from the specified project root\n - Test path resolution for resources relative to the project root\n\n3. Test backward compatibility:\n - Verify existing workflows function correctly without the new variables\n - Ensure no regression in projects not using the new configuration options\n\n4. Manual testing:\n - Set TASK_MASTER_PROJECT_ROOT in shell environment and verify correct behavior\n - Set TASK_MASTER_PROJECT_ROOT in .env file and verify it's properly loaded\n - Configure 'projectRoot' in configuration files and test precedence\n - Test with invalid or non-existent directories to verify error handling",
|
||||
"subtasks": [
|
||||
{
|
||||
"id": 92.1,
|
||||
"title": "Update configuration loader to check for TASK_MASTER_PROJECT_ROOT environment variable",
|
||||
"description": "Modify the configuration loading system to check for the TASK_MASTER_PROJECT_ROOT environment variable as the primary source for project root directory. Ensure proper error handling if the variable is set but points to a non-existent or inaccessible directory.",
|
||||
"status": "pending"
|
||||
},
|
||||
{
|
||||
"id": 92.2,
|
||||
"title": "Add support for 'projectRoot' in configuration files",
|
||||
"description": "Implement support for a 'projectRoot' key in mcp_config.toml and mcp.json configuration files as a fallback when the environment variable is not set. Update the configuration parser to recognize and validate this field.",
|
||||
"status": "pending"
|
||||
},
|
||||
{
|
||||
"id": 92.3,
|
||||
"title": "Refactor project root resolution logic with clear precedence rules",
|
||||
"description": "Create a unified project root resolution function that follows the precedence order: 1) TASK_MASTER_PROJECT_ROOT environment variable, 2) 'projectRoot' in config files, 3) existing resolution methods. Ensure this function is used consistently throughout the codebase.",
|
||||
"status": "pending"
|
||||
},
|
||||
{
|
||||
"id": 92.4,
|
||||
"title": "Update all MCP tools to use the new project root resolution",
|
||||
"description": "Identify all MCP tools and components that need to access the project root and update them to use the new resolution logic. Ensure consistent behavior across all parts of the system.",
|
||||
"status": "pending"
|
||||
},
|
||||
{
|
||||
"id": 92.5,
|
||||
"title": "Add comprehensive tests for the new project root resolution",
|
||||
"description": "Create unit and integration tests to verify the correct behavior of the project root resolution logic under various configurations and edge cases.",
|
||||
"status": "pending"
|
||||
},
|
||||
{
|
||||
"id": 92.6,
|
||||
"title": "Update documentation with new configuration options",
|
||||
"description": "Update the project documentation to clearly explain the new TASK_MASTER_PROJECT_ROOT environment variable, the 'projectRoot' configuration option, and the precedence rules. Include examples of different configuration scenarios.",
|
||||
"status": "pending"
|
||||
},
|
||||
{
|
||||
"id": 92.7,
|
||||
"title": "Implement validation for project root directory",
|
||||
"description": "Add validation to ensure the specified project root directory exists and has the necessary permissions. Provide clear error messages when validation fails.",
|
||||
"status": "pending"
|
||||
},
|
||||
{
|
||||
"id": 92.8,
|
||||
"title": "Implement support for loading environment variables from .env files",
|
||||
"description": "Add functionality to load the TASK_MASTER_PROJECT_ROOT variable from .env files in the workspace, following best practices for environment variable management in MCP servers.",
|
||||
"status": "pending"
|
||||
}
|
||||
]
|
||||
}
|
||||
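A minimal sketch of the precedence order described in task 92's details (the helper name, config shape, and error wording are assumptions):

```js
// Illustrative sketch of the project root resolution precedence
import fs from 'fs';

export function resolveProjectRoot({ config = {}, fallbackRoot = null } = {}) {
	// 1. TASK_MASTER_PROJECT_ROOT from the shell or a loaded .env file
	// 2. 'projectRoot' from mcp_config.toml / mcp.json
	// 3. existing resolution logic (CLI args, cwd, ...) passed in as fallbackRoot
	const candidate =
		process.env.TASK_MASTER_PROJECT_ROOT || config.projectRoot || fallbackRoot;

	if (!candidate) {
		throw new Error(
			'Unable to resolve project root. Set TASK_MASTER_PROJECT_ROOT or configure projectRoot.'
		);
	}
	if (!fs.existsSync(candidate) || !fs.statSync(candidate).isDirectory()) {
		throw new Error(`Project root "${candidate}" does not exist or is not a directory.`);
	}
	return candidate;
}
```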
]
|
||||
}
|
||||
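For subtask 92.8, one possible way to pick up TASK_MASTER_PROJECT_ROOT from a workspace .env file, assuming the `dotenv` package is (or will be) a dependency:

```js
// Illustrative sketch: load TASK_MASTER_PROJECT_ROOT from a workspace .env file
import path from 'path';
import dotenv from 'dotenv';

export function loadProjectRootFromDotenv(workspaceDir) {
	// Do not override variables already set in the shell; shell values win.
	dotenv.config({ path: path.join(workspaceDir, '.env'), override: false });
	return process.env.TASK_MASTER_PROJECT_ROOT ?? null;
}
```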
5558
tasks/tasks.json.bak
Normal file
File diff suppressed because one or more lines are too long
@@ -40,58 +40,145 @@ const mockModelMap = {
|
||||
// Add other providers/models if needed for specific tests
|
||||
};
|
||||
const mockGetBaseUrlForRole = jest.fn();
|
||||
const mockGetAllProviders = jest.fn();
|
||||
const mockGetOllamaBaseURL = jest.fn();
|
||||
const mockGetAzureBaseURL = jest.fn();
|
||||
const mockGetVertexProjectId = jest.fn();
|
||||
const mockGetVertexLocation = jest.fn();
|
||||
const mockGetAvailableModels = jest.fn();
|
||||
const mockValidateProvider = jest.fn();
|
||||
const mockValidateProviderModelCombination = jest.fn();
|
||||
const mockGetConfig = jest.fn();
|
||||
const mockWriteConfig = jest.fn();
|
||||
const mockIsConfigFilePresent = jest.fn();
|
||||
const mockGetMcpApiKeyStatus = jest.fn();
|
||||
const mockGetMainMaxTokens = jest.fn();
|
||||
const mockGetMainTemperature = jest.fn();
|
||||
const mockGetResearchMaxTokens = jest.fn();
|
||||
const mockGetResearchTemperature = jest.fn();
|
||||
const mockGetFallbackMaxTokens = jest.fn();
|
||||
const mockGetFallbackTemperature = jest.fn();
|
||||
const mockGetLogLevel = jest.fn();
|
||||
const mockGetDefaultNumTasks = jest.fn();
|
||||
const mockGetDefaultSubtasks = jest.fn();
|
||||
const mockGetDefaultPriority = jest.fn();
|
||||
const mockGetProjectName = jest.fn();
|
||||
|
||||
jest.unstable_mockModule('../../scripts/modules/config-manager.js', () => ({
|
||||
// Core config access
|
||||
getConfig: mockGetConfig,
|
||||
writeConfig: mockWriteConfig,
|
||||
isConfigFilePresent: mockIsConfigFilePresent,
|
||||
ConfigurationError: class ConfigurationError extends Error {
|
||||
constructor(message) {
|
||||
super(message);
|
||||
this.name = 'ConfigurationError';
|
||||
}
|
||||
},
|
||||
|
||||
// Validation
|
||||
validateProvider: mockValidateProvider,
|
||||
validateProviderModelCombination: mockValidateProviderModelCombination,
|
||||
VALID_PROVIDERS: ['anthropic', 'perplexity', 'openai', 'google'],
|
||||
MODEL_MAP: mockModelMap,
|
||||
getAvailableModels: mockGetAvailableModels,
|
||||
|
||||
// Role-specific getters
|
||||
getMainProvider: mockGetMainProvider,
|
||||
getMainModelId: mockGetMainModelId,
|
||||
getMainMaxTokens: mockGetMainMaxTokens,
|
||||
getMainTemperature: mockGetMainTemperature,
|
||||
getResearchProvider: mockGetResearchProvider,
|
||||
getResearchModelId: mockGetResearchModelId,
|
||||
getResearchMaxTokens: mockGetResearchMaxTokens,
|
||||
getResearchTemperature: mockGetResearchTemperature,
|
||||
getFallbackProvider: mockGetFallbackProvider,
|
||||
getFallbackModelId: mockGetFallbackModelId,
|
||||
getFallbackMaxTokens: mockGetFallbackMaxTokens,
|
||||
getFallbackTemperature: mockGetFallbackTemperature,
|
||||
getParametersForRole: mockGetParametersForRole,
|
||||
getUserId: mockGetUserId,
|
||||
getDebugFlag: mockGetDebugFlag,
|
||||
MODEL_MAP: mockModelMap,
|
||||
getBaseUrlForRole: mockGetBaseUrlForRole,
|
||||
isApiKeySet: mockIsApiKeySet
|
||||
|
||||
// Global settings
|
||||
getLogLevel: mockGetLogLevel,
|
||||
getDefaultNumTasks: mockGetDefaultNumTasks,
|
||||
getDefaultSubtasks: mockGetDefaultSubtasks,
|
||||
getDefaultPriority: mockGetDefaultPriority,
|
||||
getProjectName: mockGetProjectName,
|
||||
|
||||
// API Key and provider functions
|
||||
isApiKeySet: mockIsApiKeySet,
|
||||
getAllProviders: mockGetAllProviders,
|
||||
getOllamaBaseURL: mockGetOllamaBaseURL,
|
||||
getAzureBaseURL: mockGetAzureBaseURL,
|
||||
getVertexProjectId: mockGetVertexProjectId,
|
||||
getVertexLocation: mockGetVertexLocation,
|
||||
getMcpApiKeyStatus: mockGetMcpApiKeyStatus
|
||||
}));
|
||||
|
||||
// Mock AI Provider Modules
|
||||
const mockGenerateAnthropicText = jest.fn();
|
||||
const mockStreamAnthropicText = jest.fn();
|
||||
const mockGenerateAnthropicObject = jest.fn();
|
||||
jest.unstable_mockModule('../../src/ai-providers/anthropic.js', () => ({
|
||||
generateAnthropicText: mockGenerateAnthropicText,
|
||||
streamAnthropicText: mockStreamAnthropicText,
|
||||
generateAnthropicObject: mockGenerateAnthropicObject
|
||||
}));
|
||||
// Mock AI Provider Classes with proper methods
|
||||
const mockAnthropicProvider = {
|
||||
generateText: jest.fn(),
|
||||
streamText: jest.fn(),
|
||||
generateObject: jest.fn()
|
||||
};
|
||||
|
||||
const mockGeneratePerplexityText = jest.fn();
|
||||
const mockStreamPerplexityText = jest.fn();
|
||||
const mockGeneratePerplexityObject = jest.fn();
|
||||
jest.unstable_mockModule('../../src/ai-providers/perplexity.js', () => ({
|
||||
generatePerplexityText: mockGeneratePerplexityText,
|
||||
streamPerplexityText: mockStreamPerplexityText,
|
||||
generatePerplexityObject: mockGeneratePerplexityObject
|
||||
}));
|
||||
const mockPerplexityProvider = {
|
||||
generateText: jest.fn(),
|
||||
streamText: jest.fn(),
|
||||
generateObject: jest.fn()
|
||||
};
|
||||
|
||||
const mockGenerateOpenAIText = jest.fn();
|
||||
const mockStreamOpenAIText = jest.fn();
|
||||
const mockGenerateOpenAIObject = jest.fn();
|
||||
jest.unstable_mockModule('../../src/ai-providers/openai.js', () => ({
|
||||
generateOpenAIText: mockGenerateOpenAIText,
|
||||
streamOpenAIText: mockStreamOpenAIText,
|
||||
generateOpenAIObject: mockGenerateOpenAIObject
|
||||
}));
|
||||
const mockOpenAIProvider = {
|
||||
generateText: jest.fn(),
|
||||
streamText: jest.fn(),
|
||||
generateObject: jest.fn()
|
||||
};
|
||||
|
||||
// Mock ollama provider (for special case testing - API key is optional)
|
||||
const mockGenerateOllamaText = jest.fn();
|
||||
const mockStreamOllamaText = jest.fn();
|
||||
const mockGenerateOllamaObject = jest.fn();
|
||||
jest.unstable_mockModule('../../src/ai-providers/ollama.js', () => ({
|
||||
generateOllamaText: mockGenerateOllamaText,
|
||||
streamOllamaText: mockStreamOllamaText,
|
||||
generateOllamaObject: mockGenerateOllamaObject
|
||||
const mockOllamaProvider = {
|
||||
generateText: jest.fn(),
|
||||
streamText: jest.fn(),
|
||||
generateObject: jest.fn()
|
||||
};
|
||||
|
||||
// Mock the provider classes to return our mock instances
|
||||
jest.unstable_mockModule('../../src/ai-providers/index.js', () => ({
|
||||
AnthropicAIProvider: jest.fn(() => mockAnthropicProvider),
|
||||
PerplexityAIProvider: jest.fn(() => mockPerplexityProvider),
|
||||
GoogleAIProvider: jest.fn(() => ({
|
||||
generateText: jest.fn(),
|
||||
streamText: jest.fn(),
|
||||
generateObject: jest.fn()
|
||||
})),
|
||||
OpenAIProvider: jest.fn(() => mockOpenAIProvider),
|
||||
XAIProvider: jest.fn(() => ({
|
||||
generateText: jest.fn(),
|
||||
streamText: jest.fn(),
|
||||
generateObject: jest.fn()
|
||||
})),
|
||||
OpenRouterAIProvider: jest.fn(() => ({
|
||||
generateText: jest.fn(),
|
||||
streamText: jest.fn(),
|
||||
generateObject: jest.fn()
|
||||
})),
|
||||
OllamaAIProvider: jest.fn(() => mockOllamaProvider),
|
||||
BedrockAIProvider: jest.fn(() => ({
|
||||
generateText: jest.fn(),
|
||||
streamText: jest.fn(),
|
||||
generateObject: jest.fn()
|
||||
})),
|
||||
AzureProvider: jest.fn(() => ({
|
||||
generateText: jest.fn(),
|
||||
streamText: jest.fn(),
|
||||
generateObject: jest.fn()
|
||||
})),
|
||||
VertexAIProvider: jest.fn(() => ({
|
||||
generateText: jest.fn(),
|
||||
streamText: jest.fn(),
|
||||
generateObject: jest.fn()
|
||||
}))
|
||||
}));
|
||||
|
||||
// Mock utils logger, API key resolver, AND findProjectRoot
|
||||
@@ -100,13 +187,48 @@ const mockResolveEnvVariable = jest.fn();
|
||||
const mockFindProjectRoot = jest.fn();
|
||||
const mockIsSilentMode = jest.fn();
|
||||
const mockLogAiUsage = jest.fn();
|
||||
const mockFindCycles = jest.fn();
|
||||
const mockFormatTaskId = jest.fn();
|
||||
const mockTaskExists = jest.fn();
|
||||
const mockFindTaskById = jest.fn();
|
||||
const mockTruncate = jest.fn();
|
||||
const mockToKebabCase = jest.fn();
|
||||
const mockDetectCamelCaseFlags = jest.fn();
|
||||
const mockDisableSilentMode = jest.fn();
|
||||
const mockEnableSilentMode = jest.fn();
|
||||
const mockGetTaskManager = jest.fn();
|
||||
const mockAddComplexityToTask = jest.fn();
|
||||
const mockReadJSON = jest.fn();
|
||||
const mockWriteJSON = jest.fn();
|
||||
const mockSanitizePrompt = jest.fn();
|
||||
const mockReadComplexityReport = jest.fn();
|
||||
const mockFindTaskInComplexityReport = jest.fn();
|
||||
const mockAggregateTelemetry = jest.fn();
|
||||
|
||||
jest.unstable_mockModule('../../scripts/modules/utils.js', () => ({
|
||||
LOG_LEVELS: { error: 0, warn: 1, info: 2, debug: 3 },
|
||||
log: mockLog,
|
||||
resolveEnvVariable: mockResolveEnvVariable,
|
||||
findProjectRoot: mockFindProjectRoot,
|
||||
isSilentMode: mockIsSilentMode,
|
||||
logAiUsage: mockLogAiUsage
|
||||
logAiUsage: mockLogAiUsage,
|
||||
findCycles: mockFindCycles,
|
||||
formatTaskId: mockFormatTaskId,
|
||||
taskExists: mockTaskExists,
|
||||
findTaskById: mockFindTaskById,
|
||||
truncate: mockTruncate,
|
||||
toKebabCase: mockToKebabCase,
|
||||
detectCamelCaseFlags: mockDetectCamelCaseFlags,
|
||||
disableSilentMode: mockDisableSilentMode,
|
||||
enableSilentMode: mockEnableSilentMode,
|
||||
getTaskManager: mockGetTaskManager,
|
||||
addComplexityToTask: mockAddComplexityToTask,
|
||||
readJSON: mockReadJSON,
|
||||
writeJSON: mockWriteJSON,
|
||||
sanitizePrompt: mockSanitizePrompt,
|
||||
readComplexityReport: mockReadComplexityReport,
|
||||
findTaskInComplexityReport: mockFindTaskInComplexityReport,
|
||||
aggregateTelemetry: mockAggregateTelemetry
|
||||
}));
|
||||
|
||||
// Import the module to test (AFTER mocks)
|
||||
@@ -147,11 +269,12 @@ describe('Unified AI Services', () => {
|
||||
mockGetDebugFlag.mockReturnValue(false);
|
||||
mockGetUserId.mockReturnValue('test-user-id'); // Add default mock for getUserId
|
||||
mockIsApiKeySet.mockReturnValue(true); // Default to true for most tests
|
||||
mockGetBaseUrlForRole.mockReturnValue(null); // Default to no base URL
|
||||
});
|
||||
|
||||
describe('generateTextService', () => {
|
||||
test('should use main provider/model and succeed', async () => {
|
||||
mockGenerateAnthropicText.mockResolvedValue({
|
||||
mockAnthropicProvider.generateText.mockResolvedValue({
|
||||
text: 'Main provider response',
|
||||
usage: { inputTokens: 10, outputTokens: 20, totalTokens: 30 }
|
||||
});
|
||||
@@ -172,28 +295,13 @@ describe('Unified AI Services', () => {
|
||||
'main',
|
||||
fakeProjectRoot
|
||||
);
|
||||
expect(mockResolveEnvVariable).toHaveBeenCalledWith(
|
||||
'ANTHROPIC_API_KEY',
|
||||
params.session,
|
||||
fakeProjectRoot
|
||||
);
|
||||
expect(mockGenerateAnthropicText).toHaveBeenCalledTimes(1);
|
||||
expect(mockGenerateAnthropicText).toHaveBeenCalledWith({
|
||||
apiKey: 'mock-anthropic-key',
|
||||
modelId: 'test-main-model',
|
||||
maxTokens: 100,
|
||||
temperature: 0.5,
|
||||
messages: [
|
||||
{ role: 'system', content: 'System' },
|
||||
{ role: 'user', content: 'Test' }
|
||||
]
|
||||
});
|
||||
expect(mockGeneratePerplexityText).not.toHaveBeenCalled();
|
||||
expect(mockAnthropicProvider.generateText).toHaveBeenCalledTimes(1);
|
||||
expect(mockPerplexityProvider.generateText).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
test('should fall back to fallback provider if main fails', async () => {
|
||||
const mainError = new Error('Main provider failed');
|
||||
mockGenerateAnthropicText
|
||||
mockAnthropicProvider.generateText
|
||||
.mockRejectedValueOnce(mainError)
|
||||
.mockResolvedValueOnce({
|
||||
text: 'Fallback provider response',
|
||||
@@ -221,14 +329,8 @@ describe('Unified AI Services', () => {
|
||||
explicitRoot
|
||||
);
|
||||
|
||||
expect(mockResolveEnvVariable).toHaveBeenCalledWith(
|
||||
'ANTHROPIC_API_KEY',
|
||||
undefined,
|
||||
explicitRoot
|
||||
);
|
||||
|
||||
expect(mockGenerateAnthropicText).toHaveBeenCalledTimes(2);
|
||||
expect(mockGeneratePerplexityText).not.toHaveBeenCalled();
|
||||
expect(mockAnthropicProvider.generateText).toHaveBeenCalledTimes(2);
|
||||
expect(mockPerplexityProvider.generateText).not.toHaveBeenCalled();
|
||||
expect(mockLog).toHaveBeenCalledWith(
|
||||
'error',
|
||||
expect.stringContaining('Service call failed for role main')
|
||||
@@ -242,10 +344,10 @@ describe('Unified AI Services', () => {
|
||||
test('should fall back to research provider if main and fallback fail', async () => {
|
||||
const mainError = new Error('Main failed');
|
||||
const fallbackError = new Error('Fallback failed');
|
||||
mockGenerateAnthropicText
|
||||
mockAnthropicProvider.generateText
|
||||
.mockRejectedValueOnce(mainError)
|
||||
.mockRejectedValueOnce(fallbackError);
|
||||
mockGeneratePerplexityText.mockResolvedValue({
|
||||
mockPerplexityProvider.generateText.mockResolvedValue({
|
||||
text: 'Research provider response',
|
||||
usage: { inputTokens: 20, outputTokens: 30, totalTokens: 50 }
|
||||
});
|
||||
@@ -271,24 +373,8 @@ describe('Unified AI Services', () => {
|
||||
fakeProjectRoot
|
||||
);
|
||||
|
||||
expect(mockResolveEnvVariable).toHaveBeenCalledWith(
|
||||
'ANTHROPIC_API_KEY',
|
||||
undefined,
|
||||
fakeProjectRoot
|
||||
);
|
||||
expect(mockResolveEnvVariable).toHaveBeenCalledWith(
|
||||
'ANTHROPIC_API_KEY',
|
||||
undefined,
|
||||
fakeProjectRoot
|
||||
);
|
||||
expect(mockResolveEnvVariable).toHaveBeenCalledWith(
|
||||
'PERPLEXITY_API_KEY',
|
||||
undefined,
|
||||
fakeProjectRoot
|
||||
);
|
||||
|
||||
expect(mockGenerateAnthropicText).toHaveBeenCalledTimes(2);
|
||||
expect(mockGeneratePerplexityText).toHaveBeenCalledTimes(1);
|
||||
expect(mockAnthropicProvider.generateText).toHaveBeenCalledTimes(2);
|
||||
expect(mockPerplexityProvider.generateText).toHaveBeenCalledTimes(1);
|
||||
expect(mockLog).toHaveBeenCalledWith(
|
||||
'error',
|
||||
expect.stringContaining('Service call failed for role fallback')
|
||||
@@ -300,10 +386,10 @@ describe('Unified AI Services', () => {
|
||||
});
|
||||
|
||||
test('should throw error if all providers in sequence fail', async () => {
|
||||
mockGenerateAnthropicText.mockRejectedValue(
|
||||
mockAnthropicProvider.generateText.mockRejectedValue(
|
||||
new Error('Anthropic failed')
|
||||
);
|
||||
mockGeneratePerplexityText.mockRejectedValue(
|
||||
mockPerplexityProvider.generateText.mockRejectedValue(
|
||||
new Error('Perplexity failed')
|
||||
);
|
||||
|
||||
@@ -313,13 +399,13 @@ describe('Unified AI Services', () => {
|
||||
'Perplexity failed' // Error from the last attempt (research)
|
||||
);
|
||||
|
||||
expect(mockGenerateAnthropicText).toHaveBeenCalledTimes(2); // main, fallback
|
||||
expect(mockGeneratePerplexityText).toHaveBeenCalledTimes(1); // research
|
||||
expect(mockAnthropicProvider.generateText).toHaveBeenCalledTimes(2); // main, fallback
|
||||
expect(mockPerplexityProvider.generateText).toHaveBeenCalledTimes(1); // research
|
||||
});
|
||||
|
||||
test('should handle retryable errors correctly', async () => {
|
||||
const retryableError = new Error('Rate limit');
|
||||
mockGenerateAnthropicText
|
||||
mockAnthropicProvider.generateText
|
||||
.mockRejectedValueOnce(retryableError) // Fails once
|
||||
.mockResolvedValueOnce({
|
||||
// Succeeds on retry
|
||||
@@ -332,7 +418,7 @@ describe('Unified AI Services', () => {
|
||||
|
||||
expect(result.mainResult).toBe('Success after retry');
|
||||
expect(result).toHaveProperty('telemetryData');
|
||||
expect(mockGenerateAnthropicText).toHaveBeenCalledTimes(2); // Initial + 1 retry
|
||||
expect(mockAnthropicProvider.generateText).toHaveBeenCalledTimes(2); // Initial + 1 retry
|
||||
expect(mockLog).toHaveBeenCalledWith(
|
||||
'info',
|
||||
expect.stringContaining(
|
||||
@@ -343,7 +429,7 @@ describe('Unified AI Services', () => {
|
||||
|
||||
test('should use default project root or handle null if findProjectRoot returns null', async () => {
|
||||
mockFindProjectRoot.mockReturnValue(null); // Simulate not finding root
|
||||
mockGenerateAnthropicText.mockResolvedValue({
|
||||
mockAnthropicProvider.generateText.mockResolvedValue({
|
||||
text: 'Response with no root',
|
||||
usage: { inputTokens: 1, outputTokens: 1, totalTokens: 2 }
|
||||
});
|
||||
@@ -353,22 +439,9 @@ describe('Unified AI Services', () => {
|
||||
|
||||
expect(mockGetMainProvider).toHaveBeenCalledWith(null);
|
||||
expect(mockGetParametersForRole).toHaveBeenCalledWith('main', null);
|
||||
expect(mockResolveEnvVariable).toHaveBeenCalledWith(
|
||||
'ANTHROPIC_API_KEY',
|
||||
undefined,
|
||||
null
|
||||
);
|
||||
expect(mockGenerateAnthropicText).toHaveBeenCalledTimes(1);
|
||||
expect(mockAnthropicProvider.generateText).toHaveBeenCalledTimes(1);
|
||||
});
|
||||
|
||||
// New tests for API key checking and fallback sequence
|
||||
// These tests verify that:
|
||||
// 1. The system checks if API keys are set before trying to use a provider
|
||||
// 2. If a provider's API key is missing, it skips to the next provider in the fallback sequence
|
||||
// 3. The system throws an appropriate error if all providers' API keys are missing
|
||||
// 4. Ollama is a special case where API key is optional and not checked
|
||||
// 5. Session context is correctly used for API key checks
|
||||
|
||||
test('should skip provider with missing API key and try next in fallback sequence', async () => {
|
||||
// Setup isApiKeySet to return false for anthropic but true for perplexity
|
||||
mockIsApiKeySet.mockImplementation((provider, session, root) => {
|
||||
@@ -377,7 +450,7 @@ describe('Unified AI Services', () => {
|
||||
});
|
||||
|
||||
// Mock perplexity text response (since we'll skip anthropic)
|
||||
mockGeneratePerplexityText.mockResolvedValue({
|
||||
mockPerplexityProvider.generateText.mockResolvedValue({
|
||||
text: 'Perplexity response (skipped to research)',
|
||||
usage: { inputTokens: 20, outputTokens: 30, totalTokens: 50 }
|
||||
});
|
||||
@@ -416,10 +489,10 @@ describe('Unified AI Services', () => {
|
||||
);
|
||||
|
||||
// Should NOT call anthropic provider
|
||||
expect(mockGenerateAnthropicText).not.toHaveBeenCalled();
|
||||
expect(mockAnthropicProvider.generateText).not.toHaveBeenCalled();
|
||||
|
||||
// Should call perplexity provider
|
||||
expect(mockGeneratePerplexityText).toHaveBeenCalledTimes(1);
|
||||
expect(mockPerplexityProvider.generateText).toHaveBeenCalledTimes(1);
|
||||
});
|
||||
|
||||
test('should skip multiple providers with missing API keys and use first available', async () => {
|
||||
@@ -442,7 +515,7 @@ describe('Unified AI Services', () => {
|
||||
});
|
||||
|
||||
// Mock perplexity text response (since we'll skip to research)
|
||||
mockGeneratePerplexityText.mockResolvedValue({
|
||||
mockPerplexityProvider.generateText.mockResolvedValue({
|
||||
text: 'Research response after skipping main and fallback',
|
||||
usage: { inputTokens: 20, outputTokens: 30, totalTokens: 50 }
|
||||
});
|
||||
@@ -492,11 +565,11 @@ describe('Unified AI Services', () => {
|
||||
);
|
||||
|
||||
// Should NOT call skipped providers
|
||||
expect(mockGenerateAnthropicText).not.toHaveBeenCalled();
|
||||
expect(mockGenerateOpenAIText).not.toHaveBeenCalled();
|
||||
expect(mockAnthropicProvider.generateText).not.toHaveBeenCalled();
|
||||
expect(mockOpenAIProvider.generateText).not.toHaveBeenCalled();
|
||||
|
||||
// Should call perplexity provider
|
||||
expect(mockGeneratePerplexityText).toHaveBeenCalledTimes(1);
|
||||
expect(mockPerplexityProvider.generateText).toHaveBeenCalledTimes(1);
|
||||
});
|
||||
|
||||
test('should throw error if all providers in sequence have missing API keys', async () => {
|
||||
@@ -543,8 +616,8 @@ describe('Unified AI Services', () => {
|
||||
);
|
||||
|
||||
// Should NOT call any providers
|
||||
expect(mockGenerateAnthropicText).not.toHaveBeenCalled();
|
||||
expect(mockGeneratePerplexityText).not.toHaveBeenCalled();
|
||||
expect(mockAnthropicProvider.generateText).not.toHaveBeenCalled();
|
||||
expect(mockPerplexityProvider.generateText).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
test('should not check API key for Ollama provider and try to use it', async () => {
|
||||
@@ -553,7 +626,7 @@ describe('Unified AI Services', () => {
|
||||
mockGetMainModelId.mockReturnValue('llama3');
|
||||
|
||||
// Mock Ollama text generation to succeed
|
||||
mockGenerateOllamaText.mockResolvedValue({
|
||||
mockOllamaProvider.generateText.mockResolvedValue({
|
||||
text: 'Ollama response (no API key required)',
|
||||
usage: { inputTokens: 10, outputTokens: 10, totalTokens: 20 }
|
||||
});
|
||||
@@ -575,7 +648,7 @@ describe('Unified AI Services', () => {
|
||||
mockIsApiKeySet.mockReturnValue(false); // Should be ignored for Ollama
|
||||
|
||||
// Should call Ollama provider
|
||||
expect(mockGenerateOllamaText).toHaveBeenCalledTimes(1);
|
||||
expect(mockOllamaProvider.generateText).toHaveBeenCalledTimes(1);
|
||||
});
|
||||
|
||||
test('should correctly use the provided session for API key check', async () => {
|
||||
@@ -589,7 +662,7 @@ describe('Unified AI Services', () => {
|
||||
});
|
||||
|
||||
// Mock the anthropic response
|
||||
mockGenerateAnthropicText.mockResolvedValue({
|
||||
mockAnthropicProvider.generateText.mockResolvedValue({
|
||||
text: 'Anthropic response with session key',
|
||||
usage: { inputTokens: 10, outputTokens: 10, totalTokens: 20 }
|
||||
});
|
||||
|
||||
@@ -85,7 +85,7 @@ const DEFAULT_CONFIG = {
|
||||
defaultSubtasks: 5,
|
||||
defaultPriority: 'medium',
|
||||
projectName: 'Task Master',
|
||||
ollamaBaseUrl: 'http://localhost:11434/api'
|
||||
ollamaBaseURL: 'http://localhost:11434/api'
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
309
tests/unit/scripts/modules/task-manager/add-subtask.test.js
Normal file
@@ -0,0 +1,309 @@
|
||||
/**
|
||||
* Tests for the addSubtask function
|
||||
*/
|
||||
import { jest } from '@jest/globals';
|
||||
import path from 'path';
|
||||
|
||||
// Mock dependencies
|
||||
const mockReadJSON = jest.fn();
|
||||
const mockWriteJSON = jest.fn();
|
||||
const mockGenerateTaskFiles = jest.fn();
|
||||
const mockIsTaskDependentOn = jest.fn().mockReturnValue(false);
|
||||
|
||||
// Mock path module
|
||||
jest.mock('path', () => ({
|
||||
dirname: jest.fn()
|
||||
}));
|
||||
|
||||
// Define test version of the addSubtask function
|
||||
const testAddSubtask = (
|
||||
tasksPath,
|
||||
parentId,
|
||||
existingTaskId,
|
||||
newSubtaskData,
|
||||
generateFiles = true
|
||||
) => {
|
||||
// Read the existing tasks
|
||||
const data = mockReadJSON(tasksPath);
|
||||
if (!data || !data.tasks) {
|
||||
throw new Error(`Invalid or missing tasks file at ${tasksPath}`);
|
||||
}
|
||||
|
||||
// Convert parent ID to number
|
||||
const parentIdNum = parseInt(parentId, 10);
|
||||
|
||||
// Find the parent task
|
||||
const parentTask = data.tasks.find((t) => t.id === parentIdNum);
|
||||
if (!parentTask) {
|
||||
throw new Error(`Parent task with ID ${parentIdNum} not found`);
|
||||
}
|
||||
|
||||
// Initialize subtasks array if it doesn't exist
|
||||
if (!parentTask.subtasks) {
|
||||
parentTask.subtasks = [];
|
||||
}
|
||||
|
||||
let newSubtask;
|
||||
|
||||
// Case 1: Convert an existing task to a subtask
|
||||
if (existingTaskId !== null) {
|
||||
const existingTaskIdNum = parseInt(existingTaskId, 10);
|
||||
|
||||
// Find the existing task
|
||||
const existingTaskIndex = data.tasks.findIndex(
|
||||
(t) => t.id === existingTaskIdNum
|
||||
);
|
||||
if (existingTaskIndex === -1) {
|
||||
throw new Error(`Task with ID ${existingTaskIdNum} not found`);
|
||||
}
|
||||
|
||||
const existingTask = data.tasks[existingTaskIndex];
|
||||
|
||||
// Check if task is already a subtask
|
||||
if (existingTask.parentTaskId) {
|
||||
throw new Error(
|
||||
`Task ${existingTaskIdNum} is already a subtask of task ${existingTask.parentTaskId}`
|
||||
);
|
||||
}
|
||||
|
||||
// Check for circular dependency
|
||||
if (existingTaskIdNum === parentIdNum) {
|
||||
throw new Error(`Cannot make a task a subtask of itself`);
|
||||
}
|
||||
|
||||
// Check for circular dependency using mockIsTaskDependentOn
|
||||
if (mockIsTaskDependentOn()) {
|
||||
throw new Error(
|
||||
`Cannot create circular dependency: task ${parentIdNum} is already a subtask or dependent of task ${existingTaskIdNum}`
|
||||
);
|
||||
}
|
||||
|
||||
// Find the highest subtask ID to determine the next ID
|
||||
const highestSubtaskId =
|
||||
parentTask.subtasks.length > 0
|
||||
? Math.max(...parentTask.subtasks.map((st) => st.id))
|
||||
: 0;
|
||||
const newSubtaskId = highestSubtaskId + 1;
|
||||
|
||||
// Clone the existing task to be converted to a subtask
|
||||
newSubtask = {
|
||||
...existingTask,
|
||||
id: newSubtaskId,
|
||||
parentTaskId: parentIdNum
|
||||
};
|
||||
|
||||
// Add to parent's subtasks
|
||||
parentTask.subtasks.push(newSubtask);
|
||||
|
||||
// Remove the task from the main tasks array
|
||||
data.tasks.splice(existingTaskIndex, 1);
|
||||
}
|
||||
// Case 2: Create a new subtask
|
||||
else if (newSubtaskData) {
|
||||
// Find the highest subtask ID to determine the next ID
|
||||
const highestSubtaskId =
|
||||
parentTask.subtasks.length > 0
|
||||
? Math.max(...parentTask.subtasks.map((st) => st.id))
|
||||
: 0;
|
||||
const newSubtaskId = highestSubtaskId + 1;
|
||||
|
||||
// Create the new subtask object
|
||||
newSubtask = {
|
||||
id: newSubtaskId,
|
||||
title: newSubtaskData.title,
|
||||
description: newSubtaskData.description || '',
|
||||
details: newSubtaskData.details || '',
|
||||
status: newSubtaskData.status || 'pending',
|
||||
dependencies: newSubtaskData.dependencies || [],
|
||||
parentTaskId: parentIdNum
|
||||
};
|
||||
|
||||
// Add to parent's subtasks
|
||||
parentTask.subtasks.push(newSubtask);
|
||||
} else {
|
||||
throw new Error('Either existingTaskId or newSubtaskData must be provided');
|
||||
}
|
||||
|
||||
// Write the updated tasks back to the file
|
||||
mockWriteJSON(tasksPath, data);
|
||||
|
||||
// Generate task files if requested
|
||||
if (generateFiles) {
|
||||
mockGenerateTaskFiles(tasksPath, path.dirname(tasksPath));
|
||||
}
|
||||
|
||||
return newSubtask;
|
||||
};
|
||||
|
||||
describe('addSubtask function', () => {
|
||||
// Reset mocks before each test
|
||||
beforeEach(() => {
|
||||
jest.clearAllMocks();
|
||||
|
||||
// Default mock implementations
|
||||
mockReadJSON.mockImplementation(() => ({
|
||||
tasks: [
|
||||
{
|
||||
id: 1,
|
||||
title: 'Parent Task',
|
||||
description: 'This is a parent task',
|
||||
status: 'pending',
|
||||
dependencies: []
|
||||
},
|
||||
{
|
||||
id: 2,
|
||||
title: 'Existing Task',
|
||||
description: 'This is an existing task',
|
||||
status: 'pending',
|
||||
dependencies: []
|
||||
},
|
||||
{
|
||||
id: 3,
|
||||
title: 'Another Task',
|
||||
description: 'This is another task',
|
||||
status: 'pending',
|
||||
dependencies: [1]
|
||||
}
|
||||
]
|
||||
}));
|
||||
|
||||
// Setup success write response
|
||||
mockWriteJSON.mockImplementation((path, data) => {
|
||||
return data;
|
||||
});
|
||||
|
||||
// Set up default behavior for dependency check
|
||||
mockIsTaskDependentOn.mockReturnValue(false);
|
||||
});
|
||||
|
||||
test('should add a new subtask to a parent task', async () => {
|
||||
// Create new subtask data
|
||||
const newSubtaskData = {
|
||||
title: 'New Subtask',
|
||||
description: 'This is a new subtask',
|
||||
details: 'Implementation details for the subtask',
|
||||
status: 'pending',
|
||||
dependencies: []
|
||||
};
|
||||
|
||||
// Execute the test version of addSubtask
|
||||
const newSubtask = testAddSubtask(
|
||||
'tasks/tasks.json',
|
||||
1,
|
||||
null,
|
||||
newSubtaskData,
|
||||
true
|
||||
);
|
||||
|
||||
// Verify readJSON was called with the correct path
|
||||
expect(mockReadJSON).toHaveBeenCalledWith('tasks/tasks.json');
|
||||
|
||||
// Verify writeJSON was called with the correct path
|
||||
expect(mockWriteJSON).toHaveBeenCalledWith(
|
||||
'tasks/tasks.json',
|
||||
expect.any(Object)
|
||||
);
|
||||
|
||||
// Verify the subtask was created with correct data
|
||||
expect(newSubtask).toBeDefined();
|
||||
expect(newSubtask.id).toBe(1);
|
||||
expect(newSubtask.title).toBe('New Subtask');
|
||||
expect(newSubtask.parentTaskId).toBe(1);
|
||||
|
||||
// Verify generateTaskFiles was called
|
||||
expect(mockGenerateTaskFiles).toHaveBeenCalled();
|
||||
});
|
||||
|
||||
test('should convert an existing task to a subtask', async () => {
|
||||
// Execute the test version of addSubtask to convert task 2 to a subtask of task 1
|
||||
const convertedSubtask = testAddSubtask(
|
||||
'tasks/tasks.json',
|
||||
1,
|
||||
2,
|
||||
null,
|
||||
true
|
||||
);
|
||||
|
||||
// Verify readJSON was called with the correct path
|
||||
expect(mockReadJSON).toHaveBeenCalledWith('tasks/tasks.json');
|
||||
|
||||
// Verify writeJSON was called
|
||||
expect(mockWriteJSON).toHaveBeenCalled();
|
||||
|
||||
// Verify the subtask was created with correct data
|
||||
expect(convertedSubtask).toBeDefined();
|
||||
expect(convertedSubtask.id).toBe(1);
|
||||
expect(convertedSubtask.title).toBe('Existing Task');
|
||||
expect(convertedSubtask.parentTaskId).toBe(1);
|
||||
|
||||
// Verify generateTaskFiles was called
|
||||
expect(mockGenerateTaskFiles).toHaveBeenCalled();
|
||||
});
|
||||
|
||||
test('should throw an error if parent task does not exist', async () => {
|
||||
// Create new subtask data
|
||||
const newSubtaskData = {
|
||||
title: 'New Subtask',
|
||||
description: 'This is a new subtask'
|
||||
};
|
||||
|
||||
// Override mockReadJSON for this specific test case
|
||||
mockReadJSON.mockImplementationOnce(() => ({
|
||||
tasks: [
|
||||
{
|
||||
id: 1,
|
||||
title: 'Task 1',
|
||||
status: 'pending'
|
||||
}
|
||||
]
|
||||
}));
|
||||
|
||||
// Expect an error when trying to add a subtask to a non-existent parent
|
||||
expect(() =>
|
||||
testAddSubtask('tasks/tasks.json', 999, null, newSubtaskData)
|
||||
).toThrow(/Parent task with ID 999 not found/);
|
||||
|
||||
// Verify writeJSON was not called
|
||||
expect(mockWriteJSON).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
test('should throw an error if existing task does not exist', async () => {
|
||||
// Expect an error when trying to convert a non-existent task
|
||||
expect(() => testAddSubtask('tasks/tasks.json', 1, 999, null)).toThrow(
|
||||
/Task with ID 999 not found/
|
||||
);
|
||||
|
||||
// Verify writeJSON was not called
|
||||
expect(mockWriteJSON).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
test('should throw an error if trying to create a circular dependency', async () => {
|
||||
// Force the isTaskDependentOn mock to return true for this test only
|
||||
mockIsTaskDependentOn.mockReturnValueOnce(true);
|
||||
|
||||
// Expect an error when trying to create a circular dependency
|
||||
expect(() => testAddSubtask('tasks/tasks.json', 3, 1, null)).toThrow(
|
||||
/circular dependency/
|
||||
);
|
||||
|
||||
// Verify writeJSON was not called
|
||||
expect(mockWriteJSON).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
test('should not regenerate task files if generateFiles is false', async () => {
|
||||
// Create new subtask data
|
||||
const newSubtaskData = {
|
||||
title: 'New Subtask',
|
||||
description: 'This is a new subtask'
|
||||
};
|
||||
|
||||
// Execute the test version of addSubtask with generateFiles = false
|
||||
testAddSubtask('tasks/tasks.json', 1, null, newSubtaskData, false);
|
||||
|
||||
// Verify writeJSON was called
|
||||
expect(mockWriteJSON).toHaveBeenCalled();
|
||||
|
||||
// Verify task files were not regenerated
|
||||
expect(mockGenerateTaskFiles).not.toHaveBeenCalled();
|
||||
});
|
||||
});
|
||||
400
tests/unit/scripts/modules/task-manager/add-task.test.js
Normal file
@@ -0,0 +1,400 @@
|
||||
/**
|
||||
* Tests for the add-task.js module
|
||||
*/
|
||||
import { jest } from '@jest/globals';
|
||||
|
||||
// Mock the dependencies before importing the module under test
|
||||
jest.unstable_mockModule('../../../../../scripts/modules/utils.js', () => ({
|
||||
readJSON: jest.fn(),
|
||||
writeJSON: jest.fn(),
|
||||
log: jest.fn(),
|
||||
CONFIG: {
|
||||
model: 'mock-claude-model',
|
||||
maxTokens: 4000,
|
||||
temperature: 0.7,
|
||||
debug: false
|
||||
},
|
||||
truncate: jest.fn((text) => text)
|
||||
}));
|
||||
|
||||
jest.unstable_mockModule('../../../../../scripts/modules/ui.js', () => ({
|
||||
displayBanner: jest.fn(),
|
||||
getStatusWithColor: jest.fn((status) => status),
|
||||
startLoadingIndicator: jest.fn(),
|
||||
stopLoadingIndicator: jest.fn(),
|
||||
displayAiUsageSummary: jest.fn()
|
||||
}));
|
||||
|
||||
jest.unstable_mockModule(
|
||||
'../../../../../scripts/modules/ai-services-unified.js',
|
||||
() => ({
|
||||
generateObjectService: jest.fn().mockResolvedValue({
|
||||
mainResult: {
|
||||
object: {
|
||||
title: 'Task from prompt: Create a new authentication system',
|
||||
description:
|
||||
'Task generated from: Create a new authentication system',
|
||||
details:
|
||||
'Implementation details for task generated from prompt: Create a new authentication system',
|
||||
testStrategy: 'Write unit tests to verify functionality',
|
||||
dependencies: []
|
||||
}
|
||||
},
|
||||
telemetryData: {
|
||||
timestamp: new Date().toISOString(),
|
||||
userId: '1234567890',
|
||||
commandName: 'add-task',
|
||||
modelUsed: 'claude-3-5-sonnet',
|
||||
providerName: 'anthropic',
|
||||
inputTokens: 1000,
|
||||
outputTokens: 500,
|
||||
totalTokens: 1500,
|
||||
totalCost: 0.012414,
|
||||
currency: 'USD'
|
||||
}
|
||||
})
|
||||
})
|
||||
);
|
||||
|
||||
jest.unstable_mockModule(
|
||||
'../../../../../scripts/modules/config-manager.js',
|
||||
() => ({
|
||||
getDefaultPriority: jest.fn(() => 'medium')
|
||||
})
|
||||
);
|
||||
|
||||
jest.unstable_mockModule(
|
||||
'../../../../../scripts/modules/task-manager/generate-task-files.js',
|
||||
() => ({
|
||||
default: jest.fn().mockResolvedValue()
|
||||
})
|
||||
);
|
||||
|
||||
// Mock external UI libraries
|
||||
jest.unstable_mockModule('chalk', () => ({
|
||||
default: {
|
||||
white: { bold: jest.fn((text) => text) },
|
||||
cyan: Object.assign(
|
||||
jest.fn((text) => text),
|
||||
{
|
||||
bold: jest.fn((text) => text)
|
||||
}
|
||||
),
|
||||
green: jest.fn((text) => text),
|
||||
yellow: jest.fn((text) => text),
|
||||
bold: jest.fn((text) => text)
|
||||
}
|
||||
}));
|
||||
|
||||
jest.unstable_mockModule('boxen', () => ({
|
||||
default: jest.fn((text) => text)
|
||||
}));
|
||||
|
||||
jest.unstable_mockModule('cli-table3', () => ({
|
||||
default: jest.fn().mockImplementation(() => ({
|
||||
push: jest.fn(),
|
||||
toString: jest.fn(() => 'mocked table')
|
||||
}))
|
||||
}));
|
||||
|
||||
// Import the mocked modules
|
||||
const { readJSON, writeJSON, log } = await import(
|
||||
'../../../../../scripts/modules/utils.js'
|
||||
);
|
||||
|
||||
const { generateObjectService } = await import(
|
||||
'../../../../../scripts/modules/ai-services-unified.js'
|
||||
);
|
||||
|
||||
const generateTaskFiles = await import(
|
||||
'../../../../../scripts/modules/task-manager/generate-task-files.js'
|
||||
);
|
||||
|
||||
// Import the module under test
|
||||
const { default: addTask } = await import(
|
||||
	'../../../../../scripts/modules/task-manager/add-task.js'
);

describe('addTask', () => {
	const sampleTasks = {
		tasks: [
			{
				id: 1,
				title: 'Task 1',
				description: 'First task',
				status: 'pending',
				dependencies: []
			},
			{
				id: 2,
				title: 'Task 2',
				description: 'Second task',
				status: 'pending',
				dependencies: []
			},
			{
				id: 3,
				title: 'Task 3',
				description: 'Third task',
				status: 'pending',
				dependencies: [1]
			}
		]
	};

	// Create a helper function for consistent mcpLog mock
	const createMcpLogMock = () => ({
		info: jest.fn(),
		warn: jest.fn(),
		error: jest.fn(),
		debug: jest.fn(),
		success: jest.fn()
	});

	beforeEach(() => {
		jest.clearAllMocks();
		readJSON.mockReturnValue(JSON.parse(JSON.stringify(sampleTasks)));

		// Mock console.log to avoid output during tests
		jest.spyOn(console, 'log').mockImplementation(() => {});
	});

	afterEach(() => {
		console.log.mockRestore();
	});

	test('should add a new task using AI', async () => {
		// Arrange
		const prompt = 'Create a new authentication system';
		const context = {
			mcpLog: createMcpLogMock()
		};

		// Act
		const result = await addTask(
			'tasks/tasks.json',
			prompt,
			[],
			'medium',
			context,
			'json'
		);

		// Assert
		expect(readJSON).toHaveBeenCalledWith('tasks/tasks.json');
		expect(generateObjectService).toHaveBeenCalledWith(expect.any(Object));
		expect(writeJSON).toHaveBeenCalledWith(
			'tasks/tasks.json',
			expect.objectContaining({
				tasks: expect.arrayContaining([
					expect.objectContaining({
						id: 4, // Next ID after existing tasks
						title: expect.stringContaining(
							'Create a new authentication system'
						),
						status: 'pending'
					})
				])
			})
		);
		expect(generateTaskFiles.default).toHaveBeenCalled();
		expect(result).toEqual(
			expect.objectContaining({
				newTaskId: 4,
				telemetryData: expect.any(Object)
			})
		);
	});

	test('should validate dependencies when adding a task', async () => {
		// Arrange
		const prompt = 'Create a new authentication system';
		const validDependencies = [1, 2]; // These exist in sampleTasks
		const context = {
			mcpLog: createMcpLogMock()
		};

		// Act
		const result = await addTask(
			'tasks/tasks.json',
			prompt,
			validDependencies,
			'medium',
			context,
			'json'
		);

		// Assert
		expect(writeJSON).toHaveBeenCalledWith(
			'tasks/tasks.json',
			expect.objectContaining({
				tasks: expect.arrayContaining([
					expect.objectContaining({
						id: 4,
						dependencies: validDependencies
					})
				])
			})
		);
	});

	test('should filter out invalid dependencies', async () => {
		// Arrange
		const prompt = 'Create a new authentication system';
		const invalidDependencies = [999]; // Non-existent task ID
		const context = { mcpLog: createMcpLogMock() };

		// Act
		const result = await addTask(
			'tasks/tasks.json',
			prompt,
			invalidDependencies,
			'medium',
			context,
			'json'
		);

		// Assert
		expect(writeJSON).toHaveBeenCalledWith(
			'tasks/tasks.json',
			expect.objectContaining({
				tasks: expect.arrayContaining([
					expect.objectContaining({
						id: 4,
						dependencies: [] // Invalid dependencies should be filtered out
					})
				])
			})
		);
		expect(context.mcpLog.warn).toHaveBeenCalledWith(
			expect.stringContaining(
				'The following dependencies do not exist or are invalid: 999'
			)
		);
	});

	test('should use specified priority', async () => {
		// Arrange
		const prompt = 'Create a new authentication system';
		const priority = 'high';
		const context = {
			mcpLog: createMcpLogMock()
		};

		// Act
		await addTask('tasks/tasks.json', prompt, [], priority, context, 'json');

		// Assert
		expect(writeJSON).toHaveBeenCalledWith(
			'tasks/tasks.json',
			expect.objectContaining({
				tasks: expect.arrayContaining([
					expect.objectContaining({
						priority: priority
					})
				])
			})
		);
	});

	test('should handle empty tasks file', async () => {
		// Arrange
		readJSON.mockReturnValue({ tasks: [] });
		const prompt = 'Create a new authentication system';
		const context = {
			mcpLog: createMcpLogMock()
		};

		// Act
		const result = await addTask(
			'tasks/tasks.json',
			prompt,
			[],
			'medium',
			context,
			'json'
		);

		// Assert
		expect(result.newTaskId).toBe(1); // First task should have ID 1
		expect(writeJSON).toHaveBeenCalledWith(
			'tasks/tasks.json',
			expect.objectContaining({
				tasks: expect.arrayContaining([
					expect.objectContaining({
						id: 1
					})
				])
			})
		);
	});

	test('should handle missing tasks file', async () => {
		// Arrange
		readJSON.mockReturnValue(null);
		const prompt = 'Create a new authentication system';
		const context = {
			mcpLog: createMcpLogMock()
		};

		// Act
		const result = await addTask(
			'tasks/tasks.json',
			prompt,
			[],
			'medium',
			context,
			'json'
		);

		// Assert
		expect(result.newTaskId).toBe(1); // First task should have ID 1
		expect(writeJSON).toHaveBeenCalledTimes(2); // Once to create file, once to add task
	});

	test('should handle AI service errors', async () => {
		// Arrange
		generateObjectService.mockRejectedValueOnce(new Error('AI service failed'));
		const prompt = 'Create a new authentication system';
		const context = {
			mcpLog: createMcpLogMock()
		};

		// Act & Assert
		await expect(
			addTask('tasks/tasks.json', prompt, [], 'medium', context, 'json')
		).rejects.toThrow('AI service failed');
	});

	test('should handle file read errors', async () => {
		// Arrange
		readJSON.mockImplementation(() => {
			throw new Error('File read failed');
		});
		const prompt = 'Create a new authentication system';
		const context = {
			mcpLog: createMcpLogMock()
		};

		// Act & Assert
		await expect(
			addTask('tasks/tasks.json', prompt, [], 'medium', context, 'json')
		).rejects.toThrow('File read failed');
	});

	test('should handle file write errors', async () => {
		// Arrange
		writeJSON.mockImplementation(() => {
			throw new Error('File write failed');
		});
		const prompt = 'Create a new authentication system';
		const context = {
			mcpLog: createMcpLogMock()
		};

		// Act & Assert
		await expect(
			addTask('tasks/tasks.json', prompt, [], 'medium', context, 'json')
		).rejects.toThrow('File write failed');
	});
});
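Read together, the tests above exercise addTask through one positional signature. As a point of reference only, a hedged sketch of that call outside the test harness; the parameter order is inferred from these tests, not quoted from add-task.js, and the values are illustrative:

// Sketch only: signature inferred from the tests above.
// addTask(tasksPath, prompt, dependencies, priority, context, outputFormat)
const { newTaskId, telemetryData } = await addTask(
	'tasks/tasks.json', // path to the tasks file
	'Create a new authentication system', // natural-language prompt for the AI
	[1, 2], // optional dependency IDs; invalid IDs are filtered out with a warning
	'high', // priority for the new task
	{ mcpLog }, // context carrying a logger exposing info/warn/error/debug/success
	'json' // output format; 'json' returns data instead of rendering CLI output
);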
@@ -0,0 +1,402 @@
|
||||
/**
|
||||
* Tests for the analyze-task-complexity.js module
|
||||
*/
|
||||
import { jest } from '@jest/globals';
|
||||
|
||||
// Mock the dependencies before importing the module under test
|
||||
jest.unstable_mockModule('../../../../../scripts/modules/utils.js', () => ({
|
||||
readJSON: jest.fn(),
|
||||
writeJSON: jest.fn(),
|
||||
log: jest.fn(),
|
||||
CONFIG: {
|
||||
model: 'mock-claude-model',
|
||||
maxTokens: 4000,
|
||||
temperature: 0.7,
|
||||
debug: false,
|
||||
defaultSubtasks: 3
|
||||
},
|
||||
findTaskById: jest.fn(),
|
||||
readComplexityReport: jest.fn(),
|
||||
findTaskInComplexityReport: jest.fn(),
|
||||
findProjectRoot: jest.fn(() => '/mock/project/root'),
|
||||
resolveEnvVariable: jest.fn((varName) => `mock_${varName}`),
|
||||
isSilentMode: jest.fn(() => false),
|
||||
findCycles: jest.fn(() => []),
|
||||
formatTaskId: jest.fn((id) => `Task ${id}`),
|
||||
taskExists: jest.fn((tasks, id) => tasks.some((t) => t.id === id)),
|
||||
enableSilentMode: jest.fn(),
|
||||
disableSilentMode: jest.fn(),
|
||||
truncate: jest.fn((text) => text),
|
||||
addComplexityToTask: jest.fn((task, complexity) => ({ ...task, complexity })),
|
||||
aggregateTelemetry: jest.fn((telemetryArray) => telemetryArray[0] || {})
|
||||
}));
|
||||
|
||||
jest.unstable_mockModule(
|
||||
'../../../../../scripts/modules/ai-services-unified.js',
|
||||
() => ({
|
||||
generateObjectService: jest.fn().mockResolvedValue({
|
||||
mainResult: {
|
||||
tasks: []
|
||||
},
|
||||
telemetryData: {
|
||||
timestamp: new Date().toISOString(),
|
||||
userId: '1234567890',
|
||||
commandName: 'analyze-complexity',
|
||||
modelUsed: 'claude-3-5-sonnet',
|
||||
providerName: 'anthropic',
|
||||
inputTokens: 1000,
|
||||
outputTokens: 500,
|
||||
totalTokens: 1500,
|
||||
totalCost: 0.012414,
|
||||
currency: 'USD'
|
||||
}
|
||||
}),
|
||||
generateTextService: jest.fn().mockResolvedValue({
|
||||
mainResult: '[]',
|
||||
telemetryData: {
|
||||
timestamp: new Date().toISOString(),
|
||||
userId: '1234567890',
|
||||
commandName: 'analyze-complexity',
|
||||
modelUsed: 'claude-3-5-sonnet',
|
||||
providerName: 'anthropic',
|
||||
inputTokens: 1000,
|
||||
outputTokens: 500,
|
||||
totalTokens: 1500,
|
||||
totalCost: 0.012414,
|
||||
currency: 'USD'
|
||||
}
|
||||
})
|
||||
})
|
||||
);
|
||||
|
||||
jest.unstable_mockModule(
|
||||
'../../../../../scripts/modules/config-manager.js',
|
||||
() => ({
|
||||
// Core config access
|
||||
getConfig: jest.fn(() => ({
|
||||
models: { main: { provider: 'anthropic', modelId: 'claude-3-5-sonnet' } },
|
||||
global: { projectName: 'Test Project' }
|
||||
})),
|
||||
writeConfig: jest.fn(() => true),
|
||||
ConfigurationError: class extends Error {},
|
||||
isConfigFilePresent: jest.fn(() => true),
|
||||
|
||||
// Validation
|
||||
validateProvider: jest.fn(() => true),
|
||||
validateProviderModelCombination: jest.fn(() => true),
|
||||
VALID_PROVIDERS: ['anthropic', 'openai', 'perplexity'],
|
||||
MODEL_MAP: {
|
||||
anthropic: [
|
||||
{
|
||||
id: 'claude-3-5-sonnet',
|
||||
cost_per_1m_tokens: { input: 3, output: 15 }
|
||||
}
|
||||
],
|
||||
openai: [{ id: 'gpt-4', cost_per_1m_tokens: { input: 30, output: 60 } }]
|
||||
},
|
||||
getAvailableModels: jest.fn(() => [
|
||||
{
|
||||
id: 'claude-3-5-sonnet',
|
||||
name: 'Claude 3.5 Sonnet',
|
||||
provider: 'anthropic'
|
||||
},
|
||||
{ id: 'gpt-4', name: 'GPT-4', provider: 'openai' }
|
||||
]),
|
||||
|
||||
// Role-specific getters
|
||||
getMainProvider: jest.fn(() => 'anthropic'),
|
||||
getMainModelId: jest.fn(() => 'claude-3-5-sonnet'),
|
||||
getMainMaxTokens: jest.fn(() => 4000),
|
||||
getMainTemperature: jest.fn(() => 0.7),
|
||||
getResearchProvider: jest.fn(() => 'perplexity'),
|
||||
getResearchModelId: jest.fn(() => 'sonar-pro'),
|
||||
getResearchMaxTokens: jest.fn(() => 8700),
|
||||
getResearchTemperature: jest.fn(() => 0.1),
|
||||
getFallbackProvider: jest.fn(() => 'anthropic'),
|
||||
getFallbackModelId: jest.fn(() => 'claude-3-5-sonnet'),
|
||||
getFallbackMaxTokens: jest.fn(() => 4000),
|
||||
getFallbackTemperature: jest.fn(() => 0.7),
|
||||
getBaseUrlForRole: jest.fn(() => undefined),
|
||||
|
||||
// Global setting getters
|
||||
getLogLevel: jest.fn(() => 'info'),
|
||||
getDebugFlag: jest.fn(() => false),
|
||||
getDefaultNumTasks: jest.fn(() => 10),
|
||||
getDefaultSubtasks: jest.fn(() => 5),
|
||||
getDefaultPriority: jest.fn(() => 'medium'),
|
||||
getProjectName: jest.fn(() => 'Test Project'),
|
||||
getOllamaBaseURL: jest.fn(() => 'http://localhost:11434/api'),
|
||||
getAzureBaseURL: jest.fn(() => undefined),
|
||||
getParametersForRole: jest.fn(() => ({
|
||||
maxTokens: 4000,
|
||||
temperature: 0.7
|
||||
})),
|
||||
getUserId: jest.fn(() => '1234567890'),
|
||||
|
||||
// API Key Checkers
|
||||
isApiKeySet: jest.fn(() => true),
|
||||
getMcpApiKeyStatus: jest.fn(() => true),
|
||||
|
||||
// Additional functions
|
||||
getAllProviders: jest.fn(() => ['anthropic', 'openai', 'perplexity']),
|
||||
getVertexProjectId: jest.fn(() => undefined),
|
||||
getVertexLocation: jest.fn(() => undefined)
|
||||
})
|
||||
);
|
||||
|
||||
// Import the mocked modules
|
||||
const { readJSON, writeJSON, log, CONFIG } = await import(
|
||||
'../../../../../scripts/modules/utils.js'
|
||||
);
|
||||
|
||||
const { generateObjectService, generateTextService } = await import(
|
||||
'../../../../../scripts/modules/ai-services-unified.js'
|
||||
);
|
||||
|
||||
// Import the module under test
|
||||
const { default: analyzeTaskComplexity } = await import(
|
||||
'../../../../../scripts/modules/task-manager/analyze-task-complexity.js'
|
||||
);
|
||||
|
||||
describe('analyzeTaskComplexity', () => {
|
||||
// Sample response structure (simplified for these tests)
|
||||
const sampleApiResponse = {
|
||||
mainResult: JSON.stringify({
|
||||
tasks: [
|
||||
{ id: 1, complexity: 3, subtaskCount: 2 },
|
||||
{ id: 2, complexity: 7, subtaskCount: 5 },
|
||||
{ id: 3, complexity: 9, subtaskCount: 8 }
|
||||
]
|
||||
}),
|
||||
telemetryData: {
|
||||
timestamp: new Date().toISOString(),
|
||||
userId: '1234567890',
|
||||
commandName: 'analyze-complexity',
|
||||
modelUsed: 'claude-3-5-sonnet',
|
||||
providerName: 'anthropic',
|
||||
inputTokens: 1000,
|
||||
outputTokens: 500,
|
||||
totalTokens: 1500,
|
||||
totalCost: 0.012414,
|
||||
currency: 'USD'
|
||||
}
|
||||
};
|
||||
|
||||
const sampleTasks = {
|
||||
meta: { projectName: 'Test Project' },
|
||||
tasks: [
|
||||
{
|
||||
id: 1,
|
||||
title: 'Task 1',
|
||||
description: 'First task description',
|
||||
status: 'pending',
|
||||
dependencies: [],
|
||||
priority: 'high'
|
||||
},
|
||||
{
|
||||
id: 2,
|
||||
title: 'Task 2',
|
||||
description: 'Second task description',
|
||||
status: 'pending',
|
||||
dependencies: [1],
|
||||
priority: 'medium'
|
||||
},
|
||||
{
|
||||
id: 3,
|
||||
title: 'Task 3',
|
||||
description: 'Third task description',
|
||||
status: 'done',
|
||||
dependencies: [1, 2],
|
||||
priority: 'high'
|
||||
}
|
||||
]
|
||||
};
|
||||
|
||||
beforeEach(() => {
|
||||
jest.clearAllMocks();
|
||||
|
||||
// Default mock implementations
|
||||
readJSON.mockReturnValue(JSON.parse(JSON.stringify(sampleTasks)));
|
||||
generateTextService.mockResolvedValue(sampleApiResponse);
|
||||
});
|
||||
|
||||
test('should call generateTextService with the correct parameters', async () => {
|
||||
// Arrange
|
||||
const options = {
|
||||
file: 'tasks/tasks.json',
|
||||
output: 'scripts/task-complexity-report.json',
|
||||
threshold: '5',
|
||||
research: false
|
||||
};
|
||||
|
||||
// Act
|
||||
await analyzeTaskComplexity(options, {
|
||||
mcpLog: {
|
||||
info: jest.fn(),
|
||||
warn: jest.fn(),
|
||||
error: jest.fn(),
|
||||
debug: jest.fn(),
|
||||
success: jest.fn()
|
||||
}
|
||||
});
|
||||
|
||||
// Assert
|
||||
expect(readJSON).toHaveBeenCalledWith('tasks/tasks.json');
|
||||
expect(generateTextService).toHaveBeenCalledWith(expect.any(Object));
|
||||
expect(writeJSON).toHaveBeenCalledWith(
|
||||
'scripts/task-complexity-report.json',
|
||||
expect.objectContaining({
|
||||
meta: expect.objectContaining({
|
||||
thresholdScore: 5,
|
||||
projectName: 'Test Project'
|
||||
}),
|
||||
complexityAnalysis: expect.any(Array)
|
||||
})
|
||||
);
|
||||
});
|
||||
|
||||
test('should use research flag to determine which AI service to use', async () => {
|
||||
// Arrange
|
||||
const researchOptions = {
|
||||
file: 'tasks/tasks.json',
|
||||
output: 'scripts/task-complexity-report.json',
|
||||
threshold: '5',
|
||||
research: true
|
||||
};
|
||||
|
||||
// Act
|
||||
await analyzeTaskComplexity(researchOptions, {
|
||||
mcpLog: {
|
||||
info: jest.fn(),
|
||||
warn: jest.fn(),
|
||||
error: jest.fn(),
|
||||
debug: jest.fn(),
|
||||
success: jest.fn()
|
||||
}
|
||||
});
|
||||
|
||||
// Assert
|
||||
expect(generateTextService).toHaveBeenCalledWith(
|
||||
expect.objectContaining({
|
||||
role: 'research' // This should be present when research is true
|
||||
})
|
||||
);
|
||||
});
|
||||
|
||||
test('should handle different threshold parameter types correctly', async () => {
|
||||
// Test with string threshold
|
||||
let options = {
|
||||
file: 'tasks/tasks.json',
|
||||
output: 'scripts/task-complexity-report.json',
|
||||
threshold: '7'
|
||||
};
|
||||
|
||||
await analyzeTaskComplexity(options, {
|
||||
mcpLog: {
|
||||
info: jest.fn(),
|
||||
warn: jest.fn(),
|
||||
error: jest.fn(),
|
||||
debug: jest.fn(),
|
||||
success: jest.fn()
|
||||
}
|
||||
});
|
||||
|
||||
expect(writeJSON).toHaveBeenCalledWith(
|
||||
'scripts/task-complexity-report.json',
|
||||
expect.objectContaining({
|
||||
meta: expect.objectContaining({
|
||||
thresholdScore: 7
|
||||
})
|
||||
})
|
||||
);
|
||||
|
||||
// Reset mocks
|
||||
jest.clearAllMocks();
|
||||
|
||||
// Test with number threshold
|
||||
options = {
|
||||
file: 'tasks/tasks.json',
|
||||
output: 'scripts/task-complexity-report.json',
|
||||
threshold: 8
|
||||
};
|
||||
|
||||
await analyzeTaskComplexity(options, {
|
||||
mcpLog: {
|
||||
info: jest.fn(),
|
||||
warn: jest.fn(),
|
||||
error: jest.fn(),
|
||||
debug: jest.fn(),
|
||||
success: jest.fn()
|
||||
}
|
||||
});
|
||||
|
||||
expect(writeJSON).toHaveBeenCalledWith(
|
||||
'scripts/task-complexity-report.json',
|
||||
expect.objectContaining({
|
||||
meta: expect.objectContaining({
|
||||
thresholdScore: 8
|
||||
})
|
||||
})
|
||||
);
|
||||
});
|
||||
|
||||
test('should filter out completed tasks from analysis', async () => {
|
||||
// Arrange
|
||||
const options = {
|
||||
file: 'tasks/tasks.json',
|
||||
output: 'scripts/task-complexity-report.json',
|
||||
threshold: '5'
|
||||
};
|
||||
|
||||
// Act
|
||||
await analyzeTaskComplexity(options, {
|
||||
mcpLog: {
|
||||
info: jest.fn(),
|
||||
warn: jest.fn(),
|
||||
error: jest.fn(),
|
||||
debug: jest.fn(),
|
||||
success: jest.fn()
|
||||
}
|
||||
});
|
||||
|
||||
// Assert
|
||||
// Check if the prompt sent to AI doesn't include the completed task (id: 3)
|
||||
expect(generateTextService).toHaveBeenCalledWith(
|
||||
expect.objectContaining({
|
||||
prompt: expect.not.stringContaining('"id": 3')
|
||||
})
|
||||
);
|
||||
});
|
||||
|
||||
test('should handle API errors gracefully', async () => {
|
||||
// Arrange
|
||||
const options = {
|
||||
file: 'tasks/tasks.json',
|
||||
output: 'scripts/task-complexity-report.json',
|
||||
threshold: '5'
|
||||
};
|
||||
|
||||
// Force API error
|
||||
generateTextService.mockRejectedValueOnce(new Error('API Error'));
|
||||
|
||||
const mockMcpLog = {
|
||||
info: jest.fn(),
|
||||
warn: jest.fn(),
|
||||
error: jest.fn(),
|
||||
debug: jest.fn(),
|
||||
success: jest.fn()
|
||||
};
|
||||
|
||||
// Act & Assert
|
||||
await expect(
|
||||
analyzeTaskComplexity(options, {
|
||||
mcpLog: mockMcpLog
|
||||
})
|
||||
).rejects.toThrow('API Error');
|
||||
|
||||
// Check that the error was logged via mcpLog
|
||||
expect(mockMcpLog.error).toHaveBeenCalledWith(
|
||||
expect.stringContaining('API Error')
|
||||
);
|
||||
});
|
||||
});
|
||||
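The assertions above only pin down parts of the report that analyzeTaskComplexity writes. As a reading aid, a sketch of the written object, inferred from the writeJSON expectations and the mocked AI response rather than from the module itself:

// Inferred shape; entry fields mirror the mocked response ({ id, complexity, subtaskCount })
// and are illustrative rather than guaranteed by the implementation.
const exampleComplexityReport = {
	meta: {
		projectName: 'Test Project',
		thresholdScore: 7 // written as a number even when the option arrives as the string '7'
	},
	complexityAnalysis: [{ id: 2, complexity: 7, subtaskCount: 5 }]
};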
269
tests/unit/scripts/modules/task-manager/clear-subtasks.test.js
Normal file
@@ -0,0 +1,269 @@
|
||||
/**
|
||||
* Tests for the clear-subtasks.js module
|
||||
*/
|
||||
import { jest } from '@jest/globals';
|
||||
|
||||
// Mock the dependencies before importing the module under test
|
||||
jest.unstable_mockModule('../../../../../scripts/modules/utils.js', () => ({
|
||||
readJSON: jest.fn(),
|
||||
writeJSON: jest.fn(),
|
||||
log: jest.fn(),
|
||||
CONFIG: {
|
||||
model: 'mock-claude-model',
|
||||
maxTokens: 4000,
|
||||
temperature: 0.7,
|
||||
debug: false
|
||||
},
|
||||
findTaskById: jest.fn(),
|
||||
isSilentMode: jest.fn(() => false),
|
||||
truncate: jest.fn((text) => text)
|
||||
}));
|
||||
|
||||
jest.unstable_mockModule('../../../../../scripts/modules/ui.js', () => ({
|
||||
displayBanner: jest.fn()
|
||||
}));
|
||||
|
||||
jest.unstable_mockModule(
|
||||
'../../../../../scripts/modules/task-manager/generate-task-files.js',
|
||||
() => ({
|
||||
default: jest.fn().mockResolvedValue()
|
||||
})
|
||||
);
|
||||
|
||||
// Mock external UI libraries
|
||||
jest.unstable_mockModule('chalk', () => ({
|
||||
default: {
|
||||
white: {
|
||||
bold: jest.fn((text) => text)
|
||||
},
|
||||
cyan: Object.assign(
|
||||
jest.fn((text) => text),
|
||||
{
|
||||
bold: jest.fn((text) => text)
|
||||
}
|
||||
),
|
||||
green: jest.fn((text) => text),
|
||||
yellow: jest.fn((text) => text),
|
||||
bold: jest.fn((text) => text)
|
||||
}
|
||||
}));
|
||||
|
||||
jest.unstable_mockModule('boxen', () => ({
|
||||
default: jest.fn((text) => text)
|
||||
}));
|
||||
|
||||
jest.unstable_mockModule('cli-table3', () => ({
|
||||
default: jest.fn().mockImplementation(() => ({
|
||||
push: jest.fn(),
|
||||
toString: jest.fn(() => 'mocked table')
|
||||
}))
|
||||
}));
|
||||
|
||||
// Import the mocked modules
|
||||
const { readJSON, writeJSON, log } = await import(
|
||||
'../../../../../scripts/modules/utils.js'
|
||||
);
|
||||
|
||||
const generateTaskFiles = await import(
|
||||
'../../../../../scripts/modules/task-manager/generate-task-files.js'
|
||||
);
|
||||
|
||||
// Import the module under test
|
||||
const { default: clearSubtasks } = await import(
|
||||
'../../../../../scripts/modules/task-manager/clear-subtasks.js'
|
||||
);
|
||||
|
||||
describe('clearSubtasks', () => {
|
||||
const sampleTasks = {
|
||||
tasks: [
|
||||
{
|
||||
id: 1,
|
||||
title: 'Task 1',
|
||||
description: 'First task',
|
||||
status: 'pending',
|
||||
dependencies: []
|
||||
},
|
||||
{
|
||||
id: 2,
|
||||
title: 'Task 2',
|
||||
description: 'Second task',
|
||||
status: 'pending',
|
||||
dependencies: [],
|
||||
subtasks: [
|
||||
{
|
||||
id: 1,
|
||||
title: 'Subtask 2.1',
|
||||
description: 'First subtask of task 2',
|
||||
status: 'pending',
|
||||
dependencies: []
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
id: 3,
|
||||
title: 'Task 3',
|
||||
description: 'Third task',
|
||||
status: 'pending',
|
||||
dependencies: [],
|
||||
subtasks: [
|
||||
{
|
||||
id: 1,
|
||||
title: 'Subtask 3.1',
|
||||
description: 'First subtask of task 3',
|
||||
status: 'pending',
|
||||
dependencies: []
|
||||
},
|
||||
{
|
||||
id: 2,
|
||||
title: 'Subtask 3.2',
|
||||
description: 'Second subtask of task 3',
|
||||
status: 'done',
|
||||
dependencies: []
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
};
|
||||
|
||||
beforeEach(() => {
|
||||
jest.clearAllMocks();
|
||||
readJSON.mockReturnValue(JSON.parse(JSON.stringify(sampleTasks)));
|
||||
|
||||
// Mock process.exit since this function doesn't have MCP mode support
|
||||
jest.spyOn(process, 'exit').mockImplementation(() => {
|
||||
throw new Error('process.exit called');
|
||||
});
|
||||
|
||||
// Mock console.log to avoid output during tests
|
||||
jest.spyOn(console, 'log').mockImplementation(() => {});
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
// Restore process.exit
|
||||
process.exit.mockRestore();
|
||||
console.log.mockRestore();
|
||||
});
|
||||
|
||||
test('should clear subtasks from a specific task', () => {
|
||||
// Act
|
||||
clearSubtasks('tasks/tasks.json', '3');
|
||||
|
||||
// Assert
|
||||
expect(readJSON).toHaveBeenCalledWith('tasks/tasks.json');
|
||||
expect(writeJSON).toHaveBeenCalledWith(
|
||||
'tasks/tasks.json',
|
||||
expect.objectContaining({
|
||||
tasks: expect.arrayContaining([
|
||||
expect.objectContaining({
|
||||
id: 3,
|
||||
subtasks: []
|
||||
})
|
||||
])
|
||||
})
|
||||
);
|
||||
expect(generateTaskFiles.default).toHaveBeenCalled();
|
||||
});
|
||||
|
||||
test('should clear subtasks from multiple tasks when given comma-separated IDs', () => {
|
||||
// Act
|
||||
clearSubtasks('tasks/tasks.json', '2,3');
|
||||
|
||||
// Assert
|
||||
expect(readJSON).toHaveBeenCalledWith('tasks/tasks.json');
|
||||
expect(writeJSON).toHaveBeenCalledWith(
|
||||
'tasks/tasks.json',
|
||||
expect.objectContaining({
|
||||
tasks: expect.arrayContaining([
|
||||
expect.objectContaining({
|
||||
id: 2,
|
||||
subtasks: []
|
||||
}),
|
||||
expect.objectContaining({
|
||||
id: 3,
|
||||
subtasks: []
|
||||
})
|
||||
])
|
||||
})
|
||||
);
|
||||
expect(generateTaskFiles.default).toHaveBeenCalled();
|
||||
});
|
||||
|
||||
test('should handle tasks with no subtasks', () => {
|
||||
// Act
|
||||
clearSubtasks('tasks/tasks.json', '1');
|
||||
|
||||
// Assert
|
||||
expect(readJSON).toHaveBeenCalledWith('tasks/tasks.json');
|
||||
// Should not write the file if no changes were made
|
||||
expect(writeJSON).not.toHaveBeenCalled();
|
||||
expect(generateTaskFiles.default).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
test('should handle non-existent task IDs gracefully', () => {
|
||||
// Act
|
||||
clearSubtasks('tasks/tasks.json', '99');
|
||||
|
||||
// Assert
|
||||
expect(readJSON).toHaveBeenCalledWith('tasks/tasks.json');
|
||||
expect(log).toHaveBeenCalledWith('error', 'Task 99 not found');
|
||||
// Should not write the file if no changes were made
|
||||
expect(writeJSON).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
test('should handle multiple task IDs including both valid and non-existent IDs', () => {
|
||||
// Act
|
||||
clearSubtasks('tasks/tasks.json', '3,99');
|
||||
|
||||
// Assert
|
||||
expect(readJSON).toHaveBeenCalledWith('tasks/tasks.json');
|
||||
expect(log).toHaveBeenCalledWith('error', 'Task 99 not found');
|
||||
expect(writeJSON).toHaveBeenCalledWith(
|
||||
'tasks/tasks.json',
|
||||
expect.objectContaining({
|
||||
tasks: expect.arrayContaining([
|
||||
expect.objectContaining({
|
||||
id: 3,
|
||||
subtasks: []
|
||||
})
|
||||
])
|
||||
})
|
||||
);
|
||||
expect(generateTaskFiles.default).toHaveBeenCalled();
|
||||
});
|
||||
|
||||
test('should handle file read errors', () => {
|
||||
// Arrange
|
||||
readJSON.mockImplementation(() => {
|
||||
throw new Error('File read failed');
|
||||
});
|
||||
|
||||
// Act & Assert
|
||||
expect(() => {
|
||||
clearSubtasks('tasks/tasks.json', '3');
|
||||
}).toThrow('File read failed');
|
||||
});
|
||||
|
||||
test('should handle invalid tasks data', () => {
|
||||
// Arrange
|
||||
readJSON.mockReturnValue(null);
|
||||
|
||||
// Act & Assert
|
||||
expect(() => {
|
||||
clearSubtasks('tasks/tasks.json', '3');
|
||||
}).toThrow('process.exit called');
|
||||
|
||||
expect(log).toHaveBeenCalledWith('error', 'No valid tasks found.');
|
||||
});
|
||||
|
||||
test('should handle file write errors', () => {
|
||||
// Arrange
|
||||
writeJSON.mockImplementation(() => {
|
||||
throw new Error('File write failed');
|
||||
});
|
||||
|
||||
// Act & Assert
|
||||
expect(() => {
|
||||
clearSubtasks('tasks/tasks.json', '3');
|
||||
}).toThrow('File write failed');
|
||||
});
|
||||
});
|
||||
175
tests/unit/scripts/modules/task-manager/find-next-task.test.js
Normal file
@@ -0,0 +1,175 @@
/**
 * Tests for the find-next-task.js module
 */
import { jest } from '@jest/globals';
import findNextTask from '../../../../../scripts/modules/task-manager/find-next-task.js';

describe('findNextTask', () => {
	test('should return the highest priority task with all dependencies satisfied', () => {
		const tasks = [
			{
				id: 1,
				title: 'Setup Project',
				status: 'done',
				dependencies: [],
				priority: 'high'
			},
			{
				id: 2,
				title: 'Implement Core Features',
				status: 'pending',
				dependencies: [1],
				priority: 'high'
			},
			{
				id: 3,
				title: 'Create Documentation',
				status: 'pending',
				dependencies: [1],
				priority: 'medium'
			},
			{
				id: 4,
				title: 'Deploy Application',
				status: 'pending',
				dependencies: [2, 3],
				priority: 'high'
			}
		];

		const nextTask = findNextTask(tasks);

		expect(nextTask).toBeDefined();
		expect(nextTask.id).toBe(2);
		expect(nextTask.title).toBe('Implement Core Features');
	});

	test('should prioritize by priority level when dependencies are equal', () => {
		const tasks = [
			{
				id: 1,
				title: 'Setup Project',
				status: 'done',
				dependencies: [],
				priority: 'high'
			},
			{
				id: 2,
				title: 'Low Priority Task',
				status: 'pending',
				dependencies: [1],
				priority: 'low'
			},
			{
				id: 3,
				title: 'Medium Priority Task',
				status: 'pending',
				dependencies: [1],
				priority: 'medium'
			},
			{
				id: 4,
				title: 'High Priority Task',
				status: 'pending',
				dependencies: [1],
				priority: 'high'
			}
		];

		const nextTask = findNextTask(tasks);

		expect(nextTask.id).toBe(4);
		expect(nextTask.priority).toBe('high');
	});

	test('should return null when all tasks are completed', () => {
		const tasks = [
			{
				id: 1,
				title: 'Setup Project',
				status: 'done',
				dependencies: [],
				priority: 'high'
			},
			{
				id: 2,
				title: 'Implement Features',
				status: 'done',
				dependencies: [1],
				priority: 'high'
			}
		];

		const nextTask = findNextTask(tasks);

		expect(nextTask).toBeNull();
	});

	test('should return null when all pending tasks have unsatisfied dependencies', () => {
		const tasks = [
			{
				id: 1,
				title: 'Setup Project',
				status: 'pending',
				dependencies: [2],
				priority: 'high'
			},
			{
				id: 2,
				title: 'Implement Features',
				status: 'pending',
				dependencies: [1],
				priority: 'high'
			}
		];

		const nextTask = findNextTask(tasks);

		expect(nextTask).toBeNull();
	});

	test('should handle empty tasks array', () => {
		const nextTask = findNextTask([]);

		expect(nextTask).toBeNull();
	});

	test('should consider subtask dependencies when finding next task', () => {
		const tasks = [
			{
				id: 1,
				title: 'Parent Task',
				status: 'in-progress',
				dependencies: [],
				priority: 'high',
				subtasks: [
					{
						id: 1,
						title: 'Subtask 1',
						status: 'done',
						dependencies: []
					},
					{
						id: 2,
						title: 'Subtask 2',
						status: 'pending',
						dependencies: []
					}
				]
			},
			{
				id: 2,
				title: 'Dependent Task',
				status: 'pending',
				dependencies: [1],
				priority: 'high'
			}
		];

		const nextTask = findNextTask(tasks);

		// Task 2 should not be returned because Task 1 is not completely done
		// (it has a pending subtask)
		expect(nextTask).not.toEqual(expect.objectContaining({ id: 2 }));
	});
});
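The behaviour pinned down by these tests (only pending tasks whose dependencies are fully done are eligible, a pending subtask blocks dependants, priority breaks ties, and null is returned when nothing qualifies) can be summarised in a small sketch. This is a reading of the tests, not the actual find-next-task.js implementation:

// Sketch consistent with the tests above; the real module may differ in details.
function findNextTaskSketch(tasks) {
	const priorityRank = { high: 3, medium: 2, low: 1 };
	const isFullyDone = (task) =>
		task.status === 'done' &&
		(!task.subtasks || task.subtasks.every((st) => st.status === 'done'));
	const eligible = tasks.filter(
		(task) =>
			task.status === 'pending' &&
			(task.dependencies || []).every((depId) =>
				isFullyDone(tasks.find((t) => t.id === depId) || {})
			)
	);
	if (eligible.length === 0) return null;
	// Highest priority wins; the tests do not constrain ordering beyond that.
	eligible.sort(
		(a, b) => (priorityRank[b.priority] || 0) - (priorityRank[a.priority] || 0)
	);
	return eligible[0];
}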
@@ -0,0 +1,338 @@
|
||||
/**
|
||||
* Tests for the generate-task-files.js module
|
||||
*/
|
||||
import { jest } from '@jest/globals';
|
||||
|
||||
// Mock the dependencies before importing the module under test
|
||||
jest.unstable_mockModule('fs', () => ({
|
||||
default: {
|
||||
existsSync: jest.fn(),
|
||||
mkdirSync: jest.fn(),
|
||||
readdirSync: jest.fn(),
|
||||
unlinkSync: jest.fn(),
|
||||
writeFileSync: jest.fn()
|
||||
},
|
||||
existsSync: jest.fn(),
|
||||
mkdirSync: jest.fn(),
|
||||
readdirSync: jest.fn(),
|
||||
unlinkSync: jest.fn(),
|
||||
writeFileSync: jest.fn()
|
||||
}));
|
||||
|
||||
jest.unstable_mockModule('path', () => ({
|
||||
default: {
|
||||
join: jest.fn((...args) => args.join('/')),
|
||||
dirname: jest.fn((p) => p.split('/').slice(0, -1).join('/'))
|
||||
},
|
||||
join: jest.fn((...args) => args.join('/')),
|
||||
dirname: jest.fn((p) => p.split('/').slice(0, -1).join('/'))
|
||||
}));
|
||||
|
||||
jest.unstable_mockModule('../../../../../scripts/modules/utils.js', () => ({
|
||||
readJSON: jest.fn(),
|
||||
writeJSON: jest.fn(),
|
||||
log: jest.fn(),
|
||||
CONFIG: {
|
||||
model: 'mock-claude-model',
|
||||
maxTokens: 4000,
|
||||
temperature: 0.7,
|
||||
debug: false
|
||||
},
|
||||
sanitizePrompt: jest.fn((prompt) => prompt),
|
||||
truncate: jest.fn((text) => text),
|
||||
isSilentMode: jest.fn(() => false),
|
||||
findTaskById: jest.fn((tasks, id) =>
|
||||
tasks.find((t) => t.id === parseInt(id))
|
||||
),
|
||||
findProjectRoot: jest.fn(() => '/mock/project/root'),
|
||||
resolveEnvVariable: jest.fn((varName) => `mock_${varName}`)
|
||||
}));
|
||||
|
||||
jest.unstable_mockModule('../../../../../scripts/modules/ui.js', () => ({
|
||||
formatDependenciesWithStatus: jest.fn(),
|
||||
displayBanner: jest.fn(),
|
||||
displayTaskList: jest.fn(),
|
||||
startLoadingIndicator: jest.fn(() => ({ stop: jest.fn() })),
|
||||
stopLoadingIndicator: jest.fn(),
|
||||
createProgressBar: jest.fn(() => ' MOCK_PROGRESS_BAR '),
|
||||
getStatusWithColor: jest.fn((status) => status),
|
||||
getComplexityWithColor: jest.fn((score) => `Score: ${score}`)
|
||||
}));
|
||||
|
||||
jest.unstable_mockModule(
|
||||
'../../../../../scripts/modules/dependency-manager.js',
|
||||
() => ({
|
||||
validateAndFixDependencies: jest.fn(),
|
||||
validateTaskDependencies: jest.fn()
|
||||
})
|
||||
);
|
||||
|
||||
jest.unstable_mockModule(
|
||||
'../../../../../scripts/modules/config-manager.js',
|
||||
() => ({
|
||||
getDebugFlag: jest.fn(() => false),
|
||||
getProjectName: jest.fn(() => 'Test Project')
|
||||
})
|
||||
);
|
||||
|
||||
// Import the mocked modules
|
||||
const { readJSON, writeJSON, log, findProjectRoot } = await import(
|
||||
'../../../../../scripts/modules/utils.js'
|
||||
);
|
||||
const { formatDependenciesWithStatus } = await import(
|
||||
'../../../../../scripts/modules/ui.js'
|
||||
);
|
||||
const { validateAndFixDependencies } = await import(
|
||||
'../../../../../scripts/modules/dependency-manager.js'
|
||||
);
|
||||
|
||||
const fs = (await import('fs')).default;
|
||||
const path = (await import('path')).default;
|
||||
|
||||
// Import the module under test
|
||||
const { default: generateTaskFiles } = await import(
|
||||
'../../../../../scripts/modules/task-manager/generate-task-files.js'
|
||||
);
|
||||
|
||||
describe('generateTaskFiles', () => {
|
||||
// Sample task data for testing
|
||||
const sampleTasks = {
|
||||
meta: { projectName: 'Test Project' },
|
||||
tasks: [
|
||||
{
|
||||
id: 1,
|
||||
title: 'Task 1',
|
||||
description: 'First task description',
|
||||
status: 'pending',
|
||||
dependencies: [],
|
||||
priority: 'high',
|
||||
details: 'Detailed information for task 1',
|
||||
testStrategy: 'Test strategy for task 1'
|
||||
},
|
||||
{
|
||||
id: 2,
|
||||
title: 'Task 2',
|
||||
description: 'Second task description',
|
||||
status: 'pending',
|
||||
dependencies: [1],
|
||||
priority: 'medium',
|
||||
details: 'Detailed information for task 2',
|
||||
testStrategy: 'Test strategy for task 2'
|
||||
},
|
||||
{
|
||||
id: 3,
|
||||
title: 'Task with Subtasks',
|
||||
description: 'Task with subtasks description',
|
||||
status: 'pending',
|
||||
dependencies: [1, 2],
|
||||
priority: 'high',
|
||||
details: 'Detailed information for task 3',
|
||||
testStrategy: 'Test strategy for task 3',
|
||||
subtasks: [
|
||||
{
|
||||
id: 1,
|
||||
title: 'Subtask 1',
|
||||
description: 'First subtask',
|
||||
status: 'pending',
|
||||
dependencies: [],
|
||||
details: 'Details for subtask 1'
|
||||
},
|
||||
{
|
||||
id: 2,
|
||||
title: 'Subtask 2',
|
||||
description: 'Second subtask',
|
||||
status: 'pending',
|
||||
dependencies: [1],
|
||||
details: 'Details for subtask 2'
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
};
|
||||
|
||||
beforeEach(() => {
|
||||
jest.clearAllMocks();
|
||||
});
|
||||
|
||||
test('should generate task files from tasks.json - working test', async () => {
|
||||
// Set up mocks for this specific test
|
||||
readJSON.mockImplementationOnce(() => sampleTasks);
|
||||
fs.existsSync.mockImplementationOnce(() => true);
|
||||
|
||||
// Call the function
|
||||
const tasksPath = 'tasks/tasks.json';
|
||||
const outputDir = 'tasks';
|
||||
|
||||
await generateTaskFiles(tasksPath, outputDir, {
|
||||
mcpLog: { info: jest.fn() }
|
||||
});
|
||||
|
||||
// Verify the data was read
|
||||
expect(readJSON).toHaveBeenCalledWith(tasksPath);
|
||||
|
||||
// Verify dependencies were validated
|
||||
expect(validateAndFixDependencies).toHaveBeenCalledWith(
|
||||
sampleTasks,
|
||||
tasksPath
|
||||
);
|
||||
|
||||
// Verify files were written for each task
|
||||
expect(fs.writeFileSync).toHaveBeenCalledTimes(3);
|
||||
|
||||
// Verify specific file paths
|
||||
expect(fs.writeFileSync).toHaveBeenCalledWith(
|
||||
'tasks/task_001.txt',
|
||||
expect.any(String)
|
||||
);
|
||||
expect(fs.writeFileSync).toHaveBeenCalledWith(
|
||||
'tasks/task_002.txt',
|
||||
expect.any(String)
|
||||
);
|
||||
expect(fs.writeFileSync).toHaveBeenCalledWith(
|
||||
'tasks/task_003.txt',
|
||||
expect.any(String)
|
||||
);
|
||||
});
|
||||
|
||||
test('should format dependencies with status indicators', async () => {
|
||||
// Set up mocks
|
||||
readJSON.mockImplementationOnce(() => sampleTasks);
|
||||
fs.existsSync.mockImplementationOnce(() => true);
|
||||
formatDependenciesWithStatus.mockReturnValue(
|
||||
'✅ Task 1 (done), ⏱️ Task 2 (pending)'
|
||||
);
|
||||
|
||||
// Call the function
|
||||
await generateTaskFiles('tasks/tasks.json', 'tasks', {
|
||||
mcpLog: { info: jest.fn() }
|
||||
});
|
||||
|
||||
// Verify formatDependenciesWithStatus was called for tasks with dependencies
|
||||
expect(formatDependenciesWithStatus).toHaveBeenCalled();
|
||||
});
|
||||
|
||||
test('should handle tasks with no subtasks', async () => {
|
||||
// Create data with tasks that have no subtasks
|
||||
const tasksWithoutSubtasks = {
|
||||
meta: { projectName: 'Test Project' },
|
||||
tasks: [
|
||||
{
|
||||
id: 1,
|
||||
title: 'Simple Task',
|
||||
description: 'A simple task without subtasks',
|
||||
status: 'pending',
|
||||
dependencies: [],
|
||||
priority: 'medium',
|
||||
details: 'Simple task details',
|
||||
testStrategy: 'Simple test strategy'
|
||||
}
|
||||
]
|
||||
};
|
||||
|
||||
readJSON.mockImplementationOnce(() => tasksWithoutSubtasks);
|
||||
fs.existsSync.mockImplementationOnce(() => true);
|
||||
|
||||
// Call the function
|
||||
await generateTaskFiles('tasks/tasks.json', 'tasks', {
|
||||
mcpLog: { info: jest.fn() }
|
||||
});
|
||||
|
||||
// Verify the file was written
|
||||
expect(fs.writeFileSync).toHaveBeenCalledTimes(1);
|
||||
expect(fs.writeFileSync).toHaveBeenCalledWith(
|
||||
'tasks/task_001.txt',
|
||||
expect.any(String)
|
||||
);
|
||||
});
|
||||
|
||||
test("should create the output directory if it doesn't exist", async () => {
|
||||
// Set up mocks
|
||||
readJSON.mockImplementationOnce(() => sampleTasks);
|
||||
fs.existsSync.mockImplementation((path) => {
|
||||
if (path === 'tasks') return false; // Directory doesn't exist
|
||||
return true; // Other paths exist
|
||||
});
|
||||
|
||||
// Call the function
|
||||
await generateTaskFiles('tasks/tasks.json', 'tasks', {
|
||||
mcpLog: { info: jest.fn() }
|
||||
});
|
||||
|
||||
// Verify mkdir was called
|
||||
expect(fs.mkdirSync).toHaveBeenCalledWith('tasks', { recursive: true });
|
||||
});
|
||||
|
||||
test('should format task files with proper sections', async () => {
|
||||
// Set up mocks
|
||||
readJSON.mockImplementationOnce(() => sampleTasks);
|
||||
fs.existsSync.mockImplementationOnce(() => true);
|
||||
|
||||
// Call the function
|
||||
await generateTaskFiles('tasks/tasks.json', 'tasks', {
|
||||
mcpLog: { info: jest.fn() }
|
||||
});
|
||||
|
||||
// Get the content written to the first task file
|
||||
const firstTaskContent = fs.writeFileSync.mock.calls[0][1];
|
||||
|
||||
// Verify the content includes expected sections
|
||||
expect(firstTaskContent).toContain('# Task ID: 1');
|
||||
expect(firstTaskContent).toContain('# Title: Task 1');
|
||||
expect(firstTaskContent).toContain('# Description');
|
||||
expect(firstTaskContent).toContain('# Status');
|
||||
expect(firstTaskContent).toContain('# Priority');
|
||||
expect(firstTaskContent).toContain('# Dependencies');
|
||||
expect(firstTaskContent).toContain('# Details:');
|
||||
expect(firstTaskContent).toContain('# Test Strategy:');
|
||||
});
|
||||
|
||||
test('should include subtasks in task files when present', async () => {
|
||||
// Set up mocks
|
||||
readJSON.mockImplementationOnce(() => sampleTasks);
|
||||
fs.existsSync.mockImplementationOnce(() => true);
|
||||
|
||||
// Call the function
|
||||
await generateTaskFiles('tasks/tasks.json', 'tasks', {
|
||||
mcpLog: { info: jest.fn() }
|
||||
});
|
||||
|
||||
// Get the content written to the task file with subtasks (task 3)
|
||||
const taskWithSubtasksContent = fs.writeFileSync.mock.calls[2][1];
|
||||
|
||||
// Verify the content includes subtasks section
|
||||
expect(taskWithSubtasksContent).toContain('# Subtasks:');
|
||||
expect(taskWithSubtasksContent).toContain('## 1. Subtask 1');
|
||||
expect(taskWithSubtasksContent).toContain('## 2. Subtask 2');
|
||||
});
|
||||
|
||||
test('should handle errors during file generation', () => {
|
||||
// Mock an error in readJSON
|
||||
readJSON.mockImplementationOnce(() => {
|
||||
throw new Error('File read failed');
|
||||
});
|
||||
|
||||
// Call the function and expect it to handle the error
|
||||
expect(() => {
|
||||
generateTaskFiles('tasks/tasks.json', 'tasks', {
|
||||
mcpLog: { info: jest.fn() }
|
||||
});
|
||||
}).toThrow('File read failed');
|
||||
});
|
||||
|
||||
test('should validate dependencies before generating files', async () => {
|
||||
// Set up mocks
|
||||
readJSON.mockImplementationOnce(() => sampleTasks);
|
||||
fs.existsSync.mockImplementationOnce(() => true);
|
||||
|
||||
// Call the function
|
||||
await generateTaskFiles('tasks/tasks.json', 'tasks', {
|
||||
mcpLog: { info: jest.fn() }
|
||||
});
|
||||
|
||||
// Verify validateAndFixDependencies was called
|
||||
expect(validateAndFixDependencies).toHaveBeenCalledWith(
|
||||
sampleTasks,
|
||||
'tasks/tasks.json'
|
||||
);
|
||||
});
|
||||
});
|
||||
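The section assertions above constrain the layout of each generated task file without fixing every detail. Purely as an illustration (ordering and value formatting are inferred; only the section markers are asserted), the file written for task 1 of the sample data could look like this:

// Illustrative only: derived from the toContain assertions above, not from real output.
const exampleTaskFile = `# Task ID: 1
# Title: Task 1
# Status: pending
# Dependencies: (none)
# Priority: high
# Description: First task description
# Details:
Detailed information for task 1
# Test Strategy:
Test strategy for task 1`;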
332
tests/unit/scripts/modules/task-manager/list-tasks.test.js
Normal file
@@ -0,0 +1,332 @@
|
||||
/**
|
||||
* Tests for the list-tasks.js module
|
||||
*/
|
||||
import { jest } from '@jest/globals';
|
||||
|
||||
// Mock the dependencies before importing the module under test
|
||||
jest.unstable_mockModule('../../../../../scripts/modules/utils.js', () => ({
|
||||
readJSON: jest.fn(),
|
||||
writeJSON: jest.fn(),
|
||||
log: jest.fn(),
|
||||
CONFIG: {
|
||||
model: 'mock-claude-model',
|
||||
maxTokens: 4000,
|
||||
temperature: 0.7,
|
||||
debug: false
|
||||
},
|
||||
sanitizePrompt: jest.fn((prompt) => prompt),
|
||||
truncate: jest.fn((text) => text),
|
||||
isSilentMode: jest.fn(() => false),
|
||||
findTaskById: jest.fn((tasks, id) =>
|
||||
tasks.find((t) => t.id === parseInt(id))
|
||||
),
|
||||
addComplexityToTask: jest.fn(),
|
||||
readComplexityReport: jest.fn(() => null)
|
||||
}));
|
||||
|
||||
jest.unstable_mockModule('../../../../../scripts/modules/ui.js', () => ({
|
||||
formatDependenciesWithStatus: jest.fn(),
|
||||
displayBanner: jest.fn(),
|
||||
displayTaskList: jest.fn(),
|
||||
startLoadingIndicator: jest.fn(() => ({ stop: jest.fn() })),
|
||||
stopLoadingIndicator: jest.fn(),
|
||||
createProgressBar: jest.fn(() => ' MOCK_PROGRESS_BAR '),
|
||||
getStatusWithColor: jest.fn((status) => status),
|
||||
getComplexityWithColor: jest.fn((score) => `Score: ${score}`)
|
||||
}));
|
||||
|
||||
jest.unstable_mockModule(
|
||||
'../../../../../scripts/modules/dependency-manager.js',
|
||||
() => ({
|
||||
validateAndFixDependencies: jest.fn(),
|
||||
validateTaskDependencies: jest.fn()
|
||||
})
|
||||
);
|
||||
|
||||
// Import the mocked modules
|
||||
const { readJSON, log, readComplexityReport, addComplexityToTask } =
|
||||
await import('../../../../../scripts/modules/utils.js');
|
||||
const { displayTaskList } = await import(
|
||||
'../../../../../scripts/modules/ui.js'
|
||||
);
|
||||
const { validateAndFixDependencies } = await import(
|
||||
'../../../../../scripts/modules/dependency-manager.js'
|
||||
);
|
||||
|
||||
// Import the module under test
|
||||
const { default: listTasks } = await import(
|
||||
'../../../../../scripts/modules/task-manager/list-tasks.js'
|
||||
);
|
||||
|
||||
// Sample data for tests
|
||||
const sampleTasks = {
|
||||
meta: { projectName: 'Test Project' },
|
||||
tasks: [
|
||||
{
|
||||
id: 1,
|
||||
title: 'Setup Project',
|
||||
description: 'Initialize project structure',
|
||||
status: 'done',
|
||||
dependencies: [],
|
||||
priority: 'high'
|
||||
},
|
||||
{
|
||||
id: 2,
|
||||
title: 'Implement Core Features',
|
||||
description: 'Build main functionality',
|
||||
status: 'pending',
|
||||
dependencies: [1],
|
||||
priority: 'high'
|
||||
},
|
||||
{
|
||||
id: 3,
|
||||
title: 'Create UI Components',
|
||||
description: 'Build user interface',
|
||||
status: 'in-progress',
|
||||
dependencies: [1, 2],
|
||||
priority: 'medium',
|
||||
subtasks: [
|
||||
{
|
||||
id: 1,
|
||||
title: 'Create Header Component',
|
||||
description: 'Build header component',
|
||||
status: 'done',
|
||||
dependencies: []
|
||||
},
|
||||
{
|
||||
id: 2,
|
||||
title: 'Create Footer Component',
|
||||
description: 'Build footer component',
|
||||
status: 'pending',
|
||||
dependencies: [1]
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
id: 4,
|
||||
title: 'Testing',
|
||||
description: 'Write and run tests',
|
||||
status: 'cancelled',
|
||||
dependencies: [2, 3],
|
||||
priority: 'low'
|
||||
}
|
||||
]
|
||||
};
|
||||
|
||||
describe('listTasks', () => {
|
||||
beforeEach(() => {
|
||||
jest.clearAllMocks();
|
||||
|
||||
// Mock console methods to suppress output
|
||||
jest.spyOn(console, 'log').mockImplementation(() => {});
|
||||
jest.spyOn(console, 'error').mockImplementation(() => {});
|
||||
|
||||
// Mock process.exit to prevent actual exit
|
||||
jest.spyOn(process, 'exit').mockImplementation((code) => {
|
||||
throw new Error(`process.exit: ${code}`);
|
||||
});
|
||||
|
||||
// Set up default mock return values
|
||||
readJSON.mockReturnValue(JSON.parse(JSON.stringify(sampleTasks)));
|
||||
readComplexityReport.mockReturnValue(null);
|
||||
validateAndFixDependencies.mockImplementation(() => {});
|
||||
displayTaskList.mockImplementation(() => {});
|
||||
addComplexityToTask.mockImplementation(() => {});
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
// Restore console methods
|
||||
jest.restoreAllMocks();
|
||||
});
|
||||
|
||||
test('should list all tasks when no status filter is provided', async () => {
|
||||
// Arrange
|
||||
const tasksPath = 'tasks/tasks.json';
|
||||
|
||||
// Act
|
||||
const result = listTasks(tasksPath, null, null, false, 'json');
|
||||
|
||||
// Assert
|
||||
expect(readJSON).toHaveBeenCalledWith(tasksPath);
|
||||
expect(result).toEqual(
|
||||
expect.objectContaining({
|
||||
tasks: expect.arrayContaining([
|
||||
expect.objectContaining({ id: 1 }),
|
||||
expect.objectContaining({ id: 2 }),
|
||||
expect.objectContaining({ id: 3 }),
|
||||
expect.objectContaining({ id: 4 })
|
||||
])
|
||||
})
|
||||
);
|
||||
});
|
||||
|
||||
test('should filter tasks by status when status filter is provided', async () => {
|
||||
// Arrange
|
||||
const tasksPath = 'tasks/tasks.json';
|
||||
const statusFilter = 'pending';
|
||||
|
||||
// Act
|
||||
const result = listTasks(tasksPath, statusFilter, null, false, 'json');
|
||||
|
||||
// Assert
|
||||
expect(readJSON).toHaveBeenCalledWith(tasksPath);
|
||||
|
||||
// Verify only pending tasks are returned
|
||||
expect(result.tasks).toHaveLength(1);
|
||||
expect(result.tasks[0].status).toBe('pending');
|
||||
});
|
||||
|
||||
test('should filter tasks by done status', async () => {
|
||||
// Arrange
|
||||
const tasksPath = 'tasks/tasks.json';
|
||||
const statusFilter = 'done';
|
||||
|
||||
// Act
|
||||
const result = listTasks(tasksPath, statusFilter, null, false, 'json');
|
||||
|
||||
// Assert
|
||||
// Verify only done tasks are returned
|
||||
expect(result.tasks).toHaveLength(1);
|
||||
expect(result.tasks[0].status).toBe('done');
|
||||
});
|
||||
|
||||
test('should include subtasks when withSubtasks option is true', async () => {
|
||||
// Arrange
|
||||
const tasksPath = 'tasks/tasks.json';
|
||||
|
||||
// Act
|
||||
const result = listTasks(tasksPath, null, null, true, 'json');
|
||||
|
||||
// Assert
|
||||
// Verify that the task with subtasks is included
|
||||
const taskWithSubtasks = result.tasks.find((task) => task.id === 3);
|
||||
expect(taskWithSubtasks).toBeDefined();
|
||||
expect(taskWithSubtasks.subtasks).toBeDefined();
|
||||
expect(taskWithSubtasks.subtasks).toHaveLength(2);
|
||||
});
|
||||
|
||||
test('should not include subtasks when withSubtasks option is false', async () => {
|
||||
// Arrange
|
||||
const tasksPath = 'tasks/tasks.json';
|
||||
|
||||
// Act
|
||||
const result = listTasks(tasksPath, null, null, false, 'json');
|
||||
|
||||
// Assert
|
||||
// For JSON output, subtasks should still be included in the data structure
|
||||
// The withSubtasks flag affects display, not the data structure
|
||||
expect(result).toEqual(
|
||||
expect.objectContaining({
|
||||
tasks: expect.any(Array)
|
||||
})
|
||||
);
|
||||
});
|
||||
|
||||
test('should return empty array when no tasks match the status filter', async () => {
|
||||
// Arrange
|
||||
const tasksPath = 'tasks/tasks.json';
|
||||
const statusFilter = 'blocked'; // Status that doesn't exist in sample data
|
||||
|
||||
// Act
|
||||
const result = listTasks(tasksPath, statusFilter, null, false, 'json');
|
||||
|
||||
// Assert
|
||||
// Verify empty array is returned
|
||||
expect(result.tasks).toHaveLength(0);
|
||||
});
|
||||
|
||||
test('should handle file read errors', async () => {
|
||||
// Arrange
|
||||
const tasksPath = 'tasks/tasks.json';
|
||||
readJSON.mockImplementation(() => {
|
||||
throw new Error('File not found');
|
||||
});
|
||||
|
||||
// Act & Assert
|
||||
expect(() => {
|
||||
listTasks(tasksPath, null, null, false, 'json');
|
||||
}).toThrow('File not found');
|
||||
});
|
||||
|
||||
test('should validate and fix dependencies before listing', async () => {
|
||||
// Arrange
|
||||
const tasksPath = 'tasks/tasks.json';
|
||||
|
||||
// Act
|
||||
listTasks(tasksPath, null, null, false, 'json');
|
||||
|
||||
// Assert
|
||||
expect(readJSON).toHaveBeenCalledWith(tasksPath);
|
||||
// Note: validateAndFixDependencies is not called by listTasks function
|
||||
// This test just verifies the function runs without error
|
||||
});
|
||||
|
||||
test('should pass correct options to displayTaskList', async () => {
|
||||
// Arrange
|
||||
const tasksPath = 'tasks/tasks.json';
|
||||
|
||||
// Act
|
||||
const result = listTasks(tasksPath, 'pending', null, true, 'json');
|
||||
|
||||
// Assert
|
||||
// For JSON output, we don't call displayTaskList, so just verify the result structure
|
||||
expect(result).toEqual(
|
||||
expect.objectContaining({
|
||||
tasks: expect.any(Array),
|
||||
filter: 'pending',
|
||||
stats: expect.any(Object)
|
||||
})
|
||||
);
|
||||
});
|
||||
|
||||
test('should filter tasks by in-progress status', async () => {
|
||||
// Arrange
|
||||
const tasksPath = 'tasks/tasks.json';
|
||||
const statusFilter = 'in-progress';
|
||||
|
||||
// Act
|
||||
const result = listTasks(tasksPath, statusFilter, null, false, 'json');
|
||||
|
||||
// Assert
|
||||
expect(result.tasks).toHaveLength(1);
|
||||
expect(result.tasks[0].status).toBe('in-progress');
|
||||
expect(result.tasks[0].id).toBe(3);
|
||||
});
|
||||
|
||||
test('should filter tasks by cancelled status', async () => {
|
||||
// Arrange
|
||||
const tasksPath = 'tasks/tasks.json';
|
||||
const statusFilter = 'cancelled';
|
||||
|
||||
// Act
|
||||
const result = listTasks(tasksPath, statusFilter, null, false, 'json');
|
||||
|
||||
// Assert
|
||||
expect(result.tasks).toHaveLength(1);
|
||||
expect(result.tasks[0].status).toBe('cancelled');
|
||||
expect(result.tasks[0].id).toBe(4);
|
||||
});
|
||||
|
||||
test('should return the original tasks data structure', async () => {
|
||||
// Arrange
|
||||
const tasksPath = 'tasks/tasks.json';
|
||||
|
||||
// Act
|
||||
const result = listTasks(tasksPath, null, null, false, 'json');
|
||||
|
||||
// Assert
|
||||
expect(result).toEqual(
|
||||
expect.objectContaining({
|
||||
tasks: expect.any(Array),
|
||||
filter: 'all',
|
||||
stats: expect.objectContaining({
|
||||
total: 4,
|
||||
completed: expect.any(Number),
|
||||
inProgress: expect.any(Number),
|
||||
pending: expect.any(Number)
|
||||
})
|
||||
})
|
||||
);
|
||||
expect(result.tasks).toHaveLength(4);
|
||||
});
|
||||
});
|
||||
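For the 'json' output format used throughout these tests, the result object they collectively imply looks roughly as follows; the counts are the ones the sample data would produce, and fields beyond the asserted ones are not guaranteed:

// Shape inferred from the assertions above; additional fields may exist.
const exampleListResult = {
	tasks: [ /* filtered task objects; subtasks stay in the data for JSON output */ ],
	filter: 'all', // or the status filter that was passed, e.g. 'pending'
	stats: { total: 4, completed: 1, inProgress: 1, pending: 1 }
};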
462
tests/unit/scripts/modules/task-manager/parse-prd.test.js
Normal file
@@ -0,0 +1,462 @@
|
||||
/**
|
||||
* Tests for the parse-prd.js module
|
||||
*/
|
||||
import { jest } from '@jest/globals';
|
||||
|
||||
// Mock the dependencies before importing the module under test
|
||||
jest.unstable_mockModule('../../../../../scripts/modules/utils.js', () => ({
|
||||
readJSON: jest.fn(),
|
||||
writeJSON: jest.fn(),
|
||||
log: jest.fn(),
|
||||
CONFIG: {
|
||||
model: 'mock-claude-model',
|
||||
maxTokens: 4000,
|
||||
temperature: 0.7,
|
||||
debug: false
|
||||
},
|
||||
sanitizePrompt: jest.fn((prompt) => prompt),
|
||||
truncate: jest.fn((text) => text),
|
||||
isSilentMode: jest.fn(() => false),
|
||||
enableSilentMode: jest.fn(),
|
||||
disableSilentMode: jest.fn(),
|
||||
findTaskById: jest.fn(),
|
||||
promptYesNo: jest.fn()
|
||||
}));
|
||||
|
||||
jest.unstable_mockModule(
|
||||
'../../../../../scripts/modules/ai-services-unified.js',
|
||||
() => ({
|
||||
generateObjectService: jest.fn().mockResolvedValue({
|
||||
mainResult: {
|
||||
tasks: []
|
||||
},
|
||||
telemetryData: {}
|
||||
})
|
||||
})
|
||||
);
|
||||
|
||||
jest.unstable_mockModule('../../../../../scripts/modules/ui.js', () => ({
|
||||
getStatusWithColor: jest.fn((status) => status),
|
||||
startLoadingIndicator: jest.fn(),
|
||||
stopLoadingIndicator: jest.fn(),
|
||||
displayAiUsageSummary: jest.fn()
|
||||
}));
|
||||
|
||||
jest.unstable_mockModule(
|
||||
'../../../../../scripts/modules/config-manager.js',
|
||||
() => ({
|
||||
getDebugFlag: jest.fn(() => false)
|
||||
})
|
||||
);
|
||||
|
||||
jest.unstable_mockModule(
|
||||
'../../../../../scripts/modules/task-manager/generate-task-files.js',
|
||||
() => ({
|
||||
default: jest.fn().mockResolvedValue()
|
||||
})
|
||||
);
|
||||
|
||||
jest.unstable_mockModule(
|
||||
'../../../../../scripts/modules/task-manager/models.js',
|
||||
() => ({
|
||||
getModelConfiguration: jest.fn(() => ({
|
||||
model: 'mock-model',
|
||||
maxTokens: 4000,
|
||||
temperature: 0.7
|
||||
}))
|
||||
})
|
||||
);
|
||||
|
||||
// Mock fs module
|
||||
jest.unstable_mockModule('fs', () => ({
|
||||
default: {
|
||||
readFileSync: jest.fn(),
|
||||
existsSync: jest.fn(),
|
||||
mkdirSync: jest.fn(),
|
||||
writeFileSync: jest.fn()
|
||||
},
|
||||
readFileSync: jest.fn(),
|
||||
existsSync: jest.fn(),
|
||||
mkdirSync: jest.fn(),
|
||||
writeFileSync: jest.fn()
|
||||
}));
|
||||
|
||||
// Mock path module
|
||||
jest.unstable_mockModule('path', () => ({
|
||||
default: {
|
||||
dirname: jest.fn(),
|
||||
join: jest.fn((dir, file) => `${dir}/${file}`)
|
||||
},
|
||||
dirname: jest.fn(),
|
||||
join: jest.fn((dir, file) => `${dir}/${file}`)
|
||||
}));
|
||||
|
||||
// Import the mocked modules
|
||||
const { readJSON, writeJSON, log, promptYesNo } = await import(
|
||||
'../../../../../scripts/modules/utils.js'
|
||||
);
|
||||
|
||||
const { generateObjectService } = await import(
|
||||
'../../../../../scripts/modules/ai-services-unified.js'
|
||||
);
|
||||
const generateTaskFiles = (
|
||||
await import(
|
||||
'../../../../../scripts/modules/task-manager/generate-task-files.js'
|
||||
)
|
||||
).default;
|
||||
|
||||
const fs = await import('fs');
|
||||
const path = await import('path');
|
||||
|
||||
// Import the module under test
|
||||
const { default: parsePRD } = await import(
|
||||
'../../../../../scripts/modules/task-manager/parse-prd.js'
|
||||
);
|
||||
|
||||
// Sample data for tests (from main test file)
|
||||
const sampleClaudeResponse = {
|
||||
tasks: [
|
||||
{
|
||||
id: 1,
|
||||
title: 'Setup Project Structure',
|
||||
description: 'Initialize the project with necessary files and folders',
|
||||
status: 'pending',
|
||||
dependencies: [],
|
||||
priority: 'high',
|
||||
subtasks: []
|
||||
},
|
||||
{
|
||||
id: 2,
|
||||
title: 'Implement Core Features',
|
||||
description: 'Build the main functionality',
|
||||
status: 'pending',
|
||||
dependencies: [1],
|
||||
priority: 'high',
|
||||
subtasks: []
|
||||
}
|
||||
]
|
||||
};
|
||||
|
||||
describe('parsePRD', () => {
|
||||
// Mock the sample PRD content
|
||||
const samplePRDContent = '# Sample PRD for Testing';
|
||||
|
||||
// Mock existing tasks for append test
|
||||
const existingTasks = {
|
||||
tasks: [
|
||||
{ id: 1, title: 'Existing Task 1', status: 'done' },
|
||||
{ id: 2, title: 'Existing Task 2', status: 'pending' }
|
||||
]
|
||||
};
|
||||
|
||||
// Mock new tasks with continuing IDs for append test
|
||||
const newTasksWithContinuedIds = {
|
||||
tasks: [
|
||||
{ id: 3, title: 'New Task 3' },
|
||||
{ id: 4, title: 'New Task 4' }
|
||||
]
|
||||
};
|
||||
|
||||
beforeEach(() => {
|
||||
// Reset all mocks
|
||||
jest.clearAllMocks();
|
||||
|
||||
// Set up mocks for fs, path and other modules
|
||||
fs.default.readFileSync.mockReturnValue(samplePRDContent);
|
||||
fs.default.existsSync.mockReturnValue(true);
|
||||
path.default.dirname.mockReturnValue('tasks');
|
||||
generateObjectService.mockResolvedValue({
|
||||
mainResult: sampleClaudeResponse,
|
||||
telemetryData: {}
|
||||
});
|
||||
generateTaskFiles.mockResolvedValue(undefined);
|
||||
promptYesNo.mockResolvedValue(true); // Default to "yes" for confirmation
|
||||
|
||||
// Mock console.error to prevent output
|
||||
jest.spyOn(console, 'error').mockImplementation(() => {});
|
||||
jest.spyOn(console, 'log').mockImplementation(() => {});
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
// Restore all mocks after each test
|
||||
jest.restoreAllMocks();
|
||||
});
|
||||
|
||||
test('should parse a PRD file and generate tasks', async () => {
|
||||
// Setup mocks to simulate normal conditions (no existing output file)
|
||||
fs.default.existsSync.mockImplementation((path) => {
|
||||
if (path === 'tasks/tasks.json') return false; // Output file doesn't exist
|
||||
if (path === 'tasks') return true; // Directory exists
|
||||
return false;
|
||||
});
|
||||
|
||||
// Call the function
|
||||
const result = await parsePRD('path/to/prd.txt', 'tasks/tasks.json', 3);
|
||||
|
||||
// Verify fs.readFileSync was called with the correct arguments
|
||||
expect(fs.default.readFileSync).toHaveBeenCalledWith(
|
||||
'path/to/prd.txt',
|
||||
'utf8'
|
||||
);
|
||||
|
||||
// Verify generateObjectService was called
|
||||
expect(generateObjectService).toHaveBeenCalled();
|
||||
|
||||
// Verify directory check
|
||||
expect(fs.default.existsSync).toHaveBeenCalledWith('tasks');
|
||||
|
||||
// Verify writeJSON was called with the correct arguments
|
||||
expect(writeJSON).toHaveBeenCalledWith(
|
||||
'tasks/tasks.json',
|
||||
sampleClaudeResponse
|
||||
);
|
||||
|
||||
// Verify generateTaskFiles was called
|
||||
expect(generateTaskFiles).toHaveBeenCalledWith(
|
||||
'tasks/tasks.json',
|
||||
'tasks',
|
||||
{ mcpLog: undefined }
|
||||
);
|
||||
|
||||
// Verify result
|
||||
expect(result).toEqual({
|
||||
success: true,
|
||||
tasksPath: 'tasks/tasks.json',
|
||||
telemetryData: {}
|
||||
});
|
||||
|
||||
// Verify that the written data contains 2 tasks from sampleClaudeResponse
|
||||
const writtenData = writeJSON.mock.calls[0][1];
|
||||
expect(writtenData.tasks.length).toBe(2);
|
||||
});
|
||||
|
||||
test('should create the tasks directory if it does not exist', async () => {
|
||||
// Mock existsSync to return false specifically for the directory check
|
||||
// but true for the output file check (so we don't trigger confirmation path)
|
||||
fs.default.existsSync.mockImplementation((path) => {
|
||||
if (path === 'tasks/tasks.json') return false; // Output file doesn't exist
|
||||
if (path === 'tasks') return false; // Directory doesn't exist
|
||||
return true; // Default for other paths
|
||||
});
|
||||
|
||||
// Call the function
|
||||
await parsePRD('path/to/prd.txt', 'tasks/tasks.json', 3);
|
||||
|
||||
// Verify mkdir was called
|
||||
expect(fs.default.mkdirSync).toHaveBeenCalledWith('tasks', {
|
||||
recursive: true
|
||||
});
|
||||
});
|
||||
|
||||
test('should handle errors in the PRD parsing process', async () => {
|
||||
// Mock an error in generateObjectService
|
||||
const testError = new Error('Test error in AI API call');
|
||||
generateObjectService.mockRejectedValueOnce(testError);
|
||||
|
||||
// Setup mocks to simulate normal file conditions (no existing file)
|
||||
fs.default.existsSync.mockImplementation((path) => {
|
||||
if (path === 'tasks/tasks.json') return false; // Output file doesn't exist
|
||||
if (path === 'tasks') return true; // Directory exists
|
||||
return false;
|
||||
});
|
||||
|
||||
// Call the function with mcpLog to make it think it's in MCP mode (which throws instead of process.exit)
|
||||
await expect(
|
||||
parsePRD('path/to/prd.txt', 'tasks/tasks.json', 3, {
|
||||
mcpLog: {
|
||||
info: jest.fn(),
|
||||
warn: jest.fn(),
|
||||
error: jest.fn(),
|
||||
debug: jest.fn(),
|
||||
success: jest.fn()
|
||||
}
|
||||
})
|
||||
).rejects.toThrow('Test error in AI API call');
|
||||
});
|
||||
|
||||
test('should generate individual task files after creating tasks.json', async () => {
|
||||
// Setup mocks to simulate normal conditions (no existing output file)
|
||||
fs.default.existsSync.mockImplementation((path) => {
|
||||
if (path === 'tasks/tasks.json') return false; // Output file doesn't exist
|
||||
if (path === 'tasks') return true; // Directory exists
|
||||
return false;
|
||||
});
|
||||
|
||||
// Call the function
|
||||
await parsePRD('path/to/prd.txt', 'tasks/tasks.json', 3);
|
||||
|
||||
// Verify generateTaskFiles was called
|
||||
expect(generateTaskFiles).toHaveBeenCalledWith(
|
||||
'tasks/tasks.json',
|
||||
'tasks',
|
||||
{ mcpLog: undefined }
|
||||
);
|
||||
});
|
||||
|
||||
test('should overwrite tasks.json when force flag is true', async () => {
|
||||
// Setup mocks to simulate tasks.json already exists
|
||||
fs.default.existsSync.mockImplementation((path) => {
|
||||
if (path === 'tasks/tasks.json') return true; // Output file exists
|
||||
if (path === 'tasks') return true; // Directory exists
|
||||
return false;
|
||||
});
|
||||
|
||||
// Call the function with force=true to allow overwrite
|
||||
await parsePRD('path/to/prd.txt', 'tasks/tasks.json', 3, { force: true });
|
||||
|
||||
// Verify prompt was NOT called (confirmation happens at CLI level, not in core function)
|
||||
expect(promptYesNo).not.toHaveBeenCalled();
|
||||
|
||||
// Verify the file was written after force overwrite
|
||||
expect(writeJSON).toHaveBeenCalledWith(
|
||||
'tasks/tasks.json',
|
||||
sampleClaudeResponse
|
||||
);
|
||||
});
|
||||
|
||||
test('should throw error when tasks.json exists without force flag in MCP mode', async () => {
|
||||
// Setup mocks to simulate tasks.json already exists
|
||||
fs.default.existsSync.mockImplementation((path) => {
|
||||
if (path === 'tasks/tasks.json') return true; // Output file exists
|
||||
if (path === 'tasks') return true; // Directory exists
|
||||
return false;
|
||||
});
|
||||
|
||||
// Call the function with mcpLog to make it think it's in MCP mode (which throws instead of process.exit)
|
||||
await expect(
|
||||
parsePRD('path/to/prd.txt', 'tasks/tasks.json', 3, {
|
||||
mcpLog: {
|
||||
info: jest.fn(),
|
||||
warn: jest.fn(),
|
||||
error: jest.fn(),
|
||||
debug: jest.fn(),
|
||||
success: jest.fn()
|
||||
}
|
||||
})
|
||||
).rejects.toThrow('Output file tasks/tasks.json already exists');
|
||||
|
||||
// Verify prompt was NOT called (confirmation happens at CLI level, not in core function)
|
||||
expect(promptYesNo).not.toHaveBeenCalled();
|
||||
|
||||
// Verify the file was NOT written
|
||||
expect(writeJSON).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
test('should call process.exit when tasks.json exists without force flag in CLI mode', async () => {
|
||||
// Setup mocks to simulate tasks.json already exists
|
||||
fs.default.existsSync.mockImplementation((path) => {
|
||||
if (path === 'tasks/tasks.json') return true; // Output file exists
|
||||
if (path === 'tasks') return true; // Directory exists
|
||||
return false;
|
||||
});
|
||||
|
||||
// Mock process.exit for this specific test
|
||||
const mockProcessExit = jest
|
||||
.spyOn(process, 'exit')
|
||||
.mockImplementation((code) => {
|
||||
throw new Error(`process.exit: ${code}`);
|
||||
});
|
||||
|
||||
// Call the function without mcpLog (CLI mode) and expect it to throw due to mocked process.exit
|
||||
await expect(
|
||||
parsePRD('path/to/prd.txt', 'tasks/tasks.json', 3)
|
||||
).rejects.toThrow('process.exit: 1');
|
||||
|
||||
// Verify process.exit was called with code 1
|
||||
expect(mockProcessExit).toHaveBeenCalledWith(1);
|
||||
|
||||
// Verify the file was NOT written
|
||||
expect(writeJSON).not.toHaveBeenCalled();
|
||||
|
||||
// Restore the mock
|
||||
mockProcessExit.mockRestore();
|
||||
});
|
||||
|
||||
test('should not prompt for confirmation when tasks.json does not exist', async () => {
|
||||
// Setup mocks to simulate tasks.json does not exist
|
||||
fs.default.existsSync.mockImplementation((path) => {
|
||||
if (path === 'tasks/tasks.json') return false; // Output file doesn't exist
|
||||
if (path === 'tasks') return true; // Directory exists
|
||||
return false;
|
||||
});
|
||||
|
||||
// Call the function
|
||||
await parsePRD('path/to/prd.txt', 'tasks/tasks.json', 3);
|
||||
|
||||
// Verify prompt was NOT called
|
||||
expect(promptYesNo).not.toHaveBeenCalled();
|
||||
|
||||
// Verify the file was written without confirmation
|
||||
expect(writeJSON).toHaveBeenCalledWith(
|
||||
'tasks/tasks.json',
|
||||
sampleClaudeResponse
|
||||
);
|
||||
});
|
||||
|
||||
test('should append new tasks when append option is true', async () => {
|
||||
// Setup mocks to simulate tasks.json already exists
|
||||
fs.default.existsSync.mockImplementation((path) => {
|
||||
if (path === 'tasks/tasks.json') return true; // Output file exists
|
||||
if (path === 'tasks') return true; // Directory exists
|
||||
return false;
|
||||
});
|
||||
|
||||
// Mock for reading existing tasks
|
||||
readJSON.mockReturnValue(existingTasks);
|
||||
|
||||
// Mock generateObjectService to return new tasks with continuing IDs
|
||||
generateObjectService.mockResolvedValueOnce({
|
||||
mainResult: newTasksWithContinuedIds,
|
||||
telemetryData: {}
|
||||
});
|
||||
|
||||
// Call the function with append option
|
||||
const result = await parsePRD('path/to/prd.txt', 'tasks/tasks.json', 2, {
|
||||
append: true
|
||||
});
|
||||
|
||||
// Verify prompt was NOT called (no confirmation needed for append)
|
||||
expect(promptYesNo).not.toHaveBeenCalled();
|
||||
|
||||
// Verify the file was written with merged tasks
|
||||
expect(writeJSON).toHaveBeenCalledWith(
|
||||
'tasks/tasks.json',
|
||||
expect.objectContaining({
|
||||
tasks: expect.arrayContaining([
|
||||
expect.objectContaining({ id: 1 }),
|
||||
expect.objectContaining({ id: 2 }),
|
||||
expect.objectContaining({ id: 3 }),
|
||||
expect.objectContaining({ id: 4 })
|
||||
])
|
||||
})
|
||||
);
|
||||
|
||||
// Verify the result contains merged tasks
|
||||
expect(result).toEqual({
|
||||
success: true,
|
||||
tasksPath: 'tasks/tasks.json',
|
||||
telemetryData: {}
|
||||
});
|
||||
|
||||
// Verify that the written data contains 4 tasks (2 existing + 2 new)
|
||||
const writtenData = writeJSON.mock.calls[0][1];
|
||||
expect(writtenData.tasks.length).toBe(4);
|
||||
});
|
||||
|
||||
test('should skip prompt and not overwrite when append is true', async () => {
|
||||
// Setup mocks to simulate tasks.json already exists
|
||||
fs.default.existsSync.mockImplementation((path) => {
|
||||
if (path === 'tasks/tasks.json') return true; // Output file exists
|
||||
if (path === 'tasks') return true; // Directory exists
|
||||
return false;
|
||||
});
|
||||
|
||||
// Call the function with append option
|
||||
await parsePRD('path/to/prd.txt', 'tasks/tasks.json', 3, {
|
||||
append: true
|
||||
});
|
||||
|
||||
// Verify prompt was NOT called with append flag
|
||||
expect(promptYesNo).not.toHaveBeenCalled();
|
||||
});
|
||||
});
|
||||
282
tests/unit/scripts/modules/task-manager/remove-subtask.test.js
Normal file
@@ -0,0 +1,282 @@
|
||||
/**
|
||||
* Tests for the removeSubtask function
|
||||
*/
|
||||
import { jest } from '@jest/globals';
|
||||
import path from 'path';
|
||||
|
||||
// Mock dependencies
|
||||
const mockReadJSON = jest.fn();
|
||||
const mockWriteJSON = jest.fn();
|
||||
const mockGenerateTaskFiles = jest.fn();
|
||||
|
||||
// Mock path module
|
||||
jest.mock('path', () => ({
|
||||
dirname: jest.fn()
|
||||
}));
|
||||
|
||||
// Define test version of the removeSubtask function
|
||||
const testRemoveSubtask = (
|
||||
tasksPath,
|
||||
subtaskId,
|
||||
convertToTask = false,
|
||||
generateFiles = true
|
||||
) => {
|
||||
// Read the existing tasks
|
||||
const data = mockReadJSON(tasksPath);
|
||||
if (!data || !data.tasks) {
|
||||
throw new Error(`Invalid or missing tasks file at ${tasksPath}`);
|
||||
}
|
||||
|
||||
// Parse the subtask ID (format: "parentId.subtaskId")
|
||||
if (!subtaskId.includes('.')) {
|
||||
throw new Error(`Invalid subtask ID format: ${subtaskId}`);
|
||||
}
|
||||
|
||||
const [parentIdStr, subtaskIdStr] = subtaskId.split('.');
|
||||
const parentId = parseInt(parentIdStr, 10);
|
||||
const subtaskIdNum = parseInt(subtaskIdStr, 10);
|
||||
|
||||
// Find the parent task
|
||||
const parentTask = data.tasks.find((t) => t.id === parentId);
|
||||
if (!parentTask) {
|
||||
throw new Error(`Parent task with ID ${parentId} not found`);
|
||||
}
|
||||
|
||||
// Check if parent has subtasks
|
||||
if (!parentTask.subtasks || parentTask.subtasks.length === 0) {
|
||||
throw new Error(`Parent task ${parentId} has no subtasks`);
|
||||
}
|
||||
|
||||
// Find the subtask to remove
|
||||
const subtaskIndex = parentTask.subtasks.findIndex(
|
||||
(st) => st.id === subtaskIdNum
|
||||
);
|
||||
if (subtaskIndex === -1) {
|
||||
throw new Error(`Subtask ${subtaskId} not found`);
|
||||
}
|
||||
|
||||
// Get a copy of the subtask before removing it
|
||||
const removedSubtask = { ...parentTask.subtasks[subtaskIndex] };
|
||||
|
||||
// Remove the subtask from the parent
|
||||
parentTask.subtasks.splice(subtaskIndex, 1);
|
||||
|
||||
// If parent has no more subtasks, remove the subtasks array
|
||||
if (parentTask.subtasks.length === 0) {
|
||||
delete parentTask.subtasks;
|
||||
}
|
||||
|
||||
let convertedTask = null;
|
||||
|
||||
// Convert the subtask to a standalone task if requested
|
||||
if (convertToTask) {
|
||||
// Find the highest task ID to determine the next ID
|
||||
const highestId = Math.max(...data.tasks.map((t) => t.id));
|
||||
const newTaskId = highestId + 1;
|
||||
|
||||
// Create the new task from the subtask
|
||||
convertedTask = {
|
||||
id: newTaskId,
|
||||
title: removedSubtask.title,
|
||||
description: removedSubtask.description || '',
|
||||
details: removedSubtask.details || '',
|
||||
status: removedSubtask.status || 'pending',
|
||||
dependencies: removedSubtask.dependencies || [],
|
||||
priority: parentTask.priority || 'medium' // Inherit priority from parent
|
||||
};
|
||||
|
||||
// Add the parent task as a dependency if not already present
|
||||
if (!convertedTask.dependencies.includes(parentId)) {
|
||||
convertedTask.dependencies.push(parentId);
|
||||
}
|
||||
|
||||
// Add the converted task to the tasks array
|
||||
data.tasks.push(convertedTask);
|
||||
}
|
||||
|
||||
// Write the updated tasks back to the file
|
||||
mockWriteJSON(tasksPath, data);
|
||||
|
||||
// Generate task files if requested
|
||||
if (generateFiles) {
|
||||
mockGenerateTaskFiles(tasksPath, path.dirname(tasksPath));
|
||||
}
|
||||
|
||||
return convertedTask;
|
||||
};
|
||||
|
||||
describe('removeSubtask function', () => {
|
||||
// Reset mocks before each test
|
||||
beforeEach(() => {
|
||||
jest.clearAllMocks();
|
||||
|
||||
// Default mock implementations
|
||||
mockReadJSON.mockImplementation(() => ({
|
||||
tasks: [
|
||||
{
|
||||
id: 1,
|
||||
title: 'Parent Task',
|
||||
description: 'This is a parent task',
|
||||
status: 'pending',
|
||||
dependencies: [],
|
||||
subtasks: [
|
||||
{
|
||||
id: 1,
|
||||
title: 'Subtask 1',
|
||||
description: 'This is subtask 1',
|
||||
status: 'pending',
|
||||
dependencies: [],
|
||||
parentTaskId: 1
|
||||
},
|
||||
{
|
||||
id: 2,
|
||||
title: 'Subtask 2',
|
||||
description: 'This is subtask 2',
|
||||
status: 'in-progress',
|
||||
dependencies: [1], // Depends on subtask 1
|
||||
parentTaskId: 1
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
id: 2,
|
||||
title: 'Another Task',
|
||||
description: 'This is another task',
|
||||
status: 'pending',
|
||||
dependencies: [1]
|
||||
}
|
||||
]
|
||||
}));
|
||||
|
||||
// Setup success write response
|
||||
mockWriteJSON.mockImplementation((path, data) => {
|
||||
return data;
|
||||
});
|
||||
});
|
||||
|
||||
test('should remove a subtask from its parent task', async () => {
|
||||
// Execute the test version of removeSubtask to remove subtask 1.1
|
||||
testRemoveSubtask('tasks/tasks.json', '1.1', false, true);
|
||||
|
||||
// Verify readJSON was called with the correct path
|
||||
expect(mockReadJSON).toHaveBeenCalledWith('tasks/tasks.json');
|
||||
|
||||
// Verify writeJSON was called with updated data
|
||||
expect(mockWriteJSON).toHaveBeenCalled();
|
||||
|
||||
// Verify generateTaskFiles was called
|
||||
expect(mockGenerateTaskFiles).toHaveBeenCalled();
|
||||
});
|
||||
|
||||
test('should convert a subtask to a standalone task', async () => {
|
||||
// Execute the test version of removeSubtask to convert subtask 1.1 to a standalone task
|
||||
const result = testRemoveSubtask('tasks/tasks.json', '1.1', true, true);
|
||||
|
||||
// Verify the result is the new task
|
||||
expect(result).toBeDefined();
|
||||
expect(result.id).toBe(3);
|
||||
expect(result.title).toBe('Subtask 1');
|
||||
expect(result.dependencies).toContain(1);
|
||||
|
||||
// Verify writeJSON was called
|
||||
expect(mockWriteJSON).toHaveBeenCalled();
|
||||
|
||||
// Verify generateTaskFiles was called
|
||||
expect(mockGenerateTaskFiles).toHaveBeenCalled();
|
||||
});
|
||||
|
||||
test('should throw an error if subtask ID format is invalid', async () => {
|
||||
// Expect an error for invalid subtask ID format
|
||||
expect(() => testRemoveSubtask('tasks/tasks.json', '1', false)).toThrow(
|
||||
/Invalid subtask ID format/
|
||||
);
|
||||
|
||||
// Verify writeJSON was not called
|
||||
expect(mockWriteJSON).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
test('should throw an error if parent task does not exist', async () => {
|
||||
// Expect an error for non-existent parent task
|
||||
expect(() => testRemoveSubtask('tasks/tasks.json', '999.1', false)).toThrow(
|
||||
/Parent task with ID 999 not found/
|
||||
);
|
||||
|
||||
// Verify writeJSON was not called
|
||||
expect(mockWriteJSON).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
test('should throw an error if subtask does not exist', async () => {
|
||||
// Expect an error for non-existent subtask
|
||||
expect(() => testRemoveSubtask('tasks/tasks.json', '1.999', false)).toThrow(
|
||||
/Subtask 1.999 not found/
|
||||
);
|
||||
|
||||
// Verify writeJSON was not called
|
||||
expect(mockWriteJSON).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
test('should remove subtasks array if last subtask is removed', async () => {
|
||||
// Create a data object with just one subtask
|
||||
mockReadJSON.mockImplementationOnce(() => ({
|
||||
tasks: [
|
||||
{
|
||||
id: 1,
|
||||
title: 'Parent Task',
|
||||
description: 'This is a parent task',
|
||||
status: 'pending',
|
||||
dependencies: [],
|
||||
subtasks: [
|
||||
{
|
||||
id: 1,
|
||||
title: 'Last Subtask',
|
||||
description: 'This is the last subtask',
|
||||
status: 'pending',
|
||||
dependencies: [],
|
||||
parentTaskId: 1
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
id: 2,
|
||||
title: 'Another Task',
|
||||
description: 'This is another task',
|
||||
status: 'pending',
|
||||
dependencies: [1]
|
||||
}
|
||||
]
|
||||
}));
|
||||
|
||||
// Mock the behavior of writeJSON to capture the updated tasks data
|
||||
const updatedTasksData = { tasks: [] };
|
||||
mockWriteJSON.mockImplementation((path, data) => {
|
||||
// Store the data for assertions
|
||||
updatedTasksData.tasks = [...data.tasks];
|
||||
return data;
|
||||
});
|
||||
|
||||
// Remove the last subtask
|
||||
testRemoveSubtask('tasks/tasks.json', '1.1', false, true);
|
||||
|
||||
// Verify writeJSON was called
|
||||
expect(mockWriteJSON).toHaveBeenCalled();
|
||||
|
||||
// Verify the subtasks array was removed completely
|
||||
const parentTask = updatedTasksData.tasks.find((t) => t.id === 1);
|
||||
expect(parentTask).toBeDefined();
|
||||
expect(parentTask.subtasks).toBeUndefined();
|
||||
|
||||
// Verify generateTaskFiles was called
|
||||
expect(mockGenerateTaskFiles).toHaveBeenCalled();
|
||||
});
|
||||
|
||||
test('should not regenerate task files if generateFiles is false', async () => {
|
||||
// Execute the test version of removeSubtask with generateFiles = false
|
||||
testRemoveSubtask('tasks/tasks.json', '1.1', false, false);
|
||||
|
||||
// Verify writeJSON was called
|
||||
expect(mockWriteJSON).toHaveBeenCalled();
|
||||
|
||||
// Verify task files were not regenerated
|
||||
expect(mockGenerateTaskFiles).not.toHaveBeenCalled();
|
||||
});
|
||||
});
|
||||
464
tests/unit/scripts/modules/task-manager/set-task-status.test.js
Normal file
@@ -0,0 +1,464 @@
|
||||
/**
|
||||
* Tests for the set-task-status.js module
|
||||
*/
|
||||
import { jest } from '@jest/globals';
|
||||
|
||||
// Mock the dependencies before importing the module under test
|
||||
jest.unstable_mockModule('../../../../../scripts/modules/utils.js', () => ({
|
||||
readJSON: jest.fn(),
|
||||
writeJSON: jest.fn(),
|
||||
log: jest.fn(),
|
||||
CONFIG: {
|
||||
model: 'mock-claude-model',
|
||||
maxTokens: 4000,
|
||||
temperature: 0.7,
|
||||
debug: false
|
||||
},
|
||||
sanitizePrompt: jest.fn((prompt) => prompt),
|
||||
truncate: jest.fn((text) => text),
|
||||
isSilentMode: jest.fn(() => false),
|
||||
findTaskById: jest.fn((tasks, id) => tasks.find((t) => t.id === parseInt(id)))
|
||||
}));
|
||||
|
||||
jest.unstable_mockModule(
|
||||
'../../../../../scripts/modules/task-manager/generate-task-files.js',
|
||||
() => ({
|
||||
default: jest.fn().mockResolvedValue()
|
||||
})
|
||||
);
|
||||
|
||||
jest.unstable_mockModule('../../../../../scripts/modules/ui.js', () => ({
|
||||
formatDependenciesWithStatus: jest.fn(),
|
||||
displayBanner: jest.fn(),
|
||||
displayTaskList: jest.fn(),
|
||||
startLoadingIndicator: jest.fn(() => ({ stop: jest.fn() })),
|
||||
stopLoadingIndicator: jest.fn(),
|
||||
getStatusWithColor: jest.fn((status) => status)
|
||||
}));
|
||||
|
||||
jest.unstable_mockModule('../../../../../src/constants/task-status.js', () => ({
|
||||
isValidTaskStatus: jest.fn((status) =>
|
||||
[
|
||||
'pending',
|
||||
'done',
|
||||
'in-progress',
|
||||
'review',
|
||||
'deferred',
|
||||
'cancelled'
|
||||
].includes(status)
|
||||
),
|
||||
TASK_STATUS_OPTIONS: [
|
||||
'pending',
|
||||
'done',
|
||||
'in-progress',
|
||||
'review',
|
||||
'deferred',
|
||||
'cancelled'
|
||||
]
|
||||
}));
|
||||
|
||||
jest.unstable_mockModule(
|
||||
'../../../../../scripts/modules/task-manager/update-single-task-status.js',
|
||||
() => ({
|
||||
default: jest.fn()
|
||||
})
|
||||
);
|
||||
|
||||
jest.unstable_mockModule(
|
||||
'../../../../../scripts/modules/dependency-manager.js',
|
||||
() => ({
|
||||
validateTaskDependencies: jest.fn()
|
||||
})
|
||||
);
|
||||
|
||||
jest.unstable_mockModule(
|
||||
'../../../../../scripts/modules/config-manager.js',
|
||||
() => ({
|
||||
getDebugFlag: jest.fn(() => false)
|
||||
})
|
||||
);
|
||||
|
||||
// Import the mocked modules
|
||||
const { readJSON, writeJSON, log, findTaskById } = await import(
|
||||
'../../../../../scripts/modules/utils.js'
|
||||
);
|
||||
|
||||
const generateTaskFiles = (
|
||||
await import(
|
||||
'../../../../../scripts/modules/task-manager/generate-task-files.js'
|
||||
)
|
||||
).default;
|
||||
|
||||
const updateSingleTaskStatus = (
|
||||
await import(
|
||||
'../../../../../scripts/modules/task-manager/update-single-task-status.js'
|
||||
)
|
||||
).default;
|
||||
|
||||
// Import the module under test
|
||||
const { default: setTaskStatus } = await import(
|
||||
'../../../../../scripts/modules/task-manager/set-task-status.js'
|
||||
);
|
||||
|
||||
// Sample data for tests (from main test file)
|
||||
const sampleTasks = {
|
||||
meta: { projectName: 'Test Project' },
|
||||
tasks: [
|
||||
{
|
||||
id: 1,
|
||||
title: 'Task 1',
|
||||
description: 'First task description',
|
||||
status: 'pending',
|
||||
dependencies: [],
|
||||
priority: 'high',
|
||||
details: 'Detailed information for task 1',
|
||||
testStrategy: 'Test strategy for task 1'
|
||||
},
|
||||
{
|
||||
id: 2,
|
||||
title: 'Task 2',
|
||||
description: 'Second task description',
|
||||
status: 'pending',
|
||||
dependencies: [1],
|
||||
priority: 'medium',
|
||||
details: 'Detailed information for task 2',
|
||||
testStrategy: 'Test strategy for task 2'
|
||||
},
|
||||
{
|
||||
id: 3,
|
||||
title: 'Task with Subtasks',
|
||||
description: 'Task with subtasks description',
|
||||
status: 'pending',
|
||||
dependencies: [1, 2],
|
||||
priority: 'high',
|
||||
details: 'Detailed information for task 3',
|
||||
testStrategy: 'Test strategy for task 3',
|
||||
subtasks: [
|
||||
{
|
||||
id: 1,
|
||||
title: 'Subtask 1',
|
||||
description: 'First subtask',
|
||||
status: 'pending',
|
||||
dependencies: [],
|
||||
details: 'Details for subtask 1'
|
||||
},
|
||||
{
|
||||
id: 2,
|
||||
title: 'Subtask 2',
|
||||
description: 'Second subtask',
|
||||
status: 'pending',
|
||||
dependencies: [1],
|
||||
details: 'Details for subtask 2'
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
};
|
||||
|
||||
describe('setTaskStatus', () => {
|
||||
beforeEach(() => {
|
||||
jest.clearAllMocks();
|
||||
|
||||
// Mock console methods to suppress output
|
||||
jest.spyOn(console, 'log').mockImplementation(() => {});
|
||||
jest.spyOn(console, 'error').mockImplementation(() => {});
|
||||
|
||||
// Mock process.exit to prevent actual exit
|
||||
jest.spyOn(process, 'exit').mockImplementation((code) => {
|
||||
throw new Error(`process.exit: ${code}`);
|
||||
});
|
||||
|
||||
// Set up updateSingleTaskStatus mock to actually update the data
|
||||
updateSingleTaskStatus.mockImplementation(
|
||||
async (tasksPath, taskId, newStatus, data) => {
|
||||
// Handle subtask notation (e.g., "3.1")
|
||||
if (taskId.includes('.')) {
|
||||
const [parentId, subtaskId] = taskId
|
||||
.split('.')
|
||||
.map((id) => parseInt(id, 10));
|
||||
const parentTask = data.tasks.find((t) => t.id === parentId);
|
||||
if (!parentTask) {
|
||||
throw new Error(`Parent task ${parentId} not found`);
|
||||
}
|
||||
if (!parentTask.subtasks) {
|
||||
throw new Error(`Parent task ${parentId} has no subtasks`);
|
||||
}
|
||||
const subtask = parentTask.subtasks.find((st) => st.id === subtaskId);
|
||||
if (!subtask) {
|
||||
throw new Error(
|
||||
`Subtask ${subtaskId} not found in parent task ${parentId}`
|
||||
);
|
||||
}
|
||||
subtask.status = newStatus;
|
||||
} else {
|
||||
// Handle regular task
|
||||
const task = data.tasks.find((t) => t.id === parseInt(taskId, 10));
|
||||
if (!task) {
|
||||
throw new Error(`Task ${taskId} not found`);
|
||||
}
|
||||
task.status = newStatus;
|
||||
|
||||
// If marking parent as done, mark all subtasks as done too
|
||||
if (newStatus === 'done' && task.subtasks) {
|
||||
task.subtasks.forEach((subtask) => {
|
||||
subtask.status = 'done';
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
);
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
// Restore console methods
|
||||
jest.restoreAllMocks();
|
||||
});
|
||||
|
||||
test('should update task status in tasks.json', async () => {
|
||||
// Arrange
|
||||
const testTasksData = JSON.parse(JSON.stringify(sampleTasks));
|
||||
const tasksPath = '/mock/path/tasks.json';
|
||||
|
||||
readJSON.mockReturnValue(testTasksData);
|
||||
|
||||
// Act
|
||||
await setTaskStatus(tasksPath, '2', 'done', {
|
||||
mcpLog: { info: jest.fn() }
|
||||
});
|
||||
|
||||
// Assert
|
||||
expect(readJSON).toHaveBeenCalledWith(tasksPath);
|
||||
expect(writeJSON).toHaveBeenCalledWith(
|
||||
tasksPath,
|
||||
expect.objectContaining({
|
||||
tasks: expect.arrayContaining([
|
||||
expect.objectContaining({ id: 2, status: 'done' })
|
||||
])
|
||||
})
|
||||
);
|
||||
expect(generateTaskFiles).toHaveBeenCalledWith(
|
||||
tasksPath,
|
||||
expect.any(String),
|
||||
expect.any(Object)
|
||||
);
|
||||
});
|
||||
|
||||
test('should update subtask status when using dot notation', async () => {
|
||||
// Arrange
|
||||
const testTasksData = JSON.parse(JSON.stringify(sampleTasks));
|
||||
const tasksPath = '/mock/path/tasks.json';
|
||||
|
||||
readJSON.mockReturnValue(testTasksData);
|
||||
|
||||
// Act
|
||||
await setTaskStatus(tasksPath, '3.1', 'done', {
|
||||
mcpLog: { info: jest.fn() }
|
||||
});
|
||||
|
||||
// Assert
|
||||
expect(readJSON).toHaveBeenCalledWith(tasksPath);
|
||||
expect(writeJSON).toHaveBeenCalledWith(
|
||||
tasksPath,
|
||||
expect.objectContaining({
|
||||
tasks: expect.arrayContaining([
|
||||
expect.objectContaining({
|
||||
id: 3,
|
||||
subtasks: expect.arrayContaining([
|
||||
expect.objectContaining({ id: 1, status: 'done' })
|
||||
])
|
||||
})
|
||||
])
|
||||
})
|
||||
);
|
||||
});
|
||||
|
||||
test('should update multiple tasks when given comma-separated IDs', async () => {
|
||||
// Arrange
|
||||
const testTasksData = JSON.parse(JSON.stringify(sampleTasks));
|
||||
const tasksPath = '/mock/path/tasks.json';
|
||||
|
||||
readJSON.mockReturnValue(testTasksData);
|
||||
|
||||
// Act
|
||||
await setTaskStatus(tasksPath, '1,2', 'done', {
|
||||
mcpLog: { info: jest.fn() }
|
||||
});
|
||||
|
||||
// Assert
|
||||
expect(readJSON).toHaveBeenCalledWith(tasksPath);
|
||||
expect(writeJSON).toHaveBeenCalledWith(
|
||||
tasksPath,
|
||||
expect.objectContaining({
|
||||
tasks: expect.arrayContaining([
|
||||
expect.objectContaining({ id: 1, status: 'done' }),
|
||||
expect.objectContaining({ id: 2, status: 'done' })
|
||||
])
|
||||
})
|
||||
);
|
||||
});
|
||||
|
||||
test('should automatically mark subtasks as done when parent is marked done', async () => {
|
||||
// Arrange
|
||||
const testTasksData = JSON.parse(JSON.stringify(sampleTasks));
|
||||
const tasksPath = '/mock/path/tasks.json';
|
||||
|
||||
readJSON.mockReturnValue(testTasksData);
|
||||
|
||||
// Act
|
||||
await setTaskStatus(tasksPath, '3', 'done', {
|
||||
mcpLog: { info: jest.fn() }
|
||||
});
|
||||
|
||||
// Assert
|
||||
expect(writeJSON).toHaveBeenCalledWith(
|
||||
tasksPath,
|
||||
expect.objectContaining({
|
||||
tasks: expect.arrayContaining([
|
||||
expect.objectContaining({
|
||||
id: 3,
|
||||
status: 'done',
|
||||
subtasks: expect.arrayContaining([
|
||||
expect.objectContaining({ id: 1, status: 'done' }),
|
||||
expect.objectContaining({ id: 2, status: 'done' })
|
||||
])
|
||||
})
|
||||
])
|
||||
})
|
||||
);
|
||||
});
|
||||
|
||||
test('should throw error for non-existent task ID', async () => {
|
||||
// Arrange
|
||||
const testTasksData = JSON.parse(JSON.stringify(sampleTasks));
|
||||
const tasksPath = '/mock/path/tasks.json';
|
||||
|
||||
readJSON.mockReturnValue(testTasksData);
|
||||
|
||||
// Act & Assert
|
||||
await expect(
|
||||
setTaskStatus(tasksPath, '99', 'done', { mcpLog: { info: jest.fn() } })
|
||||
).rejects.toThrow('Task 99 not found');
|
||||
});
|
||||
|
||||
test('should throw error for invalid status', async () => {
|
||||
// Arrange
|
||||
const testTasksData = JSON.parse(JSON.stringify(sampleTasks));
|
||||
const tasksPath = '/mock/path/tasks.json';
|
||||
|
||||
readJSON.mockReturnValue(testTasksData);
|
||||
|
||||
// Act & Assert
|
||||
await expect(
|
||||
setTaskStatus(tasksPath, '2', 'InvalidStatus', {
|
||||
mcpLog: { info: jest.fn() }
|
||||
})
|
||||
).rejects.toThrow(/Invalid status value: InvalidStatus/);
|
||||
});
|
||||
|
||||
test('should handle parent tasks without subtasks when updating subtask', async () => {
|
||||
// Arrange
|
||||
const testTasksData = JSON.parse(JSON.stringify(sampleTasks));
|
||||
// Remove subtasks from task 3
|
||||
testTasksData.tasks[2] = { ...testTasksData.tasks[2] };
|
||||
delete testTasksData.tasks[2].subtasks;
|
||||
|
||||
const tasksPath = '/mock/path/tasks.json';
|
||||
readJSON.mockReturnValue(testTasksData);
|
||||
|
||||
// Act & Assert
|
||||
await expect(
|
||||
setTaskStatus(tasksPath, '3.1', 'done', { mcpLog: { info: jest.fn() } })
|
||||
).rejects.toThrow('has no subtasks');
|
||||
});
|
||||
|
||||
test('should handle non-existent subtask ID', async () => {
|
||||
// Arrange
|
||||
const testTasksData = JSON.parse(JSON.stringify(sampleTasks));
|
||||
const tasksPath = '/mock/path/tasks.json';
|
||||
|
||||
readJSON.mockReturnValue(testTasksData);
|
||||
|
||||
// Act & Assert
|
||||
await expect(
|
||||
setTaskStatus(tasksPath, '3.99', 'done', { mcpLog: { info: jest.fn() } })
|
||||
).rejects.toThrow('Subtask 99 not found');
|
||||
});
|
||||
|
||||
test('should handle file read errors', async () => {
|
||||
// Arrange
|
||||
const tasksPath = 'tasks/tasks.json';
|
||||
const taskId = '2';
|
||||
const newStatus = 'done';
|
||||
|
||||
readJSON.mockImplementation(() => {
|
||||
throw new Error('File not found');
|
||||
});
|
||||
|
||||
// Act & Assert
|
||||
await expect(
|
||||
setTaskStatus(tasksPath, taskId, newStatus, {
|
||||
mcpLog: { info: jest.fn() }
|
||||
})
|
||||
).rejects.toThrow('File not found');
|
||||
|
||||
// Verify that writeJSON was not called due to read error
|
||||
expect(writeJSON).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
test('should handle empty task ID input', async () => {
|
||||
// Arrange
|
||||
const tasksPath = 'tasks/tasks.json';
|
||||
const emptyTaskId = '';
|
||||
const newStatus = 'done';
|
||||
|
||||
// Act & Assert
|
||||
await expect(
|
||||
setTaskStatus(tasksPath, emptyTaskId, newStatus, {
|
||||
mcpLog: { info: jest.fn() }
|
||||
})
|
||||
).rejects.toThrow();
|
||||
|
||||
// Verify that updateSingleTaskStatus was not called
|
||||
expect(updateSingleTaskStatus).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
test('should handle whitespace in comma-separated IDs', async () => {
|
||||
// Arrange
|
||||
const testTasksData = JSON.parse(JSON.stringify(sampleTasks));
|
||||
const tasksPath = 'tasks/tasks.json';
|
||||
const taskIds = ' 1 , 2 , 3 '; // IDs with whitespace
|
||||
const newStatus = 'in-progress';
|
||||
|
||||
readJSON.mockReturnValue(testTasksData);
|
||||
|
||||
// Act
|
||||
const result = await setTaskStatus(tasksPath, taskIds, newStatus, {
|
||||
mcpLog: { info: jest.fn() }
|
||||
});
|
||||
|
||||
// Assert
|
||||
expect(updateSingleTaskStatus).toHaveBeenCalledTimes(3);
|
||||
expect(updateSingleTaskStatus).toHaveBeenCalledWith(
|
||||
tasksPath,
|
||||
'1',
|
||||
newStatus,
|
||||
testTasksData,
|
||||
false
|
||||
);
|
||||
expect(updateSingleTaskStatus).toHaveBeenCalledWith(
|
||||
tasksPath,
|
||||
'2',
|
||||
newStatus,
|
||||
testTasksData,
|
||||
false
|
||||
);
|
||||
expect(updateSingleTaskStatus).toHaveBeenCalledWith(
|
||||
tasksPath,
|
||||
'3',
|
||||
newStatus,
|
||||
testTasksData,
|
||||
false
|
||||
);
|
||||
expect(result).toBeDefined();
|
||||
});
|
||||
});
|
||||
121
tests/unit/scripts/modules/task-manager/setup.js
Normal file
@@ -0,0 +1,121 @@
|
||||
/**
|
||||
* Common setup for task-manager module tests
|
||||
*/
|
||||
import { jest } from '@jest/globals';
|
||||
|
||||
// Sample test data
|
||||
export const sampleTasks = {
|
||||
meta: { projectName: 'Test Project' },
|
||||
tasks: [
|
||||
{
|
||||
id: 1,
|
||||
title: 'Task 1',
|
||||
description: 'First task description',
|
||||
status: 'pending',
|
||||
dependencies: [],
|
||||
priority: 'high',
|
||||
details: 'Detailed information for task 1',
|
||||
testStrategy: 'Test strategy for task 1'
|
||||
},
|
||||
{
|
||||
id: 2,
|
||||
title: 'Task 2',
|
||||
description: 'Second task description',
|
||||
status: 'pending',
|
||||
dependencies: [1],
|
||||
priority: 'medium',
|
||||
details: 'Detailed information for task 2',
|
||||
testStrategy: 'Test strategy for task 2'
|
||||
},
|
||||
{
|
||||
id: 3,
|
||||
title: 'Task with Subtasks',
|
||||
description: 'Task with subtasks description',
|
||||
status: 'pending',
|
||||
dependencies: [1, 2],
|
||||
priority: 'high',
|
||||
details: 'Detailed information for task 3',
|
||||
testStrategy: 'Test strategy for task 3',
|
||||
subtasks: [
|
||||
{
|
||||
id: 1,
|
||||
title: 'Subtask 1',
|
||||
description: 'First subtask',
|
||||
status: 'pending',
|
||||
dependencies: [],
|
||||
details: 'Details for subtask 1'
|
||||
},
|
||||
{
|
||||
id: 2,
|
||||
title: 'Subtask 2',
|
||||
description: 'Second subtask',
|
||||
status: 'pending',
|
||||
dependencies: [1],
|
||||
details: 'Details for subtask 2'
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
};
|
||||
|
||||
export const emptySampleTasks = {
|
||||
meta: { projectName: 'Empty Project' },
|
||||
tasks: []
|
||||
};
|
||||
|
||||
export const sampleClaudeResponse = {
|
||||
tasks: [
|
||||
{
|
||||
id: 1,
|
||||
title: 'Setup Project',
|
||||
description: 'Initialize the project structure',
|
||||
status: 'pending',
|
||||
dependencies: [],
|
||||
priority: 'high',
|
||||
details:
|
||||
'Create repository, configure build system, and setup dev environment',
|
||||
testStrategy: 'Verify project builds and tests run'
|
||||
},
|
||||
{
|
||||
id: 2,
|
||||
title: 'Implement Core Feature',
|
||||
description: 'Create the main functionality',
|
||||
status: 'pending',
|
||||
dependencies: [1],
|
||||
priority: 'high',
|
||||
details: 'Implement the core business logic for the application',
|
||||
testStrategy:
|
||||
'Unit tests for core functions, integration tests for workflows'
|
||||
}
|
||||
]
|
||||
};
|
||||
|
||||
// Common mock setup function
|
||||
export const setupCommonMocks = () => {
|
||||
// Clear mocks before setup
|
||||
jest.clearAllMocks();
|
||||
|
||||
// Mock implementations
|
||||
const mocks = {
|
||||
readFileSync: jest.fn(),
|
||||
existsSync: jest.fn(),
|
||||
mkdirSync: jest.fn(),
|
||||
writeFileSync: jest.fn(),
|
||||
readJSON: jest.fn(),
|
||||
writeJSON: jest.fn(),
|
||||
log: jest.fn(),
|
||||
isTaskDependentOn: jest.fn().mockReturnValue(false),
|
||||
formatDependenciesWithStatus: jest.fn(),
|
||||
displayTaskList: jest.fn(),
|
||||
validateAndFixDependencies: jest.fn(),
|
||||
generateObjectService: jest.fn().mockResolvedValue({
|
||||
mainResult: { tasks: [] },
|
||||
telemetryData: {}
|
||||
})
|
||||
};
|
||||
|
||||
return mocks;
|
||||
};
|
||||
|
||||
// Helper to create a deep copy of objects to avoid test pollution
|
||||
export const cloneData = (data) => JSON.parse(JSON.stringify(data));
|
||||
188
tests/unit/scripts/modules/task-manager/update-single-task-status.test.js
Normal file
@@ -0,0 +1,188 @@
|
||||
/**
|
||||
* Tests for the updateSingleTaskStatus function
|
||||
*/
|
||||
import { jest } from '@jest/globals';
|
||||
|
||||
// Import test fixtures
|
||||
import {
|
||||
isValidTaskStatus,
|
||||
TASK_STATUS_OPTIONS
|
||||
} from '../../../../../src/constants/task-status.js';
|
||||
|
||||
// Sample tasks data for testing
|
||||
const sampleTasks = {
|
||||
tasks: [
|
||||
{
|
||||
id: 1,
|
||||
title: 'Task 1',
|
||||
description: 'First task',
|
||||
status: 'pending',
|
||||
dependencies: []
|
||||
},
|
||||
{
|
||||
id: 2,
|
||||
title: 'Task 2',
|
||||
description: 'Second task',
|
||||
status: 'pending',
|
||||
dependencies: []
|
||||
},
|
||||
{
|
||||
id: 3,
|
||||
title: 'Task 3',
|
||||
description: 'Third task with subtasks',
|
||||
status: 'pending',
|
||||
dependencies: [],
|
||||
subtasks: [
|
||||
{
|
||||
id: 1,
|
||||
title: 'Subtask 3.1',
|
||||
description: 'First subtask',
|
||||
status: 'pending',
|
||||
dependencies: []
|
||||
},
|
||||
{
|
||||
id: 2,
|
||||
title: 'Subtask 3.2',
|
||||
description: 'Second subtask',
|
||||
status: 'pending',
|
||||
dependencies: []
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
};
|
||||
|
||||
// Simplified version of updateSingleTaskStatus for testing
|
||||
const testUpdateSingleTaskStatus = (tasksData, taskIdInput, newStatus) => {
|
||||
if (!isValidTaskStatus(newStatus)) {
|
||||
throw new Error(
|
||||
`Error: Invalid status value: ${newStatus}. Use one of: ${TASK_STATUS_OPTIONS.join(', ')}`
|
||||
);
|
||||
}
|
||||
|
||||
// Check if it's a subtask (e.g., "1.2")
|
||||
if (taskIdInput.includes('.')) {
|
||||
const [parentId, subtaskId] = taskIdInput
|
||||
.split('.')
|
||||
.map((id) => parseInt(id, 10));
|
||||
|
||||
// Find the parent task
|
||||
const parentTask = tasksData.tasks.find((t) => t.id === parentId);
|
||||
if (!parentTask) {
|
||||
throw new Error(`Parent task ${parentId} not found`);
|
||||
}
|
||||
|
||||
// Find the subtask
|
||||
if (!parentTask.subtasks) {
|
||||
throw new Error(`Parent task ${parentId} has no subtasks`);
|
||||
}
|
||||
|
||||
const subtask = parentTask.subtasks.find((st) => st.id === subtaskId);
|
||||
if (!subtask) {
|
||||
throw new Error(
|
||||
`Subtask ${subtaskId} not found in parent task ${parentId}`
|
||||
);
|
||||
}
|
||||
|
||||
// Update the subtask status
|
||||
subtask.status = newStatus;
|
||||
|
||||
// Check if all subtasks are done (if setting to 'done')
|
||||
if (
|
||||
newStatus.toLowerCase() === 'done' ||
|
||||
newStatus.toLowerCase() === 'completed'
|
||||
) {
|
||||
const allSubtasksDone = parentTask.subtasks.every(
|
||||
(st) => st.status === 'done' || st.status === 'completed'
|
||||
);
|
||||
|
||||
// For testing, we don't need to output suggestions
|
||||
}
|
||||
} else {
|
||||
// Handle regular task
|
||||
const taskId = parseInt(taskIdInput, 10);
|
||||
const task = tasksData.tasks.find((t) => t.id === taskId);
|
||||
|
||||
if (!task) {
|
||||
throw new Error(`Task ${taskId} not found`);
|
||||
}
|
||||
|
||||
// Update the task status
|
||||
task.status = newStatus;
|
||||
|
||||
// If marking as done, also mark all subtasks as done
|
||||
if (
|
||||
(newStatus.toLowerCase() === 'done' ||
|
||||
newStatus.toLowerCase() === 'completed') &&
|
||||
task.subtasks &&
|
||||
task.subtasks.length > 0
|
||||
) {
|
||||
task.subtasks.forEach((subtask) => {
|
||||
subtask.status = newStatus;
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
return true;
|
||||
};
|
||||
|
||||
describe('updateSingleTaskStatus function', () => {
|
||||
test('should update regular task status', async () => {
|
||||
// Arrange
|
||||
const testTasksData = JSON.parse(JSON.stringify(sampleTasks));
|
||||
|
||||
// Act
|
||||
const result = testUpdateSingleTaskStatus(testTasksData, '2', 'done');
|
||||
|
||||
// Assert
|
||||
expect(result).toBe(true);
|
||||
expect(testTasksData.tasks[1].status).toBe('done');
|
||||
});
|
||||
|
||||
test('should throw error for invalid status', async () => {
|
||||
// Arrange
|
||||
const testTasksData = JSON.parse(JSON.stringify(sampleTasks));
|
||||
|
||||
// Assert
|
||||
expect(() =>
|
||||
testUpdateSingleTaskStatus(testTasksData, '2', 'Done')
|
||||
).toThrow(/Error: Invalid status value: Done./);
|
||||
});
|
||||
|
||||
test('should update subtask status', async () => {
|
||||
// Arrange
|
||||
const testTasksData = JSON.parse(JSON.stringify(sampleTasks));
|
||||
|
||||
// Act
|
||||
const result = testUpdateSingleTaskStatus(testTasksData, '3.1', 'done');
|
||||
|
||||
// Assert
|
||||
expect(result).toBe(true);
|
||||
expect(testTasksData.tasks[2].subtasks[0].status).toBe('done');
|
||||
});
|
||||
|
||||
test('should handle parent tasks without subtasks', async () => {
|
||||
// Arrange
|
||||
const testTasksData = JSON.parse(JSON.stringify(sampleTasks));
|
||||
|
||||
// Remove subtasks from task 3
|
||||
const taskWithoutSubtasks = { ...testTasksData.tasks[2] };
|
||||
delete taskWithoutSubtasks.subtasks;
|
||||
testTasksData.tasks[2] = taskWithoutSubtasks;
|
||||
|
||||
// Assert
|
||||
expect(() =>
|
||||
testUpdateSingleTaskStatus(testTasksData, '3.1', 'done')
|
||||
).toThrow('has no subtasks');
|
||||
});
|
||||
|
||||
test('should handle non-existent subtask ID', async () => {
|
||||
// Arrange
|
||||
const testTasksData = JSON.parse(JSON.stringify(sampleTasks));
|
||||
|
||||
// Assert
|
||||
expect(() =>
|
||||
testUpdateSingleTaskStatus(testTasksData, '3.99', 'done')
|
||||
).toThrow('Subtask 99 not found');
|
||||
});
|
||||
});
|
||||
217
tests/unit/scripts/modules/task-manager/update-tasks.test.js
Normal file
@@ -0,0 +1,217 @@
|
||||
/**
|
||||
* Tests for the update-tasks.js module
|
||||
*/
|
||||
import { jest } from '@jest/globals';
|
||||
|
||||
// Mock the dependencies before importing the module under test
|
||||
jest.unstable_mockModule('../../../../../scripts/modules/utils.js', () => ({
|
||||
readJSON: jest.fn(),
|
||||
writeJSON: jest.fn(),
|
||||
log: jest.fn(),
|
||||
CONFIG: {
|
||||
model: 'mock-claude-model',
|
||||
maxTokens: 4000,
|
||||
temperature: 0.7,
|
||||
debug: false
|
||||
},
|
||||
sanitizePrompt: jest.fn((prompt) => prompt),
|
||||
truncate: jest.fn((text) => text),
|
||||
isSilentMode: jest.fn(() => false)
|
||||
}));
|
||||
|
||||
jest.unstable_mockModule(
|
||||
'../../../../../scripts/modules/ai-services-unified.js',
|
||||
() => ({
|
||||
generateTextService: jest.fn().mockResolvedValue({
|
||||
mainResult: '[]', // mainResult is the text string directly
|
||||
telemetryData: {}
|
||||
})
|
||||
})
|
||||
);
|
||||
|
||||
jest.unstable_mockModule('../../../../../scripts/modules/ui.js', () => ({
|
||||
getStatusWithColor: jest.fn((status) => status),
|
||||
startLoadingIndicator: jest.fn(),
|
||||
stopLoadingIndicator: jest.fn(),
|
||||
displayAiUsageSummary: jest.fn()
|
||||
}));
|
||||
|
||||
jest.unstable_mockModule(
|
||||
'../../../../../scripts/modules/config-manager.js',
|
||||
() => ({
|
||||
getDebugFlag: jest.fn(() => false)
|
||||
})
|
||||
);
|
||||
|
||||
jest.unstable_mockModule(
|
||||
'../../../../../scripts/modules/task-manager/generate-task-files.js',
|
||||
() => ({
|
||||
default: jest.fn().mockResolvedValue()
|
||||
})
|
||||
);
|
||||
|
||||
jest.unstable_mockModule(
|
||||
'../../../../../scripts/modules/task-manager/models.js',
|
||||
() => ({
|
||||
getModelConfiguration: jest.fn(() => ({
|
||||
model: 'mock-model',
|
||||
maxTokens: 4000,
|
||||
temperature: 0.7
|
||||
}))
|
||||
})
|
||||
);
|
||||
|
||||
// Import the mocked modules
|
||||
const { readJSON, writeJSON, log, CONFIG } = await import(
|
||||
'../../../../../scripts/modules/utils.js'
|
||||
);
|
||||
|
||||
const { generateTextService } = await import(
|
||||
'../../../../../scripts/modules/ai-services-unified.js'
|
||||
);
|
||||
|
||||
// Import the module under test
|
||||
const { default: updateTasks } = await import(
|
||||
'../../../../../scripts/modules/task-manager/update-tasks.js'
|
||||
);
|
||||
|
||||
describe('updateTasks', () => {
|
||||
beforeEach(() => {
|
||||
jest.clearAllMocks();
|
||||
});
|
||||
|
||||
test('should update tasks based on new context', async () => {
|
||||
// Arrange
|
||||
const mockTasksPath = '/mock/path/tasks.json';
|
||||
const mockFromId = 2;
|
||||
const mockPrompt = 'New project direction';
|
||||
const mockInitialTasks = {
|
||||
tasks: [
|
||||
{
|
||||
id: 1,
|
||||
title: 'Old Task 1',
|
||||
status: 'done',
|
||||
details: 'Done details'
|
||||
},
|
||||
{
|
||||
id: 2,
|
||||
title: 'Old Task 2',
|
||||
status: 'pending',
|
||||
details: 'Old details 2'
|
||||
},
|
||||
{
|
||||
id: 3,
|
||||
title: 'Old Task 3',
|
||||
status: 'in-progress',
|
||||
details: 'Old details 3'
|
||||
}
|
||||
]
|
||||
};
|
||||
|
||||
const mockUpdatedTasks = [
|
||||
{
|
||||
id: 2,
|
||||
title: 'Updated Task 2',
|
||||
status: 'pending',
|
||||
details: 'New details 2 based on direction',
|
||||
description: 'Updated description',
|
||||
dependencies: [],
|
||||
priority: 'medium'
|
||||
},
|
||||
{
|
||||
id: 3,
|
||||
title: 'Updated Task 3',
|
||||
status: 'pending',
|
||||
details: 'New details 3 based on direction',
|
||||
description: 'Updated description',
|
||||
dependencies: [],
|
||||
priority: 'medium'
|
||||
}
|
||||
];
|
||||
|
||||
const mockApiResponse = {
|
||||
mainResult: JSON.stringify(mockUpdatedTasks), // mainResult is the JSON string directly
|
||||
telemetryData: {}
|
||||
};
|
||||
|
||||
// Configure mocks
|
||||
readJSON.mockReturnValue(mockInitialTasks);
|
||||
generateTextService.mockResolvedValue(mockApiResponse);
|
||||
|
||||
// Act
|
||||
const result = await updateTasks(
|
||||
mockTasksPath,
|
||||
mockFromId,
|
||||
mockPrompt,
|
||||
false,
|
||||
{},
|
||||
'json'
|
||||
); // Use json format to avoid console output and process.exit
|
||||
|
||||
// Assert
|
||||
// 1. Read JSON called
|
||||
expect(readJSON).toHaveBeenCalledWith(mockTasksPath);
|
||||
|
||||
// 2. AI Service called with correct args
|
||||
expect(generateTextService).toHaveBeenCalledWith(expect.any(Object));
|
||||
|
||||
// 3. Write JSON called with correctly merged tasks
|
||||
expect(writeJSON).toHaveBeenCalledWith(
|
||||
mockTasksPath,
|
||||
expect.objectContaining({
|
||||
tasks: expect.arrayContaining([
|
||||
expect.objectContaining({ id: 1 }),
|
||||
expect.objectContaining({ id: 2, title: 'Updated Task 2' }),
|
||||
expect.objectContaining({ id: 3, title: 'Updated Task 3' })
|
||||
])
|
||||
})
|
||||
);
|
||||
|
||||
// 4. Check return value
|
||||
expect(result).toEqual(
|
||||
expect.objectContaining({
|
||||
success: true,
|
||||
updatedTasks: mockUpdatedTasks,
|
||||
telemetryData: {}
|
||||
})
|
||||
);
|
||||
});
|
||||
|
||||
test('should handle no tasks to update', async () => {
|
||||
// Arrange
|
||||
const mockTasksPath = '/mock/path/tasks.json';
|
||||
const mockFromId = 99; // Non-existent ID
|
||||
const mockPrompt = 'Update non-existent tasks';
|
||||
const mockInitialTasks = {
|
||||
tasks: [
|
||||
{ id: 1, status: 'done' },
|
||||
{ id: 2, status: 'done' }
|
||||
]
|
||||
};
|
||||
|
||||
// Configure mocks
|
||||
readJSON.mockReturnValue(mockInitialTasks);
|
||||
|
||||
// Act
|
||||
const result = await updateTasks(
|
||||
mockTasksPath,
|
||||
mockFromId,
|
||||
mockPrompt,
|
||||
false,
|
||||
{},
|
||||
'json'
|
||||
);
|
||||
|
||||
// Assert
|
||||
expect(readJSON).toHaveBeenCalledWith(mockTasksPath);
|
||||
expect(generateTextService).not.toHaveBeenCalled();
|
||||
expect(writeJSON).not.toHaveBeenCalled();
|
||||
expect(log).toHaveBeenCalledWith(
|
||||
'info',
|
||||
expect.stringContaining('No tasks to update')
|
||||
);
|
||||
|
||||
// Should return early with no updates
|
||||
expect(result).toBeUndefined();
|
||||
});
|
||||
});
|
||||
File diff suppressed because it is too large