Compare commits

..

46 Commits

Author SHA1 Message Date
github-actions[bot]
f4682cea0a docs: auto-update documentation based on changes in next branch
This PR was automatically generated to update documentation based on recent changes.

  Original commit: feat: implement export tasks (#1260)

  Co-authored-by: Claude <claude-assistant@anthropic.com>
2025-10-06 14:11:11 +00:00
Ralph Khreish
7265a6cf53 feat: implement export tasks (#1260) 2025-10-06 16:03:56 +02:00
Ralph Khreish
db6f405f23 feat: add api-storage improvements (#1278) 2025-10-06 15:23:48 +02:00
Ralph Khreish
7b5a7c4495 fix: remove deprecated generateTaskFiles calls from MCP tools (#1277)
Co-authored-by: Ralph Khreish <Crunchyman-ralph@users.noreply.github.com>
Co-authored-by: claude[bot] <209825114+claude[bot]@users.noreply.github.com>
Resolves issue #1271 - MCP Connection Closed Error After Upgrading to v0.27.3
2025-10-06 11:55:26 +02:00
Ralph Khreish
caee040907 fix(mcp-server): construct default tasks.json path when file parameter not provided (#1276)
Co-authored-by: Ralph Khreish <Crunchyman-ralph@users.noreply.github.com>
Co-authored-by: claude[bot] <209825114+claude[bot]@users.noreply.github.com>
Fixes #1272
2025-10-06 11:50:45 +02:00
github-actions[bot]
4b5473860b docs: Auto-update and format models.md 2025-10-05 20:04:58 +00:00
Ben Vargas
b43b7ce201 feat: Add Codex CLI provider with OAuth authentication (#1273)
Co-authored-by: Ralph Khreish <35776126+Crunchyman-ralph@users.noreply.github.com>
2025-10-05 22:04:45 +02:00
github-actions[bot]
86027f1ee4 chore: rc version bump 2025-10-04 17:26:07 +00:00
Ralph Khreish
4f984f8a69 chore: fix build issues (#1274) 2025-10-04 19:24:31 +02:00
github-actions[bot]
f7646f41b5 chore: rc version bump 2025-10-04 16:56:52 +00:00
Ralph Khreish
20004a39ea fix: add complexity score to tm list and tm show (#1270) 2025-10-03 18:47:05 +02:00
Ralph Khreish
f1393f47b1 fix: pricing show 0 when it is defined (#1266) 2025-10-03 16:21:32 +02:00
Ralph Khreish
738ec51c04 feat: Migrate Task Master to generateObject for structured AI responses (#1262)
Co-authored-by: coderabbitai[bot] <136622811+coderabbitai[bot]@users.noreply.github.com>
Co-authored-by: Ben Vargas <ben@example.com>
2025-10-02 16:23:34 +02:00
Ralph Khreish
c7418c4594 fix: make tag listing table use dynamic column widths to prevent truncation (#1264)
Co-authored-by: Ralph Khreish <Crunchyman-ralph@users.noreply.github.com>
Co-authored-by: claude[bot] <209825114+claude[bot]@users.noreply.github.com>
2025-10-02 15:39:31 +02:00
Ralph Khreish
0747f1c772 Merge pull request #1265 from eyaltoledano/ralph/chore/update.from.main 2025-10-02 15:31:42 +02:00
Ralph Khreish
ffe24a2e35 Merge remote-tracking branch 'origin/main' into ralph/chore/update.from.main 2025-10-02 15:11:24 +02:00
Ralph Khreish
604b94baa9 chore: replace dotenv-mono with dotenv and try to fix env variables (#1261) 2025-10-02 11:52:25 +02:00
Ralph Khreish
2ea4bb6a81 chore: fix CI 2025-09-30 10:41:43 +02:00
Ralph Khreish
3e96387715 chore: fix extension CI 2025-09-30 10:41:43 +02:00
Ralph Khreish
100c3dc47d chore: apply requested changes 2025-09-30 10:41:43 +02:00
Ralph Khreish
986ac117ae feat: update grok-cli ai sdk provider to v5 (#1252) 2025-09-30 10:41:43 +02:00
tommy-ca
18aa416035 feat: Claude Code AI SDK v5 Integration (#1114)
Co-authored-by: Claude <noreply@anthropic.com>
Co-authored-by: Ralph Khreish <35776126+Crunchyman-ralph@users.noreply.github.com>
2025-09-30 10:41:43 +02:00
github-actions[bot]
3b3dbabed1 Version Packages (#1255)
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: Ralph Khreish <35776126+Crunchyman-ralph@users.noreply.github.com>
2025-09-27 08:56:38 +02:00
Ralph Khreish
af53525cbc fix: handle subtasks in getTask method (#1254)
Co-authored-by: Ralph Khreish <Crunchyman-ralph@users.noreply.github.com>
Co-authored-by: claude[bot] <209825114+claude[bot]@users.noreply.github.com>
2025-09-26 20:58:15 +02:00
Joe Danziger
0079b7defd feat: Add Cursor IDE custom slash commands support (#1215) 2025-09-26 19:21:16 +02:00
Ralph Khreish
0b2c6967c4 fix: improve subtask & parent task management (#1251) 2025-09-26 11:04:38 +02:00
Ralph Khreish
c0682ac795 Merge pull request #1250 from eyaltoledano/chore/merge.main.september 2025-09-26 01:10:57 +02:00
Ralph Khreish
01a7faea8f Merge remote-tracking branch 'origin/main' into chore/merge.main.september 2025-09-26 01:10:12 +02:00
github-actions[bot]
b7f32eac5a Version Packages (#1249)
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: Ralph Khreish <35776126+Crunchyman-ralph@users.noreply.github.com>
2025-09-26 01:06:52 +02:00
Ralph Khreish
044a7bfc98 fix: implement subtask status update functionality (#1248)
Co-authored-by: Ralph Khreish <Crunchyman-ralph@users.noreply.github.com>
Co-authored-by: claude[bot] <209825114+claude[bot]@users.noreply.github.com>
2025-09-26 01:01:55 +02:00
Ralph Khreish
814265cd33 chore: adjust CI to run on all PRs (#1244) 2025-09-24 20:19:09 +02:00
Ralph Khreish
9b7b2ca7b2 Merge pull request #1245 from eyaltoledano/ralph/chore/update.from.main 2025-09-24 20:14:00 +02:00
Ralph Khreish
949f091179 Merge remote-tracking branch 'origin/main' into ralph/chore/update.from.main 2025-09-24 20:10:19 +02:00
Ralph Khreish
32c2b03c23 Merge pull request #1242 from eyaltoledano/ralph/fix.main.merges
fix CI failing to release (#1232)
2025-09-24 14:28:33 +02:00
Ralph Khreish
3bfd999d81 Merge remote-tracking branch 'origin/main' into ralph/fix.main.merges 2025-09-24 14:28:09 +02:00
Ralph Khreish
9fa79eb026 chore: fix CI failing to release (#1232) 2025-09-24 14:26:41 +02:00
Jungwoo Song
875134247a Add Q Developer CLI at README.md (#1159) 2025-09-24 11:06:16 +02:00
Ralph Khreish
c2fc61ddb3 chore: mintlify fix broken links (#1237) 2025-09-23 18:32:51 +02:00
Ralph Khreish
aaacc3dae3 fix: improve docs and command help for analyze-complexity (#1235) 2025-09-23 18:19:32 +02:00
Ralph Khreish
46cd5dc186 fix: add installation instructions for claude-code mcp (#1236) 2025-09-23 18:16:40 +02:00
github-actions[bot]
49a31be416 docs: Auto-update and format models.md 2025-09-23 15:45:03 +00:00
JeonSeongHyeon
2b69936ee7 fix: update model ID for sonar deep research (#1192)
Co-authored-by: Ralph Khreish <35776126+Crunchyman-ralph@users.noreply.github.com>
2025-09-23 17:44:40 +02:00
Julien Pelletier
b5fe723f8e Fix/claude code path executable setting (#1172)
Co-authored-by: Ralph Khreish <35776126+Crunchyman-ralph@users.noreply.github.com>
2025-09-22 22:39:37 +02:00
olssonsten
d67b81d25d feat: add MCP timeout configuration for long-running operations (#1112) 2025-09-22 19:55:10 +02:00
Ralph Khreish
66c05053c0 Merge pull request #1231 from eyaltoledano/ralph/merge.from.main 2025-09-22 19:54:12 +02:00
Ralph Khreish
d7ab4609aa chore: fix CI 2025-09-22 19:25:44 +02:00
186 changed files with 13023 additions and 11917 deletions

View File

@@ -0,0 +1,11 @@
---
"task-master-ai": minor
---
Add Codex CLI provider with OAuth authentication
- Added codex-cli provider for GPT-5 and GPT-5-Codex models (272K input / 128K output)
- OAuth-first authentication via `codex login` - no API key required
- Optional OPENAI_CODEX_API_KEY support
- Codebase analysis capabilities automatically enabled
- Command-specific settings and approval/sandbox modes

View File

@@ -0,0 +1,5 @@
---
"task-master-ai": patch
---
Improve `analyze-complexity` cli docs and `--research` flag documentation

View File

@@ -0,0 +1,7 @@
---
"task-master-ai": minor
---
Add Cursor IDE custom slash command support
Expose Task Master commands as Cursor slash commands by copying assets/claude/commands to .cursor/commands on profile add and cleaning up on remove.
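A minimal sketch of the copy/cleanup behaviour described above, assuming Node's `fs` API; the function names and the `assetsDir` parameter are illustrative, not the project's actual implementation:
```typescript
// Illustrative sketch only: mirror assets/claude/commands into .cursor/commands
// on profile add, and remove the copy when the profile is removed.
import fs from 'node:fs';
import path from 'node:path';

export function addCursorSlashCommands(projectRoot: string, assetsDir: string): void {
  const source = path.join(assetsDir, 'claude', 'commands');
  const target = path.join(projectRoot, '.cursor', 'commands');
  fs.mkdirSync(target, { recursive: true });
  fs.cpSync(source, target, { recursive: true });
}

export function removeCursorSlashCommands(projectRoot: string): void {
  const target = path.join(projectRoot, '.cursor', 'commands');
  fs.rmSync(target, { recursive: true, force: true });
}
```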

View File

@@ -0,0 +1,5 @@
---
"task-master-ai": patch
---
Change parent task back to "pending" when all subtasks are in "pending" state
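Read together with the 0.27.2 set-status notes further down, the propagation rule might be sketched as below; the helper is hypothetical and the status union is trimmed to the values these changesets mention:
```typescript
// Hypothetical helper: all subtasks done -> parent done; all pending -> parent pending;
// otherwise any in-progress or done subtask -> parent in-progress.
type Status = 'pending' | 'in-progress' | 'done';

function deriveParentStatus(subtaskStatuses: Status[]): Status | undefined {
  if (subtaskStatuses.length === 0) return undefined; // nothing to derive from
  if (subtaskStatuses.every((s) => s === 'done')) return 'done';
  if (subtaskStatuses.every((s) => s === 'pending')) return 'pending';
  if (subtaskStatuses.some((s) => s === 'in-progress' || s === 'done')) return 'in-progress';
  return undefined; // mixed states not covered by the rules above
}
```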

View File

@@ -0,0 +1,5 @@
---
"task-master-ai": patch
---
Do a quick fix on build

View File

@@ -0,0 +1,5 @@
---
"task-master-ai": patch
---
Fix MCP connection errors caused by deprecated generateTaskFiles calls. Resolves "Cannot read properties of null (reading 'toString')" errors when using MCP tools for task management operations.

View File

@@ -0,0 +1,5 @@
---
"task-master-ai": patch
---
Fix MCP server error when file parameter not provided - now properly constructs default tasks.json path instead of failing with 'tasksJsonPath is required' error.
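A minimal sketch of that fallback, assuming the conventional `.taskmaster/tasks/tasks.json` location; the helper name is illustrative:
```typescript
// Sketch: prefer an explicit file argument, otherwise build the default path
// rather than failing with "tasksJsonPath is required".
import path from 'node:path';

function resolveTasksJsonPath(projectRoot: string, file?: string): string {
  if (file) {
    return path.isAbsolute(file) ? file : path.join(projectRoot, file);
  }
  return path.join(projectRoot, '.taskmaster', 'tasks', 'tasks.json');
}
```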

View File

@@ -0,0 +1,5 @@
---
"task-master-ai": minor
---
Added api keys page on docs website: docs.task-master.dev/getting-started/api-keys

View File

@@ -0,0 +1,10 @@
---
"task-master-ai": minor
---
Move to AI SDK v5:
- Works better with claude-code and gemini-cli as ai providers
- Improved openai model family compatibility
- Migrate ollama provider to v2
- Closes #1223, #1013, #1161, #1174

View File

@@ -0,0 +1,30 @@
---
"task-master-ai": minor
---
Migrate AI services to use generateObject for structured data generation
This update migrates all AI service calls from generateText to generateObject, ensuring more reliable and structured responses across all commands.
### Key Changes:
- **Unified AI Service**: Replaced separate generateText implementations with a single generateObjectService that handles structured data generation
- **JSON Mode Support**: Added proper JSON mode configuration for providers that support it (OpenAI, Anthropic, Google, Groq)
- **Schema Validation**: Integrated Zod schemas for all AI-generated content with automatic validation
- **Provider Compatibility**: Maintained compatibility with all existing providers while leveraging their native structured output capabilities
- **Improved Reliability**: Structured output generation reduces parsing errors and ensures consistent data formats
### Technical Improvements:
- Centralized provider configuration in `ai-providers-unified.js`
- Added `generateObject` support detection for each provider
- Implemented proper error handling for schema validation failures
- Maintained backward compatibility with existing prompt structures
### Bug Fixes:
- Fixed subtask ID numbering issue where AI was generating inconsistent IDs (101-105, 601-603) instead of sequential numbering (1, 2, 3...)
- Enhanced prompt instructions to enforce proper ID generation patterns
- Ensured subtasks display correctly as X.1, X.2, X.3 format
This migration improves the reliability and consistency of AI-generated content throughout the Task Master application.
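For orientation, a hedged sketch of the `generateObject` pattern this migration adopts, using the AI SDK and Zod; the schema, model ID, and prompt are illustrative rather than the project's actual definitions:
```typescript
// Sketch of structured generation with generateObject + Zod validation.
import { generateObject } from 'ai';
import { anthropic } from '@ai-sdk/anthropic';
import { z } from 'zod';

// Illustrative schema; note the sequential integer IDs (1, 2, 3...) the prompt rules enforce.
const subtaskSchema = z.object({
  subtasks: z.array(
    z.object({
      id: z.number().int().min(1),
      title: z.string(),
      description: z.string()
    })
  )
});

async function generateSubtasks(taskTitle: string) {
  const { object } = await generateObject({
    model: anthropic('claude-3-5-sonnet-20241022'), // illustrative model ID
    schema: subtaskSchema,
    prompt: `Break the task "${taskTitle}" into three subtasks.`
  });
  return object.subtasks; // already validated against the Zod schema
}
```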

View File

@@ -0,0 +1,13 @@
---
"task-master-ai": minor
---
Enhanced Roo Code profile with MCP timeout configuration for improved reliability during long-running AI operations. The Roo profile now automatically configures a 300-second timeout for MCP server operations, preventing timeouts during complex tasks like `parse-prd`, `expand-all`, `analyze-complexity`, and `research` operations. This change also replaces static MCP configuration files with programmatic generation for better maintainability.
**What's New:**
- 300-second timeout for MCP operations (up from default 60 seconds)
- Programmatic MCP configuration generation (replaces static asset files)
- Enhanced reliability for AI-powered operations
- Consistent with other AI coding assistant profiles
**Migration:** No user action required - existing Roo Code installations will automatically receive the enhanced MCP configuration on next initialization.
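A hedged sketch of what the programmatic generation might look like; the `mcpServers` shape and the `timeout` field name are assumptions based on this description, not confirmed against the Roo profile code:
```typescript
// Hypothetical sketch: write a Roo MCP config with a 300-second timeout
// instead of shipping a static asset file.
import fs from 'node:fs';
import path from 'node:path';

export function writeRooMcpConfig(projectRoot: string): void {
  const config = {
    mcpServers: {
      'task-master-ai': {
        command: 'npx',
        args: ['-y', 'task-master-ai'],
        timeout: 300 // seconds; up from the default 60
      }
    }
  };
  const rooDir = path.join(projectRoot, '.roo');
  fs.mkdirSync(rooDir, { recursive: true });
  fs.writeFileSync(path.join(rooDir, 'mcp.json'), JSON.stringify(config, null, 2));
}
```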

View File

@@ -0,0 +1,5 @@
---
"task-master-ai": patch
---
Fix Claude Code settings validation for pathToClaudeCodeExecutable

.changeset/pre.json (new file, 23 lines)
View File

@@ -0,0 +1,23 @@
{
"mode": "pre",
"tag": "rc",
"initialVersions": {
"task-master-ai": "0.27.3",
"docs": "0.0.4",
"extension": "0.25.4"
},
"changesets": [
"chore-fix-docs",
"cursor-slash-commands",
"curvy-weeks-flow",
"easy-spiders-wave",
"flat-cities-say",
"forty-tables-invite",
"gentle-cats-dance",
"mcp-timeout-configuration",
"petite-ideas-grab",
"silly-pandas-find",
"sweet-maps-rule",
"whole-pigs-say"
]
}

View File

@@ -0,0 +1,5 @@
---
"task-master-ai": patch
---
Fix sonar deep research model failing, should be called `sonar-deep-research`

View File

@@ -0,0 +1,5 @@
---
"task-master-ai": minor
---
Upgrade grok-cli ai provider to ai sdk v5

View File

@@ -0,0 +1,8 @@
---
"task-master-ai": patch
---
Fix complexity score not showing for `task-master show` and `task-master list`
- Added complexity score on "next task" when running `task-master list`
- Added colors to complexity to reflect complexity (easy, medium, hard)

View File

@@ -6,9 +6,6 @@ on:
- main
- next
pull_request:
branches:
- main
- next
workflow_dispatch:
concurrency:

View File

@@ -41,8 +41,7 @@ jobs:
restore-keys: |
${{ runner.os }}-node-
- name: Install Extension Dependencies
working-directory: apps/extension
- name: Install Monorepo Dependencies
run: npm ci
timeout-minutes: 5
@@ -68,7 +67,6 @@ jobs:
${{ runner.os }}-node-
- name: Install if cache miss
working-directory: apps/extension
run: npm ci
timeout-minutes: 3
@@ -100,7 +98,6 @@ jobs:
${{ runner.os }}-node-
- name: Install if cache miss
working-directory: apps/extension
run: npm ci
timeout-minutes: 3

View File

@@ -31,8 +31,7 @@ jobs:
restore-keys: |
${{ runner.os }}-node-
- name: Install Extension Dependencies
working-directory: apps/extension
- name: Install Monorepo Dependencies
run: npm ci
timeout-minutes: 5

View File

@@ -1,5 +1,92 @@
# task-master-ai
## 0.28.0-rc.1
### Patch Changes
- [#1274](https://github.com/eyaltoledano/claude-task-master/pull/1274) [`4f984f8`](https://github.com/eyaltoledano/claude-task-master/commit/4f984f8a6965da9f9c7edd60ddfd6560ac022917) Thanks [@Crunchyman-ralph](https://github.com/Crunchyman-ralph)! - Do a quick fix on build
## 0.28.0-rc.0
### Minor Changes
- [#1215](https://github.com/eyaltoledano/claude-task-master/pull/1215) [`0079b7d`](https://github.com/eyaltoledano/claude-task-master/commit/0079b7defdad550811f704c470fdd01955d91d4d) Thanks [@joedanz](https://github.com/joedanz)! - Add Cursor IDE custom slash command support
Expose Task Master commands as Cursor slash commands by copying assets/claude/commands to .cursor/commands on profile add and cleaning up on remove.
- [#1246](https://github.com/eyaltoledano/claude-task-master/pull/1246) [`18aa416`](https://github.com/eyaltoledano/claude-task-master/commit/18aa416035f44345bde1c7321490345733a5d042) Thanks [@Crunchyman-ralph](https://github.com/Crunchyman-ralph)! - Added api keys page on docs website: docs.task-master.dev/getting-started/api-keys
- [#1246](https://github.com/eyaltoledano/claude-task-master/pull/1246) [`18aa416`](https://github.com/eyaltoledano/claude-task-master/commit/18aa416035f44345bde1c7321490345733a5d042) Thanks [@Crunchyman-ralph](https://github.com/Crunchyman-ralph)! - Move to AI SDK v5:
- Works better with claude-code and gemini-cli as ai providers
- Improved openai model family compatibility
- Migrate ollama provider to v2
- Closes #1223, #1013, #1161, #1174
- [#1262](https://github.com/eyaltoledano/claude-task-master/pull/1262) [`738ec51`](https://github.com/eyaltoledano/claude-task-master/commit/738ec51c049a295a12839b2dfddaf05e23b8fede) Thanks [@Crunchyman-ralph](https://github.com/Crunchyman-ralph)! - Migrate AI services to use generateObject for structured data generation
This update migrates all AI service calls from generateText to generateObject, ensuring more reliable and structured responses across all commands.
### Key Changes:
- **Unified AI Service**: Replaced separate generateText implementations with a single generateObjectService that handles structured data generation
- **JSON Mode Support**: Added proper JSON mode configuration for providers that support it (OpenAI, Anthropic, Google, Groq)
- **Schema Validation**: Integrated Zod schemas for all AI-generated content with automatic validation
- **Provider Compatibility**: Maintained compatibility with all existing providers while leveraging their native structured output capabilities
- **Improved Reliability**: Structured output generation reduces parsing errors and ensures consistent data formats
### Technical Improvements:
- Centralized provider configuration in `ai-providers-unified.js`
- Added `generateObject` support detection for each provider
- Implemented proper error handling for schema validation failures
- Maintained backward compatibility with existing prompt structures
### Bug Fixes:
- Fixed subtask ID numbering issue where AI was generating inconsistent IDs (101-105, 601-603) instead of sequential numbering (1, 2, 3...)
- Enhanced prompt instructions to enforce proper ID generation patterns
- Ensured subtasks display correctly as X.1, X.2, X.3 format
This migration improves the reliability and consistency of AI-generated content throughout the Task Master application.
- [#1112](https://github.com/eyaltoledano/claude-task-master/pull/1112) [`d67b81d`](https://github.com/eyaltoledano/claude-task-master/commit/d67b81d25ddd927fabb6f5deb368e8993519c541) Thanks [@olssonsten](https://github.com/olssonsten)! - Enhanced Roo Code profile with MCP timeout configuration for improved reliability during long-running AI operations. The Roo profile now automatically configures a 300-second timeout for MCP server operations, preventing timeouts during complex tasks like `parse-prd`, `expand-all`, `analyze-complexity`, and `research` operations. This change also replaces static MCP configuration files with programmatic generation for better maintainability.
**What's New:**
- 300-second timeout for MCP operations (up from default 60 seconds)
- Programmatic MCP configuration generation (replaces static asset files)
- Enhanced reliability for AI-powered operations
- Consistent with other AI coding assistant profiles
**Migration:** No user action required - existing Roo Code installations will automatically receive the enhanced MCP configuration on next initialization.
- [#1246](https://github.com/eyaltoledano/claude-task-master/pull/1246) [`986ac11`](https://github.com/eyaltoledano/claude-task-master/commit/986ac117aee00bcd3e6830a0f76e1ad6d10e0bca) Thanks [@Crunchyman-ralph](https://github.com/Crunchyman-ralph)! - Upgrade grok-cli ai provider to ai sdk v5
### Patch Changes
- [#1235](https://github.com/eyaltoledano/claude-task-master/pull/1235) [`aaacc3d`](https://github.com/eyaltoledano/claude-task-master/commit/aaacc3dae36247b4de72b2d2697f49e5df6d01e3) Thanks [@Crunchyman-ralph](https://github.com/Crunchyman-ralph)! - Improve `analyze-complexity` cli docs and `--research` flag documentation
- [#1251](https://github.com/eyaltoledano/claude-task-master/pull/1251) [`0b2c696`](https://github.com/eyaltoledano/claude-task-master/commit/0b2c6967c4605c33a100cff16f6ce8ff09ad06f0) Thanks [@Crunchyman-ralph](https://github.com/Crunchyman-ralph)! - Change parent task back to "pending" when all subtasks are in "pending" state
- [#1172](https://github.com/eyaltoledano/claude-task-master/pull/1172) [`b5fe723`](https://github.com/eyaltoledano/claude-task-master/commit/b5fe723f8ead928e9f2dbde13b833ee70ac3382d) Thanks [@jujax](https://github.com/jujax)! - Fix Claude Code settings validation for pathToClaudeCodeExecutable
- [#1192](https://github.com/eyaltoledano/claude-task-master/pull/1192) [`2b69936`](https://github.com/eyaltoledano/claude-task-master/commit/2b69936ee7b34346d6de5175af20e077359e2e2a) Thanks [@nukunga](https://github.com/nukunga)! - Fix sonar deep research model failing, should be called `sonar-deep-research`
- [#1270](https://github.com/eyaltoledano/claude-task-master/pull/1270) [`20004a3`](https://github.com/eyaltoledano/claude-task-master/commit/20004a39ea848f747e1ff48981bfe176554e4055) Thanks [@Crunchyman-ralph](https://github.com/Crunchyman-ralph)! - Fix complexity score not showing for `task-master show` and `task-master list`
- Added complexity score on "next task" when running `task-master list`
- Added colors to complexity to reflect complexity (easy, medium, hard)
## 0.27.3
### Patch Changes
- [#1254](https://github.com/eyaltoledano/claude-task-master/pull/1254) [`af53525`](https://github.com/eyaltoledano/claude-task-master/commit/af53525cbc660a595b67d4bb90d906911c71f45d) Thanks [@Crunchyman-ralph](https://github.com/Crunchyman-ralph)! - Fixed issue where `tm show` command could not find subtasks using dotted notation IDs (e.g., '8.1').
- The command now properly searches within parent task subtasks and returns the correct subtask information.
## 0.27.2
### Patch Changes
- [#1248](https://github.com/eyaltoledano/claude-task-master/pull/1248) [`044a7bf`](https://github.com/eyaltoledano/claude-task-master/commit/044a7bfc98049298177bc655cf341d7a8b6a0011) Thanks [@Crunchyman-ralph](https://github.com/Crunchyman-ralph)! - Fix set-status for subtasks:
- Parent tasks are now set as `done` when subtasks are all `done`
- Parent tasks are now set as `in-progress` when at least one subtask is `in-progress` or `done`
## 0.27.1
### Patch Changes

View File

@@ -4,6 +4,28 @@
**Import Task Master's development workflow commands and guidelines, treat as if import is in the main CLAUDE.md file.**
@./.taskmaster/CLAUDE.md
## Test Guidelines
### Synchronous Tests
- **NEVER use async/await in test functions** unless testing actual asynchronous operations
- Use synchronous top-level imports instead of dynamic `await import()`
- Test bodies should be synchronous whenever possible
- Example:
```javascript
// ✅ CORRECT - Synchronous imports
import { MyClass } from '../src/my-class.js';
it('should verify behavior', () => {
expect(new MyClass().property).toBe(value);
});
// ❌ INCORRECT - Async imports
it('should verify behavior', async () => {
const { MyClass } = await import('../src/my-class.js');
expect(new MyClass().property).toBe(value);
});
```
## Changeset Guidelines
- When creating changesets, remember that they are user-facing: rather than getting into code specifics, describe what the end-user gains or what is being fixed by the changeset.

View File

@@ -60,6 +60,19 @@ The following documentation is also available in the `docs` directory:
> **Note:** After clicking the link, you'll still need to add your API keys to the configuration. The link installs the MCP server with placeholder keys that you'll need to replace with your actual API keys.
#### Claude Code Quick Install
For Claude Code users:
```bash
claude mcp add taskmaster-ai -- npx -y task-master-ai
```
Don't forget to add your API keys to the configuration:
- in the root .env of your Project
- in the "env" section of your mcp config for taskmaster-ai
## Requirements
Taskmaster utilizes AI across several commands, and those require a separate API key. You can use a variety of models from different AI providers provided you add your API keys. For example, if you want to use Claude 3.7, you'll need an Anthropic API key.
@@ -75,8 +88,9 @@ At least one (1) of the following is required:
- xAI API Key (for research or main model)
- OpenRouter API Key (for research or main model)
- Claude Code (no API key required - requires Claude Code CLI)
- Codex CLI (OAuth via ChatGPT subscription - requires Codex CLI)
Using the research model is optional but highly recommended. You will need at least ONE API key (unless using Claude Code). Adding all API keys enables you to seamlessly switch between model providers at will.
Using the research model is optional but highly recommended. You will need at least ONE API key (unless using Claude Code or Codex CLI with OAuth). Adding all API keys enables you to seamlessly switch between model providers at will.
## Quick Start
@@ -92,10 +106,11 @@ MCP (Model Control Protocol) lets you run Task Master directly from your editor.
| | Project | `<project_folder>/.cursor/mcp.json` | `<project_folder>\.cursor\mcp.json` | `mcpServers` |
| **Windsurf** | Global | `~/.codeium/windsurf/mcp_config.json` | `%USERPROFILE%\.codeium\windsurf\mcp_config.json` | `mcpServers` |
| **VS Code** | Project | `<project_folder>/.vscode/mcp.json` | `<project_folder>\.vscode\mcp.json` | `servers` |
| **Q CLI** | Global | `~/.aws/amazonq/mcp.json` | | `mcpServers` |
##### Manual Configuration
###### Cursor & Windsurf (`mcpServers`)
###### Cursor & Windsurf & Q Developer CLI (`mcpServers`)
```json
{

View File

@@ -35,7 +35,7 @@
"@types/inquirer": "^9.0.3",
"@types/node": "^22.10.5",
"tsx": "^4.20.4",
"typescript": "^5.7.3",
"typescript": "^5.9.2",
"vitest": "^2.1.8"
},
"engines": {

View File

@@ -0,0 +1,255 @@
/**
* @fileoverview Centralized Command Registry
* Provides a single location for registering all CLI commands
*/
import { Command } from 'commander';
// Import all commands
import { ListTasksCommand } from './commands/list.command.js';
import { ShowCommand } from './commands/show.command.js';
import { AuthCommand } from './commands/auth.command.js';
import { ContextCommand } from './commands/context.command.js';
import { StartCommand } from './commands/start.command.js';
import { SetStatusCommand } from './commands/set-status.command.js';
import { ExportCommand } from './commands/export.command.js';
/**
* Command metadata for registration
*/
export interface CommandMetadata {
name: string;
description: string;
commandClass: typeof Command;
category?: 'task' | 'auth' | 'utility' | 'development';
}
/**
* Registry of all available commands
*/
export class CommandRegistry {
/**
* All available commands with their metadata
*/
private static commands: CommandMetadata[] = [
// Task Management Commands
{
name: 'list',
description: 'List all tasks with filtering and status overview',
commandClass: ListTasksCommand as any,
category: 'task'
},
{
name: 'show',
description: 'Display detailed information about a specific task',
commandClass: ShowCommand as any,
category: 'task'
},
{
name: 'start',
description: 'Start working on a task with claude-code',
commandClass: StartCommand as any,
category: 'task'
},
{
name: 'set-status',
description: 'Update the status of one or more tasks',
commandClass: SetStatusCommand as any,
category: 'task'
},
{
name: 'export',
description: 'Export tasks to external systems',
commandClass: ExportCommand as any,
category: 'task'
},
// Authentication & Context Commands
{
name: 'auth',
description: 'Manage authentication with tryhamster.com',
commandClass: AuthCommand as any,
category: 'auth'
},
{
name: 'context',
description: 'Manage workspace context (organization/brief)',
commandClass: ContextCommand as any,
category: 'auth'
}
];
/**
* Register all commands on a program instance
* @param program - Commander program to register commands on
*/
static registerAll(program: Command): void {
for (const cmd of this.commands) {
this.registerCommand(program, cmd);
}
}
/**
* Register specific commands by category
* @param program - Commander program to register commands on
* @param category - Category of commands to register
*/
static registerByCategory(
program: Command,
category: 'task' | 'auth' | 'utility' | 'development'
): void {
const categoryCommands = this.commands.filter(
(cmd) => cmd.category === category
);
for (const cmd of categoryCommands) {
this.registerCommand(program, cmd);
}
}
/**
* Register a single command by name
* @param program - Commander program to register the command on
* @param name - Name of the command to register
*/
static registerByName(program: Command, name: string): void {
const cmd = this.commands.find((c) => c.name === name);
if (cmd) {
this.registerCommand(program, cmd);
} else {
throw new Error(`Command '${name}' not found in registry`);
}
}
/**
* Register a single command
* @param program - Commander program to register the command on
* @param metadata - Command metadata
*/
private static registerCommand(
program: Command,
metadata: CommandMetadata
): void {
const CommandClass = metadata.commandClass as any;
// Use the static registration method that all commands have
if (CommandClass.registerOn) {
CommandClass.registerOn(program);
} else if (CommandClass.register) {
CommandClass.register(program);
} else {
// Fallback to creating instance and adding
const instance = new CommandClass();
program.addCommand(instance);
}
}
/**
* Get all registered command names
*/
static getCommandNames(): string[] {
return this.commands.map((cmd) => cmd.name);
}
/**
* Get commands by category
*/
static getCommandsByCategory(
category: 'task' | 'auth' | 'utility' | 'development'
): CommandMetadata[] {
return this.commands.filter((cmd) => cmd.category === category);
}
/**
* Add a new command to the registry
* @param metadata - Command metadata to add
*/
static addCommand(metadata: CommandMetadata): void {
// Check if command already exists
if (this.commands.some((cmd) => cmd.name === metadata.name)) {
throw new Error(`Command '${metadata.name}' already exists in registry`);
}
this.commands.push(metadata);
}
/**
* Remove a command from the registry
* @param name - Name of the command to remove
*/
static removeCommand(name: string): boolean {
const index = this.commands.findIndex((cmd) => cmd.name === name);
if (index >= 0) {
this.commands.splice(index, 1);
return true;
}
return false;
}
/**
* Get command metadata by name
* @param name - Name of the command
*/
static getCommand(name: string): CommandMetadata | undefined {
return this.commands.find((cmd) => cmd.name === name);
}
/**
* Check if a command exists
* @param name - Name of the command
*/
static hasCommand(name: string): boolean {
return this.commands.some((cmd) => cmd.name === name);
}
/**
* Get a formatted list of all commands for display
*/
static getFormattedCommandList(): string {
const categories = {
task: 'Task Management',
auth: 'Authentication & Context',
utility: 'Utilities',
development: 'Development'
};
let output = '';
for (const [category, title] of Object.entries(categories)) {
const cmds = this.getCommandsByCategory(
category as keyof typeof categories
);
if (cmds.length > 0) {
output += `\n${title}:\n`;
for (const cmd of cmds) {
output += ` ${cmd.name.padEnd(20)} ${cmd.description}\n`;
}
}
}
return output;
}
}
/**
* Convenience function to register all CLI commands
* @param program - Commander program instance
*/
export function registerAllCommands(program: Command): void {
CommandRegistry.registerAll(program);
}
/**
* Convenience function to register commands by category
* @param program - Commander program instance
* @param category - Category to register
*/
export function registerCommandsByCategory(
program: Command,
category: 'task' | 'auth' | 'utility' | 'development'
): void {
CommandRegistry.registerByCategory(program, category);
}
// Export the registry for direct access if needed
export default CommandRegistry;
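Based on the exports above, a minimal sketch of how a CLI entry point might consume the registry; the entry-point file itself is hypothetical:
```typescript
// Hypothetical entry point wiring the registry into a Commander program.
import { Command } from 'commander';
import { registerAllCommands } from './command-registry.js';

const program = new Command('task-master');
registerAllCommands(program); // registers list, show, start, set-status, export, auth, context
program.parse(process.argv);
```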

View File

@@ -493,18 +493,7 @@ export class AuthCommand extends Command {
}
/**
* Static method to register this command on an existing program
* This is for gradual migration - allows commands.js to use this
*/
static registerOn(program: Command): Command {
const authCommand = new AuthCommand();
program.addCommand(authCommand);
return authCommand;
}
/**
* Alternative registration that returns the command for chaining
* Can also configure the command name if needed
* Register this command on an existing program
*/
static register(program: Command, name?: string): AuthCommand {
const authCommand = new AuthCommand(name);

View File

@@ -694,16 +694,7 @@ export class ContextCommand extends Command {
}
/**
* Static method to register this command on an existing program
*/
static registerOn(program: Command): Command {
const contextCommand = new ContextCommand();
program.addCommand(contextCommand);
return contextCommand;
}
/**
* Alternative registration that returns the command for chaining
* Register this command on an existing program
*/
static register(program: Command, name?: string): ContextCommand {
const contextCommand = new ContextCommand(name);

View File

@@ -0,0 +1,379 @@
/**
* @fileoverview Export command for exporting tasks to external systems
* Provides functionality to export tasks to Hamster briefs
*/
import { Command } from 'commander';
import chalk from 'chalk';
import inquirer from 'inquirer';
import ora, { Ora } from 'ora';
import {
AuthManager,
AuthenticationError,
type UserContext
} from '@tm/core/auth';
import { TaskMasterCore, type ExportResult } from '@tm/core';
import * as ui from '../utils/ui.js';
/**
* Result type from export command
*/
export interface ExportCommandResult {
success: boolean;
action: 'export' | 'validate' | 'cancelled';
result?: ExportResult;
message?: string;
}
/**
* ExportCommand extending Commander's Command class
* Handles task export to external systems
*/
export class ExportCommand extends Command {
private authManager: AuthManager;
private taskMasterCore?: TaskMasterCore;
private lastResult?: ExportCommandResult;
constructor(name?: string) {
super(name || 'export');
// Initialize auth manager
this.authManager = AuthManager.getInstance();
// Configure the command
this.description('Export tasks to external systems (e.g., Hamster briefs)');
// Add options
this.option('--org <id>', 'Organization ID to export to');
this.option('--brief <id>', 'Brief ID to export tasks to');
this.option('--tag <tag>', 'Export tasks from a specific tag');
this.option(
'--status <status>',
'Filter tasks by status (pending, in-progress, done, etc.)'
);
this.option('--exclude-subtasks', 'Exclude subtasks from export');
this.option('-y, --yes', 'Skip confirmation prompt');
// Accept optional positional argument for brief ID or Hamster URL
this.argument('[briefOrUrl]', 'Brief ID or Hamster brief URL');
// Default action
this.action(async (briefOrUrl?: string, options?: any) => {
await this.executeExport(briefOrUrl, options);
});
}
/**
* Initialize the TaskMasterCore
*/
private async initializeServices(): Promise<void> {
if (this.taskMasterCore) {
return;
}
try {
// Initialize TaskMasterCore
this.taskMasterCore = await TaskMasterCore.create({
projectPath: process.cwd()
});
} catch (error) {
throw new Error(
`Failed to initialize services: ${(error as Error).message}`
);
}
}
/**
* Execute the export command
*/
private async executeExport(
briefOrUrl?: string,
options?: any
): Promise<void> {
let spinner: Ora | undefined;
try {
// Check authentication
if (!this.authManager.isAuthenticated()) {
ui.displayError('Not authenticated. Run "tm auth login" first.');
process.exit(1);
}
// Initialize services
await this.initializeServices();
// Get current context
const context = this.authManager.getContext();
// Determine org and brief IDs
let orgId = options?.org || context?.orgId;
let briefId = options?.brief || briefOrUrl || context?.briefId;
// If a URL/ID was provided as argument, resolve it
if (briefOrUrl && !options?.brief) {
spinner = ora('Resolving brief...').start();
const resolvedBrief = await this.resolveBriefInput(briefOrUrl);
if (resolvedBrief) {
briefId = resolvedBrief.briefId;
orgId = resolvedBrief.orgId;
spinner.succeed('Brief resolved');
} else {
spinner.fail('Could not resolve brief');
process.exit(1);
}
}
// Validate we have necessary IDs
if (!orgId) {
ui.displayError(
'No organization selected. Run "tm context org" or use --org flag.'
);
process.exit(1);
}
if (!briefId) {
ui.displayError(
'No brief specified. Run "tm context brief", provide a brief ID/URL, or use --brief flag.'
);
process.exit(1);
}
// Confirm export if not auto-confirmed
if (!options?.yes) {
const confirmed = await this.confirmExport(orgId, briefId, context);
if (!confirmed) {
ui.displayWarning('Export cancelled');
this.lastResult = {
success: false,
action: 'cancelled',
message: 'User cancelled export'
};
process.exit(0);
}
}
// Perform export
spinner = ora('Exporting tasks...').start();
const exportResult = await this.taskMasterCore!.exportTasks({
orgId,
briefId,
tag: options?.tag,
status: options?.status,
excludeSubtasks: options?.excludeSubtasks || false
});
if (exportResult.success) {
spinner.succeed(
`Successfully exported ${exportResult.taskCount} task(s) to brief`
);
// Display summary
console.log(chalk.cyan('\n📤 Export Summary\n'));
console.log(chalk.white(` Organization: ${orgId}`));
console.log(chalk.white(` Brief: ${briefId}`));
console.log(chalk.white(` Tasks exported: ${exportResult.taskCount}`));
if (options?.tag) {
console.log(chalk.gray(` Tag: ${options.tag}`));
}
if (options?.status) {
console.log(chalk.gray(` Status filter: ${options.status}`));
}
if (exportResult.message) {
console.log(chalk.gray(`\n ${exportResult.message}`));
}
} else {
spinner.fail('Export failed');
if (exportResult.error) {
console.error(chalk.red(`\n✗ ${exportResult.error.message}`));
}
}
this.lastResult = {
success: exportResult.success,
action: 'export',
result: exportResult
};
} catch (error: any) {
if (spinner?.isSpinning) spinner.fail('Export failed');
this.handleError(error);
process.exit(1);
}
}
/**
* Resolve brief input to get brief and org IDs
*/
private async resolveBriefInput(
briefOrUrl: string
): Promise<{ briefId: string; orgId: string } | null> {
try {
// Extract brief ID from input
const briefId = this.extractBriefId(briefOrUrl);
if (!briefId) {
return null;
}
// Fetch brief to get organization
const brief = await this.authManager.getBrief(briefId);
if (!brief) {
ui.displayError('Brief not found or you do not have access');
return null;
}
return {
briefId: brief.id,
orgId: brief.accountId
};
} catch (error) {
console.error(chalk.red(`Failed to resolve brief: ${error}`));
return null;
}
}
/**
* Extract a brief ID from raw input (ID or URL)
*/
private extractBriefId(input: string): string | null {
const raw = input?.trim() ?? '';
if (!raw) return null;
const parseUrl = (s: string): URL | null => {
try {
return new URL(s);
} catch {}
try {
return new URL(`https://${s}`);
} catch {}
return null;
};
const fromParts = (path: string): string | null => {
const parts = path.split('/').filter(Boolean);
const briefsIdx = parts.lastIndexOf('briefs');
const candidate =
briefsIdx >= 0 && parts.length > briefsIdx + 1
? parts[briefsIdx + 1]
: parts[parts.length - 1];
return candidate?.trim() || null;
};
// Try URL parsing
const url = parseUrl(raw);
if (url) {
const qId = url.searchParams.get('id') || url.searchParams.get('briefId');
const candidate = (qId || fromParts(url.pathname)) ?? null;
if (candidate) {
if (this.isLikelyId(candidate) || candidate.length >= 8) {
return candidate;
}
}
}
// Check if it looks like a path
if (raw.includes('/')) {
const candidate = fromParts(raw);
if (candidate && (this.isLikelyId(candidate) || candidate.length >= 8)) {
return candidate;
}
}
// Return raw if it looks like an ID
return raw;
}
/**
* Check if a string looks like a brief ID
*/
private isLikelyId(value: string): boolean {
const uuidRegex =
/^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$/;
const ulidRegex = /^[0-9A-HJKMNP-TV-Z]{26}$/i;
const slugRegex = /^[A-Za-z0-9_-]{16,}$/;
return (
uuidRegex.test(value) || ulidRegex.test(value) || slugRegex.test(value)
);
}
/**
* Confirm export with the user
*/
private async confirmExport(
orgId: string,
briefId: string,
context: UserContext | null
): Promise<boolean> {
console.log(chalk.cyan('\n📤 Export Tasks\n'));
// Show org name if available
if (context?.orgName) {
console.log(chalk.white(` Organization: ${context.orgName}`));
console.log(chalk.gray(` ID: ${orgId}`));
} else {
console.log(chalk.white(` Organization ID: ${orgId}`));
}
// Show brief info
if (context?.briefName) {
console.log(chalk.white(`\n Brief: ${context.briefName}`));
console.log(chalk.gray(` ID: ${briefId}`));
} else {
console.log(chalk.white(`\n Brief ID: ${briefId}`));
}
const { confirmed } = await inquirer.prompt([
{
type: 'confirm',
name: 'confirmed',
message: 'Do you want to proceed with export?',
default: true
}
]);
return confirmed;
}
/**
* Handle errors
*/
private handleError(error: any): void {
if (error instanceof AuthenticationError) {
console.error(chalk.red(`\n✗ ${error.message}`));
if (error.code === 'NOT_AUTHENTICATED') {
ui.displayWarning('Please authenticate first: tm auth login');
}
} else {
const msg = error?.message ?? String(error);
console.error(chalk.red(`Error: ${msg}`));
if (error.stack && process.env.DEBUG) {
console.error(chalk.gray(error.stack));
}
}
}
/**
* Get the last export result (useful for testing)
*/
public getLastResult(): ExportCommandResult | undefined {
return this.lastResult;
}
/**
* Clean up resources
*/
async cleanup(): Promise<void> {
// No resources to clean up
}
/**
* Register this command on an existing program
*/
static register(program: Command, name?: string): ExportCommand {
const exportCommand = new ExportCommand(name);
program.addCommand(exportCommand);
return exportCommand;
}
}

View File

@@ -246,7 +246,7 @@ export class ListTasksCommand extends Command {
task.subtasks.forEach((subtask) => {
const subIcon = STATUS_ICONS[subtask.status];
console.log(
` ${chalk.gray(`${task.id}.${subtask.id}`)} ${subIcon} ${chalk.gray(subtask.title)}`
` ${chalk.gray(String(subtask.id))} ${subIcon} ${chalk.gray(subtask.title)}`
);
});
}
@@ -281,9 +281,14 @@ export class ListTasksCommand extends Command {
const priorityBreakdown = getPriorityBreakdown(tasks);
// Find next task following the same logic as findNextTask
const nextTask = this.findNextTask(tasks);
const nextTaskInfo = this.findNextTask(tasks);
// Display dashboard boxes
// Get the full task object with complexity data already included
const nextTask = nextTaskInfo
? tasks.find((t) => String(t.id) === String(nextTaskInfo.id))
: undefined;
// Display dashboard boxes (nextTask already has complexity from storage enrichment)
displayDashboards(
taskStats,
subtaskStats,
@@ -292,7 +297,7 @@ export class ListTasksCommand extends Command {
nextTask
);
// Task table - no title, just show the table directly
// Task table
console.log(
ui.createTaskTable(tasks, {
showSubtasks: withSubtasks,
@@ -303,14 +308,16 @@ export class ListTasksCommand extends Command {
// Display recommended next task section immediately after table
if (nextTask) {
// Find the full task object to get description
const fullTask = tasks.find((t) => String(t.id) === String(nextTask.id));
const description = fullTask ? getTaskDescription(fullTask) : undefined;
const description = getTaskDescription(nextTask);
displayRecommendedNextTask({
...nextTask,
status: 'pending', // Next task is typically pending
description
id: nextTask.id,
title: nextTask.title,
priority: nextTask.priority,
status: nextTask.status,
dependencies: nextTask.dependencies,
description,
complexity: nextTask.complexity as number | undefined
});
} else {
displayRecommendedNextTask(undefined);
@@ -467,18 +474,7 @@ export class ListTasksCommand extends Command {
}
/**
* Static method to register this command on an existing program
* This is for gradual migration - allows commands.js to use this
*/
static registerOn(program: Command): Command {
const listCommand = new ListTasksCommand();
program.addCommand(listCommand);
return listCommand;
}
/**
* Alternative registration that returns the command for chaining
* Can also configure the command name if needed
* Register this command on an existing program
*/
static register(program: Command, name?: string): ListTasksCommand {
const listCommand = new ListTasksCommand(name);

View File

@@ -258,9 +258,6 @@ export class SetStatusCommand extends Command {
)
);
}
// Show storage info
console.log(chalk.gray(`\nUsing ${result.storageType} storage`));
}
/**
@@ -290,18 +287,7 @@ export class SetStatusCommand extends Command {
}
/**
* Static method to register this command on an existing program
* This is for gradual migration - allows commands.js to use this
*/
static registerOn(program: Command): Command {
const setStatusCommand = new SetStatusCommand();
program.addCommand(setStatusCommand);
return setStatusCommand;
}
/**
* Alternative registration that returns the command for chaining
* Can also configure the command name if needed
* Register this command on an existing program
*/
static register(program: Command, name?: string): SetStatusCommand {
const setStatusCommand = new SetStatusCommand(name);

View File

@@ -322,18 +322,7 @@ export class ShowCommand extends Command {
}
/**
* Static method to register this command on an existing program
* This is for gradual migration - allows commands.js to use this
*/
static registerOn(program: Command): Command {
const showCommand = new ShowCommand();
program.addCommand(showCommand);
return showCommand;
}
/**
* Alternative registration that returns the command for chaining
* Can also configure the command name if needed
* Register this command on an existing program
*/
static register(program: Command, name?: string): ShowCommand {
const showCommand = new ShowCommand(name);

View File

@@ -493,16 +493,7 @@ export class StartCommand extends Command {
}
/**
* Static method to register this command on an existing program
*/
static registerOn(program: Command): Command {
const startCommand = new StartCommand();
program.addCommand(startCommand);
return startCommand;
}
/**
* Alternative registration that returns the command for chaining
* Register this command on an existing program
*/
static register(program: Command, name?: string): StartCommand {
const startCommand = new StartCommand(name);

View File

@@ -10,6 +10,15 @@ export { AuthCommand } from './commands/auth.command.js';
export { ContextCommand } from './commands/context.command.js';
export { StartCommand } from './commands/start.command.js';
export { SetStatusCommand } from './commands/set-status.command.js';
export { ExportCommand } from './commands/export.command.js';
// Command Registry
export {
CommandRegistry,
registerAllCommands,
registerCommandsByCategory,
type CommandMetadata
} from './command-registry.js';
// UI utilities (for other commands to use)
export * as ui from './utils/ui.js';

View File

@@ -6,6 +6,7 @@
import chalk from 'chalk';
import boxen from 'boxen';
import type { Task, TaskPriority } from '@tm/core/types';
import { getComplexityWithColor } from '../../utils/ui.js';
/**
* Statistics for task collection
@@ -479,7 +480,7 @@ export function displayDependencyDashboard(
? chalk.cyan(nextTask.dependencies.join(', '))
: chalk.gray('None')
}\n` +
`Complexity: ${nextTask?.complexity || chalk.gray('N/A')}`;
`Complexity: ${nextTask?.complexity !== undefined ? getComplexityWithColor(nextTask.complexity) : chalk.gray('N/A')}`;
return content;
}

View File

@@ -6,6 +6,7 @@
import chalk from 'chalk';
import boxen from 'boxen';
import type { Task } from '@tm/core/types';
import { getComplexityWithColor } from '../../utils/ui.js';
/**
* Next task display options
@@ -17,6 +18,7 @@ export interface NextTaskDisplayOptions {
status?: string;
dependencies?: (string | number)[];
description?: string;
complexity?: number;
}
/**
@@ -82,6 +84,11 @@ export function displayRecommendedNextTask(
: chalk.cyan(task.dependencies.join(', '));
content.push(`Dependencies: ${depsDisplay}`);
// Complexity with color and label
if (typeof task.complexity === 'number') {
content.push(`Complexity: ${getComplexityWithColor(task.complexity)}`);
}
// Description if available
if (task.description) {
content.push('');

View File

@@ -9,7 +9,11 @@ import Table from 'cli-table3';
import { marked, MarkedExtension } from 'marked';
import { markedTerminal } from 'marked-terminal';
import type { Task } from '@tm/core/types';
import { getStatusWithColor, getPriorityWithColor } from '../../utils/ui.js';
import {
getStatusWithColor,
getPriorityWithColor,
getComplexityWithColor
} from '../../utils/ui.js';
// Configure marked to use terminal renderer with subtle colors
marked.use(
@@ -108,7 +112,9 @@ export function displayTaskProperties(task: Task): void {
getStatusWithColor(task.status),
getPriorityWithColor(task.priority),
deps,
'N/A',
typeof task.complexity === 'number'
? getComplexityWithColor(task.complexity)
: chalk.gray('N/A'),
task.description || ''
].join('\n');
@@ -186,8 +192,7 @@ export function displaySubtasks(
status: any;
description?: string;
dependencies?: string[];
}>,
parentId: string | number
}>
): void {
const terminalWidth = process.stdout.columns * 0.95 || 100;
// Display subtasks header
@@ -222,7 +227,7 @@ export function displaySubtasks(
});
subtasks.forEach((subtask) => {
const subtaskId = `${parentId}.${subtask.id}`;
const subtaskId = String(subtask.id);
// Format dependencies
const deps =
@@ -323,7 +328,7 @@ export function displayTaskDetails(
console.log(chalk.gray(` No subtasks with status '${statusFilter}'`));
} else if (filteredSubtasks.length > 0) {
console.log(); // Empty line for spacing
displaySubtasks(filteredSubtasks, task.id);
displaySubtasks(filteredSubtasks);
}
}

View File

@@ -158,10 +158,18 @@ export function displayUpgradeNotification(
export async function performAutoUpdate(
latestVersion: string
): Promise<boolean> {
if (process.env.TASKMASTER_SKIP_AUTO_UPDATE === '1' || process.env.CI) {
console.log(
chalk.dim('Skipping auto-update (TASKMASTER_SKIP_AUTO_UPDATE/CI).')
);
if (
process.env.TASKMASTER_SKIP_AUTO_UPDATE === '1' ||
process.env.CI ||
process.env.NODE_ENV === 'test'
) {
const reason =
process.env.TASKMASTER_SKIP_AUTO_UPDATE === '1'
? 'TASKMASTER_SKIP_AUTO_UPDATE=1'
: process.env.CI
? 'CI environment'
: 'NODE_ENV=test';
console.log(chalk.dim(`Skipping auto-update (${reason})`));
return false;
}
const spinner = ora({

View File

@@ -84,7 +84,23 @@ export function getPriorityWithColor(priority: TaskPriority): string {
}
/**
* Get colored complexity display
* Get complexity color and label based on score thresholds
*/
function getComplexityLevel(score: number): {
color: (text: string) => string;
label: string;
} {
if (score >= 7) {
return { color: chalk.hex('#CC0000'), label: 'High' };
} else if (score >= 4) {
return { color: chalk.hex('#FF8800'), label: 'Medium' };
} else {
return { color: chalk.green, label: 'Low' };
}
}
/**
* Get colored complexity display with dot indicator (simple format)
*/
export function getComplexityWithColor(complexity: number | string): string {
const score =
@@ -94,13 +110,20 @@ export function getComplexityWithColor(complexity: number | string): string {
return chalk.gray('N/A');
}
if (score >= 8) {
return chalk.red.bold(`${score} (High)`);
} else if (score >= 5) {
return chalk.yellow(`${score} (Medium)`);
} else {
return chalk.green(`${score} (Low)`);
const { color } = getComplexityLevel(score);
return color(`${score}`);
}
/**
* Get colored complexity display with /10 format (for dashboards)
*/
export function getComplexityWithScore(complexity: number | undefined): string {
if (typeof complexity !== 'number') {
return chalk.gray('N/A');
}
const { color, label } = getComplexityLevel(complexity);
return color(`${complexity}/10 (${label})`);
}
/**
@@ -263,12 +286,12 @@ export function createTaskTable(
// Adjust column widths to better match the original layout
const baseColWidths = showComplexity
? [
Math.floor(terminalWidth * 0.06),
Math.floor(terminalWidth * 0.1),
Math.floor(terminalWidth * 0.4),
Math.floor(terminalWidth * 0.15),
Math.floor(terminalWidth * 0.12),
Math.floor(terminalWidth * 0.1),
Math.floor(terminalWidth * 0.2),
Math.floor(terminalWidth * 0.12)
Math.floor(terminalWidth * 0.1)
] // ID, Title, Status, Priority, Dependencies, Complexity
: [
Math.floor(terminalWidth * 0.08),
@@ -323,8 +346,12 @@ export function createTaskTable(
}
if (showComplexity) {
// Show N/A if no complexity score
row.push(chalk.gray('N/A'));
// Show complexity score from report if available
if (typeof task.complexity === 'number') {
row.push(getComplexityWithColor(task.complexity));
} else {
row.push(chalk.gray('N/A'));
}
}
table.push(row);
@@ -350,7 +377,11 @@ export function createTaskTable(
}
if (showComplexity) {
subRow.push(chalk.gray('--'));
const complexityDisplay =
typeof subtask.complexity === 'number'
? getComplexityWithColor(subtask.complexity)
: '--';
subRow.push(chalk.gray(complexityDisplay));
}
table.push(subRow);

View File

@@ -1,22 +1,24 @@
# Task Master Documentation
Welcome to the Task Master documentation. Use the links below to navigate to the information you need:
Welcome to the Task Master documentation. This documentation site provides comprehensive guides for getting started with Task Master.
## Getting Started
- [Configuration Guide](archive/configuration.md) - Set up environment variables and customize Task Master
- [Tutorial](archive/ctutorial.md) - Step-by-step guide to getting started with Task Master
- [Quick Start Guide](/getting-started/quick-start) - Complete setup and first-time usage guide
- [Requirements](/getting-started/quick-start/requirements) - What you need to get started
- [Installation](/getting-started/quick-start/installation) - How to install Task Master
## Reference
## Core Capabilities
- [Command Reference](archive/ccommand-reference.md) - Complete list of all available commands
- [Task Structure](archive/ctask-structure.md) - Understanding the task format and features
- [MCP Tools](/capabilities/mcp) - Model Control Protocol integration
- [CLI Commands](/capabilities/cli-root-commands) - Command line interface reference
- [Task Structure](/capabilities/task-structure) - Understanding tasks and subtasks
## Examples & Licensing
## Best Practices
- [Example Interactions](archive/cexamples.md) - Common Cursor AI interaction examples
- [Licensing Information](archive/clicensing.md) - Detailed information about the license
- [Advanced Configuration](/best-practices/configuration-advanced) - Detailed configuration options
- [Advanced Tasks](/best-practices/advanced-tasks) - Working with complex task structures
## Need More Help?
If you can't find what you're looking for in these docs, please check the [main README](../README.md) or visit our [GitHub repository](https://github.com/eyaltoledano/claude-task-master).
If you can't find what you're looking for in these docs, please check the root README.md or visit our [GitHub repository](https://github.com/eyaltoledano/claude-task-master).

View File

@@ -156,7 +156,7 @@ sidebarTitle: "CLI Commands"
# Use an alternative tasks file
task-master analyze-complexity --file=custom-tasks.json
# Use Perplexity AI for research-backed complexity analysis
# Use your configured research model for research-backed complexity analysis
task-master analyze-complexity --research
```
</Accordion>
@@ -200,6 +200,75 @@ sidebarTitle: "CLI Commands"
```
</Accordion>
<Accordion title="Authentication">
```bash
# Log in to tryhamster.com
task-master auth login
# Check authentication status
task-master auth status
# Log out
task-master auth logout
# Refresh authentication token
task-master auth refresh
```
Authentication is required for task export functionality. The login command will open your browser to authenticate with tryhamster.com.
</Accordion>
<Accordion title="Context Management">
```bash
# Show current workspace context (organization and brief)
task-master context
# Select an organization interactively
task-master context org
# Select a brief within your organization
task-master context brief
# Set context using a brief URL or ID
task-master context <brief-url-or-id>
# Clear current context
task-master context clear
# Set context explicitly
task-master context set --org=<org-id> --brief=<brief-id>
```
Context management allows you to select which organization and brief to work with. This is required for task export operations.
</Accordion>
<Accordion title="Export Tasks">
```bash
# Export tasks to a Hamster brief using current context
task-master export
# Export tasks to a specific brief by ID
task-master export --brief=<brief-id>
# Export tasks to a specific organization and brief
task-master export --org=<org-id> --brief=<brief-id>
# Export tasks using a Hamster brief URL
task-master export <hamster-brief-url>
# Export tasks with filtering options
task-master export --status=pending --exclude-subtasks
# Export tasks from a specific tag
task-master export --tag=<tag-name>
# Skip confirmation prompt
task-master export --yes
```
The export command requires authentication (`tm auth login`) and exports local tasks to Hamster briefs. Tasks are exported with their titles, descriptions, implementation details, test strategies, and metadata preserved.
</Accordion>
<Accordion title="Initialize a Project">
```bash
# Initialize a new project with Task Master structure

View File

@@ -32,6 +32,7 @@
"getting-started/quick-start/execute-quick"
]
},
"getting-started/api-keys",
"getting-started/faq",
"getting-started/contribute"
]

View File

@@ -0,0 +1,267 @@
# API Keys Configuration
Task Master supports multiple AI providers through environment variables. This page lists all available API keys and their configuration requirements.
## Required API Keys
> **Note**: At least one required API key must be configured for Task Master to function.
>
> "Required: Yes" below means "required to use that specific provider," not "required globally." You only need at least one provider configured.
### ANTHROPIC_API_KEY (Recommended)
- **Provider**: Anthropic Claude models
- **Format**: `sk-ant-api03-...`
- **Required**: ✅ **Yes**
- **Models**: Claude 3.5 Sonnet, Claude 3 Haiku, Claude 3 Opus
- **Get Key**: [Anthropic Console](https://console.anthropic.com/)
```bash
ANTHROPIC_API_KEY="sk-ant-api03-your-key-here"
```
### PERPLEXITY_API_KEY (Highly Recommended for Research)
- **Provider**: Perplexity AI (Research features)
- **Format**: `pplx-...`
- **Required**: ✅ **Yes**
- **Purpose**: Enables research-backed task expansions and updates
- **Models**: Perplexity Sonar models
- **Get Key**: [Perplexity API](https://www.perplexity.ai/settings/api)
```bash
PERPLEXITY_API_KEY="pplx-your-key-here"
```
### OPENAI_API_KEY
- **Provider**: OpenAI GPT models
- **Format**: `sk-proj-...` or `sk-...`
- **Required**: ✅ **Yes**
- **Models**: GPT-4, GPT-4 Turbo, GPT-3.5 Turbo, O1 models
- **Get Key**: [OpenAI Platform](https://platform.openai.com/api-keys)
```bash
OPENAI_API_KEY="sk-proj-your-key-here"
```
### GOOGLE_API_KEY
- **Provider**: Google Gemini models
- **Format**: Various formats
- **Required**: ✅ **Yes**
- **Models**: Gemini Pro, Gemini Flash, Gemini Ultra
- **Get Key**: [Google AI Studio](https://aistudio.google.com/app/apikey)
- **Alternative**: Use `GOOGLE_APPLICATION_CREDENTIALS` for service account (Google Vertex)
```bash
GOOGLE_API_KEY="your-google-api-key-here"
```
### GROQ_API_KEY
- **Provider**: Groq (High-performance inference)
- **Required**: ✅ **Yes**
- **Models**: Llama models, Mixtral models (via Groq)
- **Get Key**: [Groq Console](https://console.groq.com/keys)
```bash
GROQ_API_KEY="your-groq-key-here"
```
### OPENROUTER_API_KEY
- **Provider**: OpenRouter (Multiple model access)
- **Required**: ✅ **Yes**
- **Models**: Access to various models through single API
- **Get Key**: [OpenRouter](https://openrouter.ai/keys)
```bash
OPENROUTER_API_KEY="your-openrouter-key-here"
```
### AZURE_OPENAI_API_KEY
- **Provider**: Azure OpenAI Service
- **Required**: ✅ **Yes**
- **Requirements**: Also requires `AZURE_OPENAI_ENDPOINT` configuration
- **Models**: GPT models via Azure
- **Get Key**: [Azure Portal](https://portal.azure.com/)
```bash
AZURE_OPENAI_API_KEY="your-azure-key-here"
```
### XAI_API_KEY
- **Provider**: xAI (Grok) models
- **Required**: ✅ **Yes**
- **Models**: Grok models
- **Get Key**: [xAI Console](https://console.x.ai/)
```bash
XAI_API_KEY="your-xai-key-here"
```
## Optional API Keys
> **Note**: These API keys are optional - providers will work without them or use alternative authentication methods.
### AWS_ACCESS_KEY_ID (Bedrock)
- **Provider**: AWS Bedrock
- **Required**: ❌ **No** (uses AWS credential chain)
- **Models**: Claude models via AWS Bedrock
- **Authentication**: Uses AWS credential chain (profiles, IAM roles, etc.)
- **Get Key**: [AWS Console](https://console.aws.amazon.com/iam/)
```bash
# Optional - AWS credential chain is preferred
AWS_ACCESS_KEY_ID="your-aws-access-key"
AWS_SECRET_ACCESS_KEY="your-aws-secret-key"
```
### CLAUDE_CODE_API_KEY
- **Provider**: Claude Code CLI
- **Required**: ❌ **No** (uses OAuth tokens)
- **Purpose**: Integration with local Claude Code CLI
- **Authentication**: Uses OAuth tokens, no API key needed
```bash
# Not typically needed
CLAUDE_CODE_API_KEY="not-usually-required"
```
### GEMINI_API_KEY
- **Provider**: Gemini CLI
- **Required**: ❌ **No** (uses OAuth authentication)
- **Purpose**: Integration with Gemini CLI
- **Authentication**: Primarily uses OAuth via CLI, API key is optional
```bash
# Optional - OAuth via CLI is preferred
GEMINI_API_KEY="your-gemini-key-here"
```
### GROK_CLI_API_KEY
- **Provider**: Grok CLI
- **Required**: ❌ **No** (can use CLI config)
- **Purpose**: Integration with Grok CLI
- **Authentication**: Can use Grok CLI's own config file
```bash
# Optional - CLI config is preferred
GROK_CLI_API_KEY="your-grok-cli-key"
```
### OLLAMA_API_KEY
- **Provider**: Ollama (Local/Remote)
- **Required**: ❌ **No** (local installation doesn't need key)
- **Purpose**: For remote Ollama servers that require authentication
- **Requirements**: Only needed for remote servers with authentication
- **Note**: Not needed for local Ollama installations
```bash
# Only needed for remote Ollama servers
OLLAMA_API_KEY="your-ollama-api-key-here"
```
### GITHUB_API_KEY
- **Provider**: GitHub (Import/Export features)
- **Format**: `ghp_...` or `github_pat_...`
- **Required**: ❌ **No** (for GitHub features only)
- **Purpose**: GitHub import/export features
- **Get Key**: [GitHub Settings](https://github.com/settings/tokens)
```bash
GITHUB_API_KEY="ghp-your-github-key-here"
```
## Configuration Methods
### Method 1: Environment File (.env)
Create a `.env` file in your project root:
```bash
# Copy from .env.example
cp .env.example .env
# Edit with your keys
vim .env
```
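A minimal `.env` might look like this (placeholder values; include only the providers you actually use):
```bash
# .env (placeholder values, replace with your own keys)
ANTHROPIC_API_KEY="sk-ant-api03-your-key-here"
PERPLEXITY_API_KEY="pplx-your-key-here"
```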
### Method 2: System Environment Variables
```bash
export ANTHROPIC_API_KEY="your-key-here"
export PERPLEXITY_API_KEY="your-key-here"
# ... other keys
```
### Method 3: MCP Server Configuration
For Claude Code integration, configure keys in `.mcp.json`:
```json
{
"mcpServers": {
"task-master-ai": {
"command": "npx",
"args": ["-y", "task-master-ai"],
"env": {
"ANTHROPIC_API_KEY": "your-key-here",
"PERPLEXITY_API_KEY": "your-key-here",
"OPENAI_API_KEY": "your-key-here"
}
}
}
}
```
## Key Requirements
### Minimum Requirements
- **At least one** AI provider key is required
- **ANTHROPIC_API_KEY** is recommended as the primary provider
- **PERPLEXITY_API_KEY** is highly recommended for research features
### Provider-Specific Requirements
- **Azure OpenAI**: Requires both `AZURE_OPENAI_API_KEY` and `AZURE_OPENAI_ENDPOINT` configuration
- **Google Vertex**: Requires `VERTEX_PROJECT_ID` and `VERTEX_LOCATION` environment variables
- **AWS Bedrock**: Uses AWS credential chain (profiles, IAM roles, etc.) instead of API keys
- **Ollama**: Only needs API key for remote servers with authentication
- **CLI Providers**: Gemini CLI, Grok CLI, and Claude Code use OAuth/CLI config instead of API keys
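As a sketch, the Azure and Vertex variables from the list above sit alongside the API keys in the same `.env` (endpoint, project, and region values are placeholders):
```bash
# Azure OpenAI (endpoint is a placeholder)
AZURE_OPENAI_API_KEY="your-azure-key-here"
AZURE_OPENAI_ENDPOINT="https://your-resource.openai.azure.com/"
# Google Vertex (project and location are placeholders)
VERTEX_PROJECT_ID="your-gcp-project-id"
VERTEX_LOCATION="us-central1"
```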
## Model Configuration
After setting up API keys, configure which models to use:
```bash
# Interactive model setup
task-master models --setup
# Set specific models
task-master models --set-main claude-3-5-sonnet-20241022
task-master models --set-research perplexity-llama-3.1-sonar-large-128k-online
task-master models --set-fallback gpt-4o-mini
```
## Security Best Practices
1. **Never commit API keys** to version control
2. **Use .env files** and add them to `.gitignore`
3. **Rotate keys regularly** especially if compromised
4. **Use minimal permissions** for service accounts
5. **Monitor usage** to detect unauthorized access
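For point 2 above, a typical `.gitignore` entry is enough to keep local secrets out of version control:
```bash
# .gitignore
.env
```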
## Troubleshooting
### Key Validation
```bash
# Check if keys are properly configured
task-master models
# Test specific provider
task-master add-task --prompt="test task" --model=claude-3-5-sonnet-20241022
```
### Common Issues
- **Invalid key format**: Check the expected format for each provider
- **Insufficient permissions**: Ensure keys have necessary API access
- **Rate limits**: Some providers have usage limits
- **Regional restrictions**: Some models may not be available in all regions
### Getting Help
If you encounter issues with API key configuration:
- Check the [FAQ](/getting-started/faq) for common solutions
- Join our [Discord community](https://discord.gg/fWJkU7rf) for support
- Report issues on [GitHub](https://github.com/eyaltoledano/claude-task-master/issues)

View File

@@ -108,5 +108,5 @@ You don't need to configure everything up front. Most settings can be left as
</Accordion>
<Note>
For advanced configuration options and detailed customization, see our [Advanced Configuration Guide](/docs/best-practices/configuration-advanced) page.
For advanced configuration options and detailed customization, see our [Advanced Configuration Guide](/best-practices/configuration-advanced) page.
</Note>

View File

@@ -56,4 +56,4 @@ If you ran into problems and had to debug errors you can create new rules as you
By now you have all you need to get started executing code faster and smarter with Task Master.
If you have any questions please check out [Frequently Asked Questions](/docs/getting-started/faq)
If you have any questions please check out [Frequently Asked Questions](/getting-started/faq)

View File

@@ -30,6 +30,19 @@ cursor://anysphere.cursor-deeplink/mcp/install?name=taskmaster-ai&config=eyJjb21
```
> **Note:** After clicking the link, you'll still need to add your API keys to the configuration. The link installs the MCP server with placeholder keys that you'll need to replace with your actual API keys.
### Claude Code Quick Install
For Claude Code users:
```bash
claude mcp add taskmaster-ai -- npx -y task-master-ai
```
Don't forget to add your API keys to the configuration:
- in the root `.env` of your project
- in the `env` section of your MCP config for `taskmaster-ai`
</Accordion>
## Installation Options

View File

@@ -6,13 +6,13 @@ sidebarTitle: "Quick Start"
This guide is for new users who want to start using Task Master with minimal setup time.
It covers:
- [Requirements](/docs/getting-started/quick-start/requirements): You will need Node.js and an AI model API Key.
- [Installation](/docs/getting-started/quick-start/installation): How to Install Task Master.
- [Configuration](/docs/getting-started/quick-start/configuration-quick): Setting up your API Key, MCP, and more.
- [PRD](/docs/getting-started/quick-start/prd-quick): Writing and parsing your first PRD.
- [Task Setup](/docs/getting-started/quick-start/tasks-quick): Preparing your tasks for execution.
- [Executing Tasks](/docs/getting-started/quick-start/execute-quick): Using Task Master to execute tasks.
- [Rules & Context](/docs/getting-started/quick-start/rules-quick): Learn how and why to build context in your project over time.
- [Requirements](/getting-started/quick-start/requirements): You will need Node.js and an AI model API Key.
- [Installation](/getting-started/quick-start/installation): How to Install Task Master.
- [Configuration](/getting-started/quick-start/configuration-quick): Setting up your API Key, MCP, and more.
- [PRD](/getting-started/quick-start/prd-quick): Writing and parsing your first PRD.
- [Task Setup](/getting-started/quick-start/tasks-quick): Preparing your tasks for execution.
- [Executing Tasks](/getting-started/quick-start/execute-quick): Using Task Master to execute tasks.
- [Rules & Context](/getting-started/quick-start/rules-quick): Learn how and why to build context in your project over time.
<Tip>
By the end of this guide, you'll have everything you need to begin working productively with Task Master.

View File

@@ -61,9 +61,25 @@ Task Master can provide a complexity report which can be helpful to read before
Can you analyze the complexity of our tasks to help me understand which ones need to be broken down further?
```
The agent will use the `analyze_project_complexity` MCP tool, or you can run it directly with the CLI command:
```bash
task-master analyze-complexity
```
For more comprehensive analysis using your configured research model, you can use:
```bash
task-master analyze-complexity --research
```
<Tip>
The `--research` flag uses whatever research model you have configured in `.taskmaster/config.json` (configurable via `task-master models --setup`) for research-backed complexity analysis, providing more informed recommendations.
</Tip>
You can view the report in a friendly table using:
```
Can you show me the complexity report in a more readable format?
```
<Check>Now you are ready to begin [executing tasks](/docs/getting-started/quick-start/execute-quick)</Check>
For more detailed CLI options, see the [Analyze Task Complexity](/capabilities/cli-root-commands#analyze-task-complexity) section.
<Check>Now you are ready to begin [executing tasks](/getting-started/quick-start/execute-quick)</Check>

View File

@@ -4,7 +4,7 @@ Welcome to v1 of the Task Master Docs. Expect weekly updates as we expand and re
We've organized the docs into three sections depending on your experience level and goals:
### Getting Started - Jump in to [Quick Start](/docs/getting-started/quick-start)
### Getting Started - Jump in to [Quick Start](/getting-started/quick-start)
Designed for first-time users. Get set up, create your first PRD, and run your first task.
### Best Practices

View File

@@ -3,4 +3,31 @@ title: "What's New"
sidebarTitle: "What's New"
---
## Latest Features
### Task Export to Hamster Briefs
Export your local Task Master tasks directly to Hamster briefs for seamless integration with your workflow management system.
**New Commands:**
- `task-master export` - Export tasks to Hamster briefs
- `task-master auth` - Manage authentication with tryhamster.com
- `task-master context` - Manage workspace context (organization/brief selection)
**Key Features:**
- Export tasks with full metadata preservation (titles, descriptions, implementation details, test strategies)
- Flexible filtering options (by status, tag, exclude subtasks)
- Support for both brief IDs and URLs
- Interactive authentication and context management
- Batch export with error handling and progress reporting
**Getting Started:**
1. Authenticate: `task-master auth login`
2. Select context: `task-master context org` and `task-master context brief`
3. Export tasks: `task-master export`
See the [CLI Commands](/capabilities/cli-root-commands) documentation for complete usage examples.
---
An easy way to see the latest releases

View File

@@ -1,5 +1,26 @@
# Change Log
## 0.25.5-rc.0
### Patch Changes
- Updated dependencies [[`aaacc3d`](https://github.com/eyaltoledano/claude-task-master/commit/aaacc3dae36247b4de72b2d2697f49e5df6d01e3), [`0079b7d`](https://github.com/eyaltoledano/claude-task-master/commit/0079b7defdad550811f704c470fdd01955d91d4d), [`0b2c696`](https://github.com/eyaltoledano/claude-task-master/commit/0b2c6967c4605c33a100cff16f6ce8ff09ad06f0), [`18aa416`](https://github.com/eyaltoledano/claude-task-master/commit/18aa416035f44345bde1c7321490345733a5d042), [`18aa416`](https://github.com/eyaltoledano/claude-task-master/commit/18aa416035f44345bde1c7321490345733a5d042), [`738ec51`](https://github.com/eyaltoledano/claude-task-master/commit/738ec51c049a295a12839b2dfddaf05e23b8fede), [`d67b81d`](https://github.com/eyaltoledano/claude-task-master/commit/d67b81d25ddd927fabb6f5deb368e8993519c541), [`b5fe723`](https://github.com/eyaltoledano/claude-task-master/commit/b5fe723f8ead928e9f2dbde13b833ee70ac3382d), [`2b69936`](https://github.com/eyaltoledano/claude-task-master/commit/2b69936ee7b34346d6de5175af20e077359e2e2a), [`986ac11`](https://github.com/eyaltoledano/claude-task-master/commit/986ac117aee00bcd3e6830a0f76e1ad6d10e0bca), [`20004a3`](https://github.com/eyaltoledano/claude-task-master/commit/20004a39ea848f747e1ff48981bfe176554e4055)]:
- task-master-ai@0.28.0-rc.0
## 0.25.4
### Patch Changes
- Updated dependencies [[`af53525`](https://github.com/eyaltoledano/claude-task-master/commit/af53525cbc660a595b67d4bb90d906911c71f45d)]:
- task-master-ai@0.27.3
## 0.25.3
### Patch Changes
- Updated dependencies [[`044a7bf`](https://github.com/eyaltoledano/claude-task-master/commit/044a7bfc98049298177bc655cf341d7a8b6a0011)]:
- task-master-ai@0.27.2
## 0.25.2
### Patch Changes

View File

@@ -3,7 +3,7 @@
"private": true,
"displayName": "TaskMaster",
"description": "A visual Kanban board interface for TaskMaster projects in VS Code",
"version": "0.25.2",
"version": "0.25.5-rc.0",
"publisher": "Hamster",
"icon": "assets/icon.png",
"engines": {
@@ -240,7 +240,7 @@
"check-types": "tsc --noEmit"
},
"dependencies": {
"task-master-ai": "0.27.1"
"task-master-ai": "*"
},
"devDependencies": {
"@dnd-kit/core": "^6.3.1",
@@ -276,7 +276,8 @@
"react-dom": "^19.0.0",
"tailwind-merge": "^3.3.1",
"tailwindcss": "4.1.11",
"typescript": "^5.7.3"
"typescript": "^5.9.2",
"@tm/core": "*"
},
"overrides": {
"glob@<8": "^10.4.5",

View File

@@ -2,7 +2,7 @@
"name": "task-master-hamster",
"displayName": "Taskmaster AI",
"description": "A visual Kanban board interface for Taskmaster projects in VS Code",
"version": "0.23.1",
"version": "0.25.3",
"publisher": "Hamster",
"icon": "assets/icon.png",
"engines": {

View File

@@ -5,7 +5,6 @@
"outDir": "out",
"lib": ["ES2022", "DOM"],
"sourceMap": true,
"rootDir": "src",
"strict": true /* enable all strict type-checking options */,
"moduleResolution": "Node",
"esModuleInterop": true,
@@ -21,8 +20,10 @@
"@/*": ["./src/*"],
"@/components/*": ["./src/components/*"],
"@/lib/*": ["./src/lib/*"],
"@tm/core": ["../core/src"]
"@tm/core": ["../../packages/tm-core/src/index.ts"],
"@tm/core/*": ["../../packages/tm-core/src/*"]
}
},
"include": ["src/**/*"],
"exclude": ["node_modules", ".vscode-test", "out", "dist"]
}

View File

@@ -0,0 +1,231 @@
# TODO: Move to apps/docs inside our documentation website
# Claude Code Integration Guide
This guide covers how to use Task Master with Claude Code AI SDK integration for enhanced AI-powered development workflows.
## Overview
Claude Code integration allows Task Master to leverage the Claude Code CLI for AI operations without requiring direct API keys. The integration uses OAuth tokens managed by the Claude Code CLI itself.
## Authentication Setup
The Claude Code provider uses token authentication managed by the Claude Code CLI.
### Prerequisites
1. **Install Claude Code CLI** (if not already installed):
```bash
# Installation method depends on your system
# Follow Claude Code documentation for installation
```
2. **Set up OAuth token** using Claude Code CLI:
```bash
claude setup-token
```
This command will:
- Guide you through OAuth authentication
- Store the token securely for CLI usage
- Enable Task Master to use Claude Code without manual API key configuration
### Authentication Priority
Task Master will attempt authentication in this order:
1. **Environment Variable** (optional): `CLAUDE_CODE_OAUTH_TOKEN`
- Useful for CI/CD environments or when you want to override the default token
- Not required if you've set up the CLI token
2. **Claude Code CLI Token** (recommended): Token managed by `claude setup-token`
- Automatically used when available
- Most convenient for local development
3. **Fallback**: Error if neither is available
## Configuration
### Basic Configuration
Add Claude Code to your Task Master configuration:
```javascript
// In your .taskmaster/config.json or via task-master models command
{
"models": {
"main": "claude-code:sonnet", // Use Claude Code with Sonnet
"research": "perplexity-llama-3.1-sonar-large-128k-online",
"fallback": "claude-code:opus" // Use Claude Code with Opus as fallback
}
}
```
### Supported Models
- `claude-code:sonnet` - Claude 3.5 Sonnet via Claude Code CLI
- `claude-code:opus` - Claude 3 Opus via Claude Code CLI
### Environment Variables (Optional)
While not required, you can optionally set:
```bash
export CLAUDE_CODE_OAUTH_TOKEN="your_oauth_token_here"
```
This is only needed in specific scenarios like:
- CI/CD pipelines
- Docker containers
- When you want to use a different token than the CLI default
## Usage Examples
### Basic Task Operations
```bash
# Use Claude Code for task operations
task-master add-task --prompt="Implement user authentication system" --research
task-master expand --id=1 --research
task-master update-task --id=1.1 --prompt="Add JWT token validation"
```
### Model Configuration Commands
```bash
# Set Claude Code as main model
task-master models --set-main claude-code:sonnet
# Use interactive setup
task-master models --setup
# Then select "claude-code" from the provider list
```
## Troubleshooting
### Common Issues
#### 1. "Claude Code CLI not available" Error
**Problem**: Task Master cannot connect to Claude Code CLI.
**Solutions**:
- Ensure Claude Code CLI is installed and in your PATH
- Run `claude setup-token` to configure authentication
- Verify Claude Code CLI works: `claude --help`
#### 2. Authentication Failures
**Problem**: Token authentication is failing.
**Solutions**:
- Re-run `claude setup-token` to refresh your OAuth token
- Check if your token has expired
- Verify Claude Code CLI can authenticate: try a simple `claude` command
#### 3. Model Not Available
**Problem**: Specified Claude Code model is not supported.
**Solutions**:
- Use supported models: `sonnet` or `opus`
- Check model availability: `task-master models --list`
- Verify your Claude Code CLI has access to the requested model
### Debug Steps
1. **Test Claude Code CLI directly**:
```bash
claude --help
# Should show help without errors
```
2. **Test authentication**:
```bash
claude setup-token --verify
# Should confirm token is valid
```
3. **Test Task Master integration**:
```bash
task-master models --test claude-code:sonnet
# Should successfully connect and test the model
```
4. **Check logs**:
- Task Master logs will show detailed error messages
- Use `--verbose` flag for more detailed output
### Environment-Specific Configuration
#### Docker/Containers
When running in Docker, you'll need to:
1. Install Claude Code CLI in your container
2. Set up authentication via environment variable:
```dockerfile
ENV CLAUDE_CODE_OAUTH_TOKEN="your_token_here"
```
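A fuller Dockerfile sketch, assuming the CLI installs from npm as `@anthropic-ai/claude-code` (base image and install method are assumptions; adjust to your setup):
```dockerfile
# Minimal sketch: base image and package names are assumptions
FROM node:20-slim

# Install Task Master and the Claude Code CLI globally
RUN npm install -g task-master-ai @anthropic-ai/claude-code

# Prefer passing the token at runtime (docker run -e CLAUDE_CODE_OAUTH_TOKEN=...)
# rather than baking it into the image
ENV CLAUDE_CODE_OAUTH_TOKEN=""

WORKDIR /workspace
```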
#### CI/CD Pipelines
For automated environments:
1. Set up a service account token or use environment variables
2. Ensure Claude Code CLI is available in the pipeline environment
3. Configure authentication before running Task Master commands
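A minimal GitHub Actions sketch, assuming the OAuth token is stored as a repository secret named `CLAUDE_CODE_OAUTH_TOKEN`:
```yaml
# Sketch only: job layout and secret name are assumptions
jobs:
  taskmaster:
    runs-on: ubuntu-latest
    env:
      CLAUDE_CODE_OAUTH_TOKEN: ${{ secrets.CLAUDE_CODE_OAUTH_TOKEN }}
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-node@v4
        with:
          node-version: '20'
      - run: npm install -g task-master-ai @anthropic-ai/claude-code
      - run: task-master analyze-complexity
```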
## Integration with AI SDK
Task Master's Claude Code integration uses the official `ai-sdk-provider-claude-code` package, providing:
- **Streaming Support**: Real-time token streaming for interactive experiences
- **Full AI SDK Compatibility**: Works with generateText, streamText, and other AI SDK functions
- **Automatic Error Handling**: Graceful degradation when Claude Code is unavailable
- **Type Safety**: Full TypeScript support with proper type definitions
### Example AI SDK Usage
```javascript
import { generateText } from 'ai';
import { ClaudeCodeProvider } from './src/ai-providers/claude-code.js';
const provider = new ClaudeCodeProvider();
const client = provider.getClient();
const result = await generateText({
model: client('sonnet'),
messages: [
{ role: 'user', content: 'Hello Claude!' }
]
});
console.log(result.text);
```
## Security Notes
- OAuth tokens are managed securely by Claude Code CLI
- No API keys need to be stored in your project files
- Tokens are automatically refreshed by the Claude Code CLI
- Environment variables should only be used in secure environments
## Getting Help
If you encounter issues:
1. Check the Claude Code CLI documentation
2. Verify your authentication setup with `claude setup-token --verify`
3. Review Task Master logs for detailed error messages
4. Open an issue with both Task Master and Claude Code version information

View File

@@ -383,6 +383,12 @@ task-master models --set-main=my-local-llama --ollama
# Set a custom OpenRouter model for the research role
task-master models --set-research=google/gemini-pro --openrouter
# Set Codex CLI model for the main role (uses ChatGPT subscription via OAuth)
task-master models --set-main=gpt-5-codex --codex-cli
# Set Codex CLI model for the fallback role
task-master models --set-fallback=gpt-5 --codex-cli
# Run interactive setup to configure models, including custom ones
task-master models --setup
```

View File

@@ -235,6 +235,60 @@ node scripts/init.js
- "MCP provider requires session context" → Ensure running in MCP environment
- See the [MCP Provider Guide](./mcp-provider-guide.md) for detailed troubleshooting
### MCP Timeout Configuration
Long-running AI operations in taskmaster-ai can exceed the default 60-second MCP timeout. Operations like `parse_prd`, `expand_task`, `research`, and `analyze_project_complexity` may take 2-5 minutes to complete.
#### Adding Timeout Configuration
Add a `timeout` parameter to your MCP configuration to extend the timeout limit. The timeout configuration works identically across MCP clients including Cursor, Windsurf, and RooCode:
```json
{
"mcpServers": {
"task-master-ai": {
"command": "npx",
"args": ["-y", "--package=task-master-ai", "task-master-ai"],
"timeout": 300,
"env": {
"ANTHROPIC_API_KEY": "your-anthropic-api-key"
}
}
}
}
```
**Configuration Details:**
- **`timeout: 300`** - Sets timeout to 300 seconds (5 minutes)
- **Value range**: 1-3600 seconds (1 second to 1 hour)
- **Recommended**: 300 seconds provides sufficient time for most AI operations
- **Format**: Integer value in seconds (not milliseconds)
#### Automatic Setup
When adding taskmaster rules for supported editors, the timeout configuration is automatically included:
```bash
# Automatically includes timeout configuration
task-master rules add cursor
task-master rules add roo
task-master rules add windsurf
task-master rules add vscode
```
#### Troubleshooting Timeouts
If you're still experiencing timeout errors:
1. **Verify configuration**: Check that `timeout: 300` is present in your MCP config
2. **Restart editor**: Restart your editor after making configuration changes
3. **Increase timeout**: For very complex operations, try `timeout: 600` (10 minutes)
4. **Check API keys**: Ensure required API keys are properly configured
**Expected behavior:**
- **Before fix**: Operations fail after 60 seconds with `MCP request timed out after 60000ms`
- **After fix**: Operations complete successfully within the configured timeout limit
### Google Vertex AI Configuration
Google Vertex AI is Google Cloud's enterprise AI platform and requires specific configuration:
@@ -375,3 +429,153 @@ Azure OpenAI provides enterprise-grade OpenAI models through Microsoft's Azure c
- Verify the deployment name matches your configuration exactly (case-sensitive)
- Ensure the model deployment is in a "Succeeded" state in Azure OpenAI Studio
- Ensure you're not getting rate limited by `maxTokens`; maintain an appropriate Tokens per Minute (TPM) rate limit in your deployment.
### Codex CLI Provider
The Codex CLI provider integrates Task Master with OpenAI's Codex CLI, allowing you to use ChatGPT subscription models via OAuth authentication.
1. **Prerequisites**:
- Node.js >= 18
- Codex CLI >= 0.42.0 (>= 0.44.0 recommended)
- ChatGPT subscription: Plus, Pro, Business, Edu, or Enterprise (for OAuth access to GPT-5 models)
2. **Installation**:
```bash
npm install -g @openai/codex
```
3. **Authentication** (OAuth - Primary Method):
```bash
codex login
```
This will open a browser window for OAuth authentication with your ChatGPT account. Once authenticated, Task Master will automatically use these credentials.
4. **Optional API Key Method**:
While OAuth is the primary and recommended authentication method, you can optionally set an OpenAI API key:
```bash
# In .env file
OPENAI_API_KEY=sk-your-openai-api-key-here
```
**Note**: The API key will only be injected if explicitly provided. OAuth is always preferred.
5. **Configuration**:
```json
// In .taskmaster/config.json
{
"models": {
"main": {
"provider": "codex-cli",
"modelId": "gpt-5-codex",
"maxTokens": 128000,
"temperature": 0.2
},
"fallback": {
"provider": "codex-cli",
"modelId": "gpt-5",
"maxTokens": 128000,
"temperature": 0.2
}
},
"codexCli": {
"allowNpx": true,
"skipGitRepoCheck": true,
"approvalMode": "on-failure",
"sandboxMode": "workspace-write"
}
}
```
6. **Available Models**:
- `gpt-5` - Latest GPT-5 model (272K max input, 128K max output)
- `gpt-5-codex` - GPT-5 optimized for agentic software engineering (272K max input, 128K max output)
7. **Codex CLI Settings (`codexCli` section)**:
The `codexCli` section in your configuration file supports the following options:
- **`allowNpx`** (boolean, default: `false`): Allow fallback to `npx @openai/codex` if CLI not found on PATH
- **`skipGitRepoCheck`** (boolean, default: `false`): Skip git repository safety check (recommended for CI/non-repo usage)
- **`approvalMode`** (string): Control command execution approval
- `"untrusted"`: Require approval for all commands
- `"on-failure"`: Only require approval after a command fails (default)
- `"on-request"`: Approve only when explicitly requested
- `"never"`: Never require approval (not recommended)
- **`sandboxMode`** (string): Control filesystem access
- `"read-only"`: Read-only access
- `"workspace-write"`: Allow writes to workspace (default)
- `"danger-full-access"`: Full filesystem access (use with caution)
- **`codexPath`** (string, optional): Custom path to codex CLI executable
- **`cwd`** (string, optional): Working directory for Codex CLI execution
- **`fullAuto`** (boolean, optional): Fully automatic mode (equivalent to `--full-auto` flag)
- **`dangerouslyBypassApprovalsAndSandbox`** (boolean, optional): Bypass all safety checks (dangerous!)
- **`color`** (string, optional): Color handling - `"always"`, `"never"`, or `"auto"`
- **`outputLastMessageFile`** (string, optional): Write last agent message to specified file
- **`verbose`** (boolean, optional): Enable verbose logging
- **`env`** (object, optional): Additional environment variables for Codex CLI
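For illustration, a `codexCli` block combining several of the options above might look like this (paths and values are placeholders):
```json
{
	"codexCli": {
		"allowNpx": true,
		"skipGitRepoCheck": true,
		"approvalMode": "on-failure",
		"sandboxMode": "workspace-write",
		"codexPath": "/usr/local/bin/codex",
		"verbose": false,
		"env": { "DEBUG": "true" }
	}
}
```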
8. **Command-Specific Settings** (optional):
You can override settings for specific Task Master commands:
```json
{
"codexCli": {
"allowNpx": true,
"approvalMode": "on-failure",
"commandSpecific": {
"parse-prd": {
"approvalMode": "never",
"verbose": true
},
"expand": {
"sandboxMode": "read-only"
}
}
}
}
```
9. **Codebase Features**:
The Codex CLI provider is codebase-capable, meaning it can analyze and interact with your project files. Codebase analysis features are automatically enabled when using `codex-cli` as your provider and `enableCodebaseAnalysis` is set to `true` in your global configuration (default).
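For reference, the flag lives in the `global` section of `.taskmaster/config.json` (shown here with its default value):
```json
{
	"global": {
		"enableCodebaseAnalysis": true
	}
}
```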
10. **Setup Commands**:
```bash
# Set Codex CLI for main role
task-master models --set-main gpt-5-codex --codex-cli
# Set Codex CLI for fallback role
task-master models --set-fallback gpt-5 --codex-cli
# Verify configuration
task-master models
```
11. **Troubleshooting**:
**"codex: command not found" error:**
- Install Codex CLI globally: `npm install -g @openai/codex`
- Verify installation: `codex --version`
- Alternatively, enable `allowNpx: true` in your codexCli configuration
**"Not logged in" errors:**
- Run `codex login` to authenticate with your ChatGPT account
- Verify authentication status: `codex` (opens interactive CLI)
**"Old version" warnings:**
- Check version: `codex --version`
- Upgrade: `npm install -g @openai/codex@latest`
- Minimum version: 0.42.0, recommended: >= 0.44.0
**"Model not available" errors:**
- Only `gpt-5` and `gpt-5-codex` are available via OAuth subscription
- Verify your ChatGPT subscription is active
- For other OpenAI models, use the standard `openai` provider with an API key
**API key not being used:**
- API key is only injected when explicitly provided
- OAuth authentication is always preferred
- If you want to use an API key, ensure `OPENAI_API_KEY` is set in your `.env` file
12. **Important Notes**:
- OAuth subscription required for model access (no API key needed for basic operation)
- Limited to OAuth-available models only (`gpt-5` and `gpt-5-codex`)
- Pricing information is not available for OAuth models (shows as "Unknown" in cost calculations)
- See [Codex CLI Provider Documentation](./providers/codex-cli.md) for more details

View File

@@ -0,0 +1,463 @@
# Codex CLI Provider Usage Examples
This guide provides practical examples of using Task Master with the Codex CLI provider.
## Prerequisites
Before using these examples, ensure you have:
```bash
# 1. Codex CLI installed
npm install -g @openai/codex
# 2. Authenticated with ChatGPT
codex login
# 3. Codex CLI configured as your provider
task-master models --set-main gpt-5-codex --codex-cli
```
## Example 1: Basic Task Creation
Use Codex CLI to create tasks from a simple description:
```bash
# Add a task with AI-powered enhancement
task-master add-task --prompt="Implement user authentication with JWT" --research
```
**What happens**:
1. Task Master sends your prompt to GPT-5-Codex via the CLI
2. The AI analyzes your request and generates a detailed task
3. The task is added to your `.taskmaster/tasks/tasks.json`
4. OAuth credentials are automatically used (no API key needed)
## Example 2: Parsing a Product Requirements Document
Create a comprehensive task list from a PRD:
```bash
# Create your PRD
cat > my-feature.txt <<EOF
# User Profile Feature
## Requirements
1. Users can view their profile
2. Users can edit their information
3. Profile pictures can be uploaded
4. Email verification required
## Technical Constraints
- Use React for frontend
- Node.js/Express backend
- PostgreSQL database
EOF
# Parse with Codex CLI
task-master parse-prd my-feature.txt --num-tasks 12
```
**What happens**:
1. GPT-5-Codex reads and analyzes your PRD
2. Generates structured tasks with dependencies
3. Creates subtasks for complex items
4. Saves everything to `.taskmaster/tasks/`
## Example 3: Expanding Tasks with Research
Break down a complex task into detailed subtasks:
```bash
# First, show your current tasks
task-master list
# Expand a specific task (e.g., task 1.2)
task-master expand --id=1.2 --research --force
```
**What happens**:
1. Codex CLI uses GPT-5 for research-level analysis
2. Breaks down the task into logical subtasks
3. Adds implementation details and test strategies
4. Updates the task with dependency information
## Example 4: Analyzing Project Complexity
Get AI-powered insights into your project's task complexity:
```bash
# Analyze all tasks
task-master analyze-complexity --research
# View the complexity report
task-master complexity-report
```
**What happens**:
1. GPT-5 analyzes each task's scope and requirements
2. Assigns complexity scores and estimates subtask counts
3. Generates a detailed report
4. Saves to `.taskmaster/reports/task-complexity-report.json`
## Example 5: Using Custom Codex CLI Settings
Configure Codex CLI behavior for different commands:
```json
// In .taskmaster/config.json
{
"models": {
"main": {
"provider": "codex-cli",
"modelId": "gpt-5-codex",
"maxTokens": 128000,
"temperature": 0.2
}
},
"codexCli": {
"allowNpx": true,
"approvalMode": "on-failure",
"sandboxMode": "workspace-write",
"commandSpecific": {
"parse-prd": {
"verbose": true,
"approvalMode": "never"
},
"expand": {
"sandboxMode": "read-only",
"verbose": true
}
}
}
}
```
```bash
# Now parse-prd runs with verbose output and no approvals
task-master parse-prd requirements.txt
# Expand runs with read-only mode
task-master expand --id=2.1
```
## Example 6: Workflow - Building a Feature End-to-End
Complete workflow from PRD to implementation tracking:
```bash
# Step 1: Initialize project
task-master init
# Step 2: Set up Codex CLI
task-master models --set-main gpt-5-codex --codex-cli
task-master models --set-fallback gpt-5 --codex-cli
# Step 3: Create PRD
cat > feature-prd.txt <<EOF
# Authentication System
Implement a complete authentication system with:
- User registration
- Email verification
- Password reset
- Two-factor authentication
- Session management
EOF
# Step 4: Parse PRD into tasks
task-master parse-prd feature-prd.txt --num-tasks 8
# Step 5: Analyze complexity
task-master analyze-complexity --research
# Step 6: Expand complex tasks
task-master expand --all --research
# Step 7: Start working
task-master next
# Shows: Task 1.1: User registration database schema
# Step 8: Mark completed as you work
task-master set-status --id=1.1 --status=done
# Step 9: Continue to next task
task-master next
```
## Example 7: Multi-Role Configuration
Use Codex CLI for main tasks, Perplexity for research:
```json
// In .taskmaster/config.json
{
"models": {
"main": {
"provider": "codex-cli",
"modelId": "gpt-5-codex",
"maxTokens": 128000,
"temperature": 0.2
},
"research": {
"provider": "perplexity",
"modelId": "sonar-pro",
"maxTokens": 8700,
"temperature": 0.1
},
"fallback": {
"provider": "codex-cli",
"modelId": "gpt-5",
"maxTokens": 128000,
"temperature": 0.2
}
}
}
```
```bash
# Main task operations use GPT-5-Codex
task-master add-task --prompt="Build REST API endpoint"
# Research operations use Perplexity
task-master analyze-complexity --research
# Fallback to GPT-5 if needed
task-master expand --id=3.2 --force
```
## Example 8: Troubleshooting Common Issues
### Issue: Codex CLI not found
```bash
# Check if Codex is installed
codex --version
# If not found, install globally
npm install -g @openai/codex
# Or enable npx fallback by merging this into .taskmaster/config.json
# (edit the file rather than appending, so the JSON stays valid):
#   "codexCli": { "allowNpx": true }
```
### Issue: Not authenticated
```bash
# Check auth status
codex
# Use /about command to see auth info
# Re-authenticate if needed
codex login
```
### Issue: Want more verbose output
```bash
# Enable verbose mode by merging this into .taskmaster/config.json
# (edit the file rather than appending, so the JSON stays valid):
#   "codexCli": { "verbose": true }
# Or for specific commands
task-master parse-prd my-prd.txt
# (verbose output shows detailed Codex CLI interactions)
```
## Example 9: CI/CD Integration
Use Codex CLI in automated workflows:
```yaml
# .github/workflows/task-analysis.yml
name: Analyze Task Complexity
on:
push:
paths:
- '.taskmaster/**'
jobs:
analyze:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Setup Node.js
uses: actions/setup-node@v4
with:
node-version: '20'
- name: Install Task Master
run: npm install -g task-master-ai
- name: Configure Codex CLI
run: |
npm install -g @openai/codex
echo "${{ secrets.OPENAI_CODEX_API_KEY }}" > ~/.codex-auth
env:
OPENAI_CODEX_API_KEY: ${{ secrets.OPENAI_CODEX_API_KEY }}
- name: Configure Task Master
run: |
cat > .taskmaster/config.json <<EOF
{
"models": {
"main": {
"provider": "codex-cli",
"modelId": "gpt-5"
}
},
"codexCli": {
"allowNpx": true,
"skipGitRepoCheck": true,
"approvalMode": "never",
"fullAuto": true
}
}
EOF
- name: Analyze Complexity
run: task-master analyze-complexity --research
- name: Upload Report
uses: actions/upload-artifact@v3
with:
name: complexity-report
path: .taskmaster/reports/task-complexity-report.json
```
## Best Practices
### 1. Use OAuth for Development
```bash
# For local development, use OAuth (no API key needed)
codex login
task-master models --set-main gpt-5-codex --codex-cli
```
### 2. Configure Approval Modes Appropriately
```json
{
"codexCli": {
"approvalMode": "on-failure", // Safe default
"sandboxMode": "workspace-write" // Restricts to project directory
}
}
```
### 3. Use Command-Specific Settings
```json
{
"codexCli": {
"commandSpecific": {
"parse-prd": {
"approvalMode": "never", // PRD parsing is safe
"verbose": true
},
"expand": {
"approvalMode": "on-request", // More cautious for task expansion
"verbose": false
}
}
}
}
```
### 4. Leverage Codebase Analysis
```json
{
"global": {
"enableCodebaseAnalysis": true // Let Codex analyze your code
}
}
```
### 5. Handle Errors Gracefully
```bash
# Always configure a fallback model
task-master models --set-fallback gpt-5 --codex-cli
# Or use a different provider as fallback
task-master models --set-fallback claude-3-5-sonnet
```
## Next Steps
- Read the [Codex CLI Provider Documentation](../providers/codex-cli.md)
- Explore [Configuration Options](../configuration.md#codex-cli-provider)
- Check out [Command Reference](../command-reference.md)
- Learn about [Task Structure](../task-structure.md)
## Common Patterns
### Pattern: Daily Development Workflow
```bash
# Morning: Review tasks
task-master list
# Get next task
task-master next
# Work on task...
# Update task with notes
task-master update-subtask --id=2.3 --prompt="Implemented authentication middleware"
# Mark complete
task-master set-status --id=2.3 --status=done
# Repeat
```
### Pattern: Feature Planning
```bash
# Write feature spec
vim new-feature.txt
# Generate tasks
task-master parse-prd new-feature.txt --num-tasks 10
# Analyze and expand
task-master analyze-complexity --research
task-master expand --all --research --force
# Review and adjust
task-master list
```
### Pattern: Sprint Planning
```bash
# Parse sprint requirements
task-master parse-prd sprint-requirements.txt
# Analyze complexity
task-master analyze-complexity --research
# View report
task-master complexity-report
# Adjust task estimates based on complexity scores
```
---
For more examples and advanced usage, see the [full documentation](https://docs.task-master.dev).

View File

@@ -1,4 +1,4 @@
# Available Models as of September 19, 2025
# Available Models as of October 5, 2025
## Main Models
@@ -10,6 +10,8 @@
| anthropic | claude-3-5-sonnet-20241022 | 0.49 | 3 | 15 |
| claude-code | opus | 0.725 | 0 | 0 |
| claude-code | sonnet | 0.727 | 0 | 0 |
| codex-cli | gpt-5 | 0.749 | 0 | 0 |
| codex-cli | gpt-5-codex | 0.749 | 0 | 0 |
| mcp | mcp-sampling | — | 0 | 0 |
| gemini-cli | gemini-2.5-pro | 0.72 | 0 | 0 |
| gemini-cli | gemini-2.5-flash | 0.71 | 0 | 0 |
@@ -100,6 +102,8 @@
| ----------- | -------------------------------------------- | --------- | ---------- | ----------- |
| claude-code | opus | 0.725 | 0 | 0 |
| claude-code | sonnet | 0.727 | 0 | 0 |
| codex-cli | gpt-5 | 0.749 | 0 | 0 |
| codex-cli | gpt-5-codex | 0.749 | 0 | 0 |
| mcp | mcp-sampling | — | 0 | 0 |
| gemini-cli | gemini-2.5-pro | 0.72 | 0 | 0 |
| gemini-cli | gemini-2.5-flash | 0.71 | 0 | 0 |
@@ -119,7 +123,7 @@
| groq | deepseek-r1-distill-llama-70b | 0.52 | 0.75 | 0.99 |
| perplexity | sonar-pro | — | 3 | 15 |
| perplexity | sonar | — | 1 | 1 |
| perplexity | deep-research | 0.211 | 2 | 8 |
| perplexity | sonar-deep-research | 0.211 | 2 | 8 |
| perplexity | sonar-reasoning-pro | 0.211 | 2 | 8 |
| perplexity | sonar-reasoning | 0.211 | 1 | 5 |
| bedrock | us.anthropic.claude-3-opus-20240229-v1:0 | 0.725 | 15 | 75 |
@@ -140,6 +144,8 @@
| anthropic | claude-3-5-sonnet-20241022 | 0.49 | 3 | 15 |
| claude-code | opus | 0.725 | 0 | 0 |
| claude-code | sonnet | 0.727 | 0 | 0 |
| codex-cli | gpt-5 | 0.749 | 0 | 0 |
| codex-cli | gpt-5-codex | 0.749 | 0 | 0 |
| mcp | mcp-sampling | — | 0 | 0 |
| gemini-cli | gemini-2.5-pro | 0.72 | 0 | 0 |
| gemini-cli | gemini-2.5-flash | 0.71 | 0 | 0 |

docs/providers/codex-cli.md (new file, 510 lines)
View File

@@ -0,0 +1,510 @@
# Codex CLI Provider
The `codex-cli` provider integrates Task Master with OpenAI's Codex CLI via the community AI SDK provider [`ai-sdk-provider-codex-cli`](https://github.com/ben-vargas/ai-sdk-provider-codex-cli). It uses your ChatGPT subscription (OAuth) via `codex login`, with optional `OPENAI_CODEX_API_KEY` support.
## Why Use Codex CLI?
The primary benefits of using the `codex-cli` provider include:
- **Use Latest OpenAI Models**: Access to cutting-edge models like GPT-5 and GPT-5-Codex via ChatGPT subscription
- **OAuth Authentication**: No API key management needed - authenticate once with `codex login`
- **Built-in Tool Execution**: Native support for command execution, file changes, MCP tools, and web search
- **Native JSON Schema Support**: Structured output generation without post-processing
- **Approval/Sandbox Modes**: Fine-grained control over command execution and filesystem access for safety
## Quickstart
Get up and running with Codex CLI in 3 steps:
```bash
# 1. Install Codex CLI globally
npm install -g @openai/codex
# 2. Authenticate with your ChatGPT account
codex login
# 3. Configure Task Master to use Codex CLI
task-master models --set-main gpt-5-codex --codex-cli
```
## Requirements
- **Node.js**: >= 18.0.0
- **Codex CLI**: >= 0.42.0 (>= 0.44.0 recommended)
- **ChatGPT Subscription**: Required for OAuth access (Plus, Pro, Business, Edu, or Enterprise)
- **Task Master**: >= 0.27.3 (version with Codex CLI support)
### Checking Your Versions
```bash
# Check Node.js version
node --version
# Check Codex CLI version
codex --version
# Check Task Master version
task-master --version
```
## Installation
### Install Codex CLI
```bash
# Install globally via npm
npm install -g @openai/codex
# Verify installation
codex --version
```
Expected output: `v0.44.0` or higher
### Install Task Master (if not already installed)
```bash
# Install globally
npm install -g task-master-ai
# Or install in your project
npm install --save-dev task-master-ai
```
## Authentication
### OAuth Authentication (Primary Method - Recommended)
The Codex CLI provider is designed to use OAuth authentication with your ChatGPT subscription:
```bash
# Launch Codex CLI and authenticate
codex login
```
This will:
1. Open a browser window for OAuth authentication
2. Prompt you to log in with your ChatGPT account
3. Store authentication credentials locally
4. Allow Task Master to automatically use these credentials
To verify your authentication:
```bash
# Open interactive Codex CLI
codex
# Use /about command to see auth status
/about
```
### Optional: API Key Method
While OAuth is the primary and recommended method, you can optionally use an OpenAI API key:
```bash
# In your .env file
OPENAI_CODEX_API_KEY=sk-your-openai-api-key-here
```
**Important Notes**:
- The API key will **only** be injected when explicitly provided
- OAuth authentication is always preferred when available
- Using an API key doesn't provide access to subscription-only models like GPT-5-Codex
- For full OpenAI API access with non-subscription models, consider using the standard `openai` provider instead
- `OPENAI_CODEX_API_KEY` is specific to the codex-cli provider to avoid conflicts with the `openai` provider's `OPENAI_API_KEY`
## Available Models
The Codex CLI provider supports only models available through ChatGPT subscription:
| Model ID | Description | Max Input Tokens | Max Output Tokens |
|----------|-------------|------------------|-------------------|
| `gpt-5` | Latest GPT-5 model | 272K | 128K |
| `gpt-5-codex` | GPT-5 optimized for agentic software engineering | 272K | 128K |
**Note**: These models are only available via OAuth subscription through Codex CLI (ChatGPT Plus, Pro, Business, Edu, or Enterprise plans). For other OpenAI models, use the standard `openai` provider with an API key.
**Research Capabilities**: Both GPT-5 models support web search tools, making them suitable for the `research` role in addition to `main` and `fallback` roles.
## Configuration
### Basic Configuration
Add Codex CLI to your `.taskmaster/config.json`:
```json
{
"models": {
"main": {
"provider": "codex-cli",
"modelId": "gpt-5-codex",
"maxTokens": 128000,
"temperature": 0.2
},
"fallback": {
"provider": "codex-cli",
"modelId": "gpt-5",
"maxTokens": 128000,
"temperature": 0.2
}
}
}
```
### Advanced Configuration with Codex CLI Settings
The `codexCli` section allows you to customize Codex CLI behavior:
```json
{
"models": {
"main": {
"provider": "codex-cli",
"modelId": "gpt-5-codex",
"maxTokens": 128000,
"temperature": 0.2
}
},
"codexCli": {
"allowNpx": true,
"skipGitRepoCheck": true,
"approvalMode": "on-failure",
"sandboxMode": "workspace-write",
"verbose": false
}
}
```
### Codex CLI Settings Reference
#### Core Settings
- **`allowNpx`** (boolean, default: `false`)
- Allow fallback to `npx @openai/codex` if the CLI is not found on PATH
- Useful for CI environments or systems without global npm installations
- Example: `"allowNpx": true`
- **`skipGitRepoCheck`** (boolean, default: `false`)
- Skip git repository safety check before execution
- Recommended for CI environments or non-repository usage
- Example: `"skipGitRepoCheck": true`
#### Execution Control
- **`approvalMode`** (string)
- Controls when to require user approval for command execution
- Options:
- `"untrusted"`: Require approval for all commands
- `"on-failure"`: Only require approval after a command fails (default)
- `"on-request"`: Approve only when explicitly requested
- `"never"`: Never require approval (use with caution)
- Example: `"approvalMode": "on-failure"`
- **`sandboxMode`** (string)
- Controls filesystem access permissions
- Options:
- `"read-only"`: Read-only access to filesystem
- `"workspace-write"`: Allow writes to workspace directory (default)
- `"danger-full-access"`: Full filesystem access (use with extreme caution)
- Example: `"sandboxMode": "workspace-write"`
#### Path and Environment
- **`codexPath`** (string, optional)
- Custom path to Codex CLI executable
- Useful when Codex is installed in a non-standard location
- Example: `"codexPath": "/usr/local/bin/codex"`
- **`cwd`** (string, optional)
- Working directory for Codex CLI execution
- Defaults to current working directory
- Example: `"cwd": "/path/to/project"`
- **`env`** (object, optional)
- Additional environment variables for Codex CLI
- Example: `"env": { "DEBUG": "true" }`
#### Advanced Settings
- **`fullAuto`** (boolean, optional)
- Fully automatic mode (equivalent to `--full-auto` flag)
- Bypasses most approvals for fully automated workflows
- Example: `"fullAuto": true`
- **`dangerouslyBypassApprovalsAndSandbox`** (boolean, optional)
- Bypass all safety checks including approvals and sandbox
- **WARNING**: Use with extreme caution - can execute arbitrary code
- Example: `"dangerouslyBypassApprovalsAndSandbox": false`
- **`color`** (string, optional)
- Force color handling in Codex CLI output
- Options: `"always"`, `"never"`, `"auto"`
- Example: `"color": "auto"`
- **`outputLastMessageFile`** (string, optional)
- Write last agent message to specified file
- Useful for debugging or logging
- Example: `"outputLastMessageFile": "./last-message.txt"`
- **`verbose`** (boolean, optional)
- Enable verbose provider logging
- Helpful for debugging issues
- Example: `"verbose": true`
### Command-Specific Settings
Override settings for specific Task Master commands:
```json
{
"codexCli": {
"allowNpx": true,
"approvalMode": "on-failure",
"commandSpecific": {
"parse-prd": {
"approvalMode": "never",
"verbose": true
},
"expand": {
"sandboxMode": "read-only"
},
"add-task": {
"approvalMode": "untrusted"
}
}
}
}
```
## Usage
### Setting Codex CLI Models
```bash
# Set Codex CLI for main role
task-master models --set-main gpt-5-codex --codex-cli
# Set Codex CLI for fallback role
task-master models --set-fallback gpt-5 --codex-cli
# Set Codex CLI for research role
task-master models --set-research gpt-5 --codex-cli
# Verify configuration
task-master models
```
### Using Codex CLI with Task Master Commands
Once configured, use Task Master commands as normal:
```bash
# Parse a PRD with Codex CLI
task-master parse-prd my-requirements.txt
# Analyze project complexity
task-master analyze-complexity --research
# Expand a task into subtasks
task-master expand --id=1.2
# Add a new task with AI assistance
task-master add-task --prompt="Implement user authentication" --research
```
The provider will automatically use your OAuth credentials when Codex CLI is configured.
## Codebase Features
The Codex CLI provider is **codebase-capable**, meaning it can analyze and interact with your project files. This enables advanced features like:
- **Code Analysis**: Understanding your project structure and dependencies
- **Intelligent Suggestions**: Context-aware task recommendations
- **File Operations**: Reading and analyzing project files for better task generation
- **Pattern Recognition**: Identifying common patterns and best practices in your codebase
### Enabling Codebase Analysis
Codebase analysis is automatically enabled when:
1. Your provider is set to `codex-cli`
2. `enableCodebaseAnalysis` is `true` in your global configuration (default)
To verify or configure:
```json
{
"global": {
"enableCodebaseAnalysis": true
}
}
```
## Troubleshooting
### "codex: command not found" Error
**Symptoms**: Task Master reports that the Codex CLI is not found.
**Solutions**:
1. **Install Codex CLI globally**:
```bash
npm install -g @openai/codex
```
2. **Verify installation**:
```bash
codex --version
```
3. **Alternative: Enable npx fallback**:
```json
{
"codexCli": {
"allowNpx": true
}
}
```
### "Not logged in" Errors
**Symptoms**: Authentication errors when trying to use Codex CLI.
**Solutions**:
1. **Authenticate with OAuth**:
```bash
codex login
```
2. **Verify authentication status**:
```bash
codex
# Then use /about command
```
3. **Re-authenticate if needed**:
```bash
# Logout first
codex
# Use /auth command to change auth method
# Then login again
codex login
```
### "Old version" Warnings
**Symptoms**: Warnings about Codex CLI version being outdated.
**Solutions**:
1. **Check current version**:
```bash
codex --version
```
2. **Upgrade to latest version**:
```bash
npm install -g @openai/codex@latest
```
3. **Verify upgrade**:
```bash
codex --version
```
Should show >= 0.44.0
### "Model not available" Errors
**Symptoms**: Error indicating the requested model is not available.
**Causes and Solutions**:
1. **Using unsupported model**:
- Only `gpt-5` and `gpt-5-codex` are available via Codex CLI
- For other OpenAI models, use the standard `openai` provider
2. **Subscription not active**:
- Verify your ChatGPT subscription is active
- Check subscription status at <https://platform.openai.com>
3. **Wrong provider selected**:
- Verify you're using `--codex-cli` flag when setting models
- Check `.taskmaster/config.json` shows `"provider": "codex-cli"`
### API Key Not Being Used
**Symptoms**: You've set `OPENAI_CODEX_API_KEY` but it's not being used.
**Expected Behavior**:
- OAuth authentication is always preferred
- API key is only injected when explicitly provided
- API key doesn't grant access to subscription-only models
**Solutions**:
1. **Verify OAuth is working**:
```bash
codex
# Check /about for auth status
```
2. **If you want to force API key usage**:
- This is not recommended with Codex CLI
- Consider using the standard `openai` provider instead
3. **Verify .env file is being loaded**:
```bash
# Check if .env exists in project root
ls -la .env
# Verify OPENAI_CODEX_API_KEY is set
grep OPENAI_CODEX_API_KEY .env
```
### Approval/Sandbox Issues
**Symptoms**: Commands are blocked or filesystem access is denied.
**Solutions**:
1. **Adjust approval mode**:
```json
{
"codexCli": {
"approvalMode": "on-request"
}
}
```
2. **Adjust sandbox mode**:
```json
{
"codexCli": {
"sandboxMode": "workspace-write"
}
}
```
3. **For fully automated workflows** (use cautiously):
```json
{
"codexCli": {
"fullAuto": true
}
}
```
## Important Notes
- **OAuth subscription required**: No API key needed for basic operation, but requires active ChatGPT subscription
- **Limited model selection**: Only `gpt-5` and `gpt-5-codex` available via OAuth
- **Pricing information**: Not available for OAuth models (shows as "Unknown" in cost calculations)
- **No automatic dependency**: The `@openai/codex` package is not added to Task Master's dependencies - install it globally or enable `allowNpx`
- **Codebase analysis**: Automatically enabled when using `codex-cli` provider
- **Safety first**: Default settings prioritize safety with `approvalMode: "on-failure"` and `sandboxMode: "workspace-write"`
## See Also
- [Configuration Guide](../configuration.md#codex-cli-provider) - Complete Codex CLI configuration reference
- [Command Reference](../command-reference.md) - Using `--codex-cli` flag with commands
- [Gemini CLI Provider](./gemini-cli.md) - Similar CLI-based provider for Google Gemini
- [Claude Code Integration](../claude-code-integration.md) - Another CLI-based provider
- [ai-sdk-provider-codex-cli](https://github.com/ben-vargas/ai-sdk-provider-codex-cli) - Source code for the provider package

View File

@@ -69,11 +69,29 @@ export function resolveTasksPath(args, log = silentLogger) {
// Use core findTasksPath with explicit path and normalized projectRoot context
if (projectRoot) {
return coreFindTasksPath(explicitPath, { projectRoot }, log);
const foundPath = coreFindTasksPath(explicitPath, { projectRoot }, log);
// If core function returns null and no explicit path was provided,
// construct the expected default path as documented
if (foundPath === null && !explicitPath) {
const defaultPath = path.join(
projectRoot,
'.taskmaster',
'tasks',
'tasks.json'
);
log?.info?.(
`Core findTasksPath returned null, using default path: ${defaultPath}`
);
return defaultPath;
}
return foundPath;
}
// Fallback to core function without projectRoot context
return coreFindTasksPath(explicitPath, null, log);
const foundPath = coreFindTasksPath(explicitPath, null, log);
// Note: When no projectRoot is available, we can't construct a default path
// so we return null and let the calling code handle the error
return foundPath;
}
/**

View File

@@ -75,13 +75,50 @@ function generateExampleFromSchema(schema) {
return result;
case 'ZodString':
return 'string';
// Check for min/max length constraints
if (def.checks) {
const minCheck = def.checks.find((c) => c.kind === 'min');
const maxCheck = def.checks.find((c) => c.kind === 'max');
if (minCheck && maxCheck) {
return (
'<string between ' +
minCheck.value +
'-' +
maxCheck.value +
' characters>'
);
} else if (minCheck) {
return '<string with at least ' + minCheck.value + ' characters>';
} else if (maxCheck) {
return '<string up to ' + maxCheck.value + ' characters>';
}
}
return '<string>';
case 'ZodNumber':
return 0;
// Check for int, positive, min/max constraints
if (def.checks) {
const intCheck = def.checks.find((c) => c.kind === 'int');
const minCheck = def.checks.find((c) => c.kind === 'min');
const maxCheck = def.checks.find((c) => c.kind === 'max');
if (intCheck && minCheck && minCheck.value > 0) {
return '<positive integer>';
} else if (intCheck) {
return '<integer>';
} else if (minCheck || maxCheck) {
return (
'<number' +
(minCheck ? ' >= ' + minCheck.value : '') +
(maxCheck ? ' <= ' + maxCheck.value : '') +
'>'
);
}
}
return '<number>';
case 'ZodBoolean':
return false;
return '<boolean>';
case 'ZodArray':
const elementExample = generateExampleFromSchema(def.type);

75
output.txt Normal file

File diff suppressed because one or more lines are too long

11765
package-lock.json generated

File diff suppressed because it is too large

View File

@@ -1,6 +1,6 @@
{
"name": "task-master-ai",
"version": "0.27.1",
"version": "0.28.0-rc.1",
"description": "A task management system for ambitious AI-driven development that doesn't overwhelm and confuse Cursor.",
"main": "index.js",
"type": "module",
@@ -17,7 +17,7 @@
"turbo:build": "turbo build",
"turbo:typecheck": "turbo typecheck",
"build:build-config": "npm run build -w @tm/build-config",
"test": "node --experimental-vm-modules node_modules/.bin/jest",
"test": "cross-env NODE_ENV=test node --experimental-vm-modules node_modules/.bin/jest",
"test:unit": "node --experimental-vm-modules node_modules/.bin/jest --testPathPattern=unit",
"test:integration": "node --experimental-vm-modules node_modules/.bin/jest --testPathPattern=integration",
"test:fails": "node --experimental-vm-modules node_modules/.bin/jest --onlyFailures",
@@ -52,23 +52,27 @@
"author": "Eyal Toledano",
"license": "MIT WITH Commons-Clause",
"dependencies": {
"@ai-sdk/amazon-bedrock": "^2.2.9",
"@ai-sdk/anthropic": "^1.2.10",
"@ai-sdk/azure": "^1.3.17",
"@ai-sdk/google": "^1.2.13",
"@ai-sdk/google-vertex": "^2.2.23",
"@ai-sdk/groq": "^1.2.9",
"@ai-sdk/mistral": "^1.2.7",
"@ai-sdk/openai": "^1.3.20",
"@ai-sdk/perplexity": "^1.1.7",
"@ai-sdk/xai": "^1.2.15",
"@anthropic-ai/sdk": "^0.39.0",
"@aws-sdk/credential-providers": "^3.817.0",
"@ai-sdk/amazon-bedrock": "^3.0.23",
"@ai-sdk/anthropic": "^2.0.18",
"@ai-sdk/azure": "^2.0.34",
"@ai-sdk/google": "^2.0.16",
"@ai-sdk/google-vertex": "^3.0.29",
"@ai-sdk/groq": "^2.0.21",
"@ai-sdk/mistral": "^2.0.16",
"@ai-sdk/openai": "^2.0.34",
"@ai-sdk/perplexity": "^2.0.10",
"@ai-sdk/provider": "^2.0.0",
"@ai-sdk/provider-utils": "^3.0.10",
"@ai-sdk/xai": "^2.0.22",
"@aws-sdk/credential-providers": "^3.895.0",
"@inquirer/search": "^3.0.15",
"@openrouter/ai-sdk-provider": "^0.4.5",
"@openrouter/ai-sdk-provider": "^1.2.0",
"@streamparser/json": "^0.0.22",
"@supabase/supabase-js": "^2.57.4",
"ai": "^4.3.10",
"ai": "^5.0.51",
"ai-sdk-provider-claude-code": "^1.1.4",
"ai-sdk-provider-codex-cli": "^0.3.0",
"ai-sdk-provider-gemini-cli": "^1.1.1",
"ajv": "^8.17.1",
"ajv-formats": "^3.0.1",
"boxen": "^8.0.1",
@@ -78,7 +82,7 @@
"cli-table3": "^0.6.5",
"commander": "^12.1.0",
"cors": "^2.8.5",
"dotenv": "^16.3.1",
"dotenv": "^16.6.1",
"express": "^4.21.2",
"fastmcp": "^3.5.0",
"figlet": "^1.8.0",
@@ -93,17 +97,14 @@
"lru-cache": "^10.2.0",
"marked": "^15.0.12",
"marked-terminal": "^7.3.0",
"ollama-ai-provider": "^1.2.0",
"openai": "^4.89.0",
"ollama-ai-provider-v2": "^1.3.1",
"ora": "^8.2.0",
"uuid": "^11.1.0",
"zod": "^3.23.8",
"zod-to-json-schema": "^3.24.5"
"zod": "^4.1.11"
},
"optionalDependencies": {
"@anthropic-ai/claude-code": "^1.0.88",
"@biomejs/cli-linux-x64": "^1.9.4",
"ai-sdk-provider-gemini-cli": "^0.1.3"
"@biomejs/cli-linux-x64": "^1.9.4"
},
"engines": {
"node": ">=18.0.0"
@@ -127,12 +128,12 @@
"@changesets/changelog-github": "^0.5.1",
"@changesets/cli": "^2.28.1",
"@manypkg/cli": "^0.25.1",
"@tm/ai-sdk-provider-grok-cli": "*",
"@tm/cli": "*",
"@types/jest": "^29.5.14",
"@types/marked-terminal": "^6.1.1",
"concurrently": "^9.2.1",
"cross-env": "^10.0.0",
"dotenv-mono": "^1.5.1",
"execa": "^8.0.1",
"jest": "^29.7.0",
"jest-environment-node": "^29.7.0",
@@ -142,7 +143,7 @@
"ts-jest": "^29.4.2",
"tsdown": "^0.15.2",
"tsx": "^4.20.4",
"turbo": "^2.5.6",
"typescript": "^5.7.3"
"turbo": "2.5.6",
"typescript": "^5.9.2"
}
}

View File

@@ -0,0 +1,165 @@
# AI SDK Provider for Grok CLI
A provider for the [AI SDK](https://sdk.vercel.ai) that integrates with [Grok CLI](https://docs.x.ai/api) for accessing xAI's Grok language models.
## Features
- **AI SDK v5 Compatible** - Full support for the latest AI SDK interfaces
- **Streaming & Non-streaming** - Both generation modes supported
- **Error Handling** - Comprehensive error handling with retry logic
- **Type Safety** - Full TypeScript support with proper type definitions
- **JSON Mode** - Automatic JSON extraction from responses
- **Abort Signals** - Proper cancellation support
## Installation
```bash
npm install @tm/ai-sdk-provider-grok-cli
# or
yarn add @tm/ai-sdk-provider-grok-cli
```
## Prerequisites
1. Install the Grok CLI:
```bash
npm install -g grok-cli
# or follow xAI's installation instructions
```
2. Set up authentication:
```bash
export GROK_CLI_API_KEY="your-api-key"
# or configure via grok CLI: grok config set api-key your-key
```
## Usage
### Basic Usage
```typescript
import { grokCli } from '@tm/ai-sdk-provider-grok-cli';
import { generateText } from 'ai';
const result = await generateText({
model: grokCli('grok-3-latest'),
prompt: 'Write a haiku about TypeScript'
});
console.log(result.text);
```
### Streaming
```typescript
import { grokCli } from '@tm/ai-sdk-provider-grok-cli';
import { streamText } from 'ai';
const { textStream } = await streamText({
model: grokCli('grok-4-latest'),
prompt: 'Explain quantum computing'
});
for await (const delta of textStream) {
process.stdout.write(delta);
}
```
### JSON Mode
```typescript
import { grokCli } from '@tm/ai-sdk-provider-grok-cli';
import { generateObject } from 'ai';
import { z } from 'zod';
const result = await generateObject({
model: grokCli('grok-3-latest'),
schema: z.object({
name: z.string(),
age: z.number(),
hobbies: z.array(z.string())
}),
prompt: 'Generate a person profile'
});
console.log(result.object);
```
## Supported Models
- `grok-3-latest` - Grok 3 (latest version)
- `grok-4-latest` - Grok 4 (latest version)
- `grok-4` - Grok 4 (stable)
- Custom model strings supported
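Custom model strings are passed through to the CLI as-is. A minimal sketch (the model id below is hypothetical and shown only for illustration):
```typescript
import { grokCli } from '@tm/ai-sdk-provider-grok-cli';
import { generateText } from 'ai';

// Any model string the Grok CLI accepts can be used directly;
// 'grok-3-mini' here is a hypothetical id for illustration only.
const result = await generateText({
  model: grokCli('grok-3-mini'),
  prompt: 'Summarize this repository in one sentence'
});

console.log(result.text);
```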
## Configuration
### Provider Settings
```typescript
import { createGrokCli } from '@tm/ai-sdk-provider-grok-cli';
const grok = createGrokCli({
apiKey: 'your-api-key', // Optional if set via env/CLI
timeout: 120000, // 2 minutes default
workingDirectory: '/path/to/project', // Optional
baseURL: 'https://api.x.ai' // Optional
});
```
### Model Settings
```typescript
const model = grok('grok-4-latest', {
timeout: 300000, // 5 minutes for grok-4
// Other CLI-specific settings
});
```
## Error Handling
The provider includes comprehensive error handling:
```typescript
import {
isAuthenticationError,
isTimeoutError,
isInstallationError
} from '@tm/ai-sdk-provider-grok-cli';
try {
const result = await generateText({
model: grokCli('grok-4-latest'),
prompt: 'Hello!'
});
} catch (error) {
if (isAuthenticationError(error)) {
console.error('Authentication failed:', error.message);
} else if (isTimeoutError(error)) {
console.error('Request timed out:', error.message);
} else if (isInstallationError(error)) {
console.error('Grok CLI not installed or not found in PATH');
}
}
```
## Development
```bash
# Install dependencies
npm install
# Start development mode (keep running during development)
npm run dev
# Type check
npm run typecheck
# Run tests (requires build first)
NODE_ENV=production npm run build
npm test
```
**Important**: Always run `npm run dev` and keep it running during development. This ensures proper compilation and hot-reloading of TypeScript files.

View File

@@ -0,0 +1,35 @@
{
"name": "@tm/ai-sdk-provider-grok-cli",
"private": true,
"description": "AI SDK provider for Grok CLI integration",
"type": "module",
"types": "./src/index.ts",
"main": "./dist/index.js",
"exports": {
".": "./src/index.ts"
},
"scripts": {
"test": "vitest run",
"test:watch": "vitest",
"test:ui": "vitest --ui",
"typecheck": "tsc --noEmit"
},
"dependencies": {
"@ai-sdk/provider": "^2.0.0",
"@ai-sdk/provider-utils": "^3.0.10",
"jsonc-parser": "^3.3.1"
},
"devDependencies": {
"@types/node": "^22.18.6",
"typescript": "^5.9.2",
"vitest": "^3.2.4"
},
"engines": {
"node": ">=18"
},
"keywords": ["ai", "grok", "x.ai", "cli", "language-model", "provider"],
"files": ["dist/**/*", "README.md"],
"publishConfig": {
"access": "public"
}
}

View File

@@ -0,0 +1,188 @@
/**
* Tests for error handling utilities
*/
import { APICallError, LoadAPIKeyError } from '@ai-sdk/provider';
import { describe, expect, it } from 'vitest';
import {
createAPICallError,
createAuthenticationError,
createInstallationError,
createTimeoutError,
getErrorMetadata,
isAuthenticationError,
isInstallationError,
isTimeoutError
} from './errors.js';
describe('createAPICallError', () => {
it('should create APICallError with metadata', () => {
const error = createAPICallError({
message: 'Test error',
code: 'TEST_ERROR',
exitCode: 1,
stderr: 'Error output',
stdout: 'Success output',
promptExcerpt: 'Test prompt',
isRetryable: true
});
expect(error).toBeInstanceOf(APICallError);
expect(error.message).toBe('Test error');
expect(error.isRetryable).toBe(true);
expect(error.url).toBe('grok-cli://command');
expect(error.data).toEqual({
code: 'TEST_ERROR',
exitCode: 1,
stderr: 'Error output',
stdout: 'Success output',
promptExcerpt: 'Test prompt'
});
});
it('should create APICallError with minimal parameters', () => {
const error = createAPICallError({
message: 'Simple error'
});
expect(error).toBeInstanceOf(APICallError);
expect(error.message).toBe('Simple error');
expect(error.isRetryable).toBe(false);
});
});
describe('createAuthenticationError', () => {
it('should create LoadAPIKeyError with custom message', () => {
const error = createAuthenticationError({
message: 'Custom auth error'
});
expect(error).toBeInstanceOf(LoadAPIKeyError);
expect(error.message).toBe('Custom auth error');
});
it('should create LoadAPIKeyError with default message', () => {
const error = createAuthenticationError({});
expect(error).toBeInstanceOf(LoadAPIKeyError);
expect(error.message).toContain('Authentication failed');
});
});
describe('createTimeoutError', () => {
it('should create APICallError for timeout', () => {
const error = createTimeoutError({
message: 'Operation timed out',
timeoutMs: 5000,
promptExcerpt: 'Test prompt'
});
expect(error).toBeInstanceOf(APICallError);
expect(error.message).toBe('Operation timed out');
expect(error.isRetryable).toBe(true);
expect(error.data).toEqual({
code: 'TIMEOUT',
promptExcerpt: 'Test prompt',
timeoutMs: 5000
});
});
});
describe('createInstallationError', () => {
it('should create APICallError for installation issues', () => {
const error = createInstallationError({
message: 'CLI not found'
});
expect(error).toBeInstanceOf(APICallError);
expect(error.message).toBe('CLI not found');
expect(error.isRetryable).toBe(false);
expect(error.url).toBe('grok-cli://installation');
});
it('should create APICallError with default message', () => {
const error = createInstallationError({});
expect(error).toBeInstanceOf(APICallError);
expect(error.message).toContain('Grok CLI is not installed');
});
});
describe('isAuthenticationError', () => {
it('should return true for LoadAPIKeyError', () => {
const error = new LoadAPIKeyError({ message: 'Auth failed' });
expect(isAuthenticationError(error)).toBe(true);
});
it('should return true for APICallError with 401 exit code', () => {
const error = new APICallError({
message: 'Unauthorized',
data: { exitCode: 401 }
});
expect(isAuthenticationError(error)).toBe(true);
});
it('should return false for other errors', () => {
const error = new Error('Generic error');
expect(isAuthenticationError(error)).toBe(false);
});
});
describe('isTimeoutError', () => {
it('should return true for timeout APICallError', () => {
const error = new APICallError({
message: 'Timeout',
data: { code: 'TIMEOUT' }
});
expect(isTimeoutError(error)).toBe(true);
});
it('should return false for other errors', () => {
const error = new APICallError({ message: 'Other error' });
expect(isTimeoutError(error)).toBe(false);
});
});
describe('isInstallationError', () => {
it('should return true for installation APICallError', () => {
const error = new APICallError({
message: 'Not installed',
url: 'grok-cli://installation'
});
expect(isInstallationError(error)).toBe(true);
});
it('should return false for other errors', () => {
const error = new APICallError({ message: 'Other error' });
expect(isInstallationError(error)).toBe(false);
});
});
describe('getErrorMetadata', () => {
it('should return metadata from APICallError', () => {
const metadata = {
code: 'TEST_ERROR',
exitCode: 1,
stderr: 'Error output'
};
const error = new APICallError({
message: 'Test error',
data: metadata
});
const result = getErrorMetadata(error);
expect(result).toEqual(metadata);
});
it('should return undefined for errors without metadata', () => {
const error = new Error('Generic error');
const result = getErrorMetadata(error);
expect(result).toBeUndefined();
});
it('should return undefined for APICallError without data', () => {
const error = new APICallError({ message: 'Test error' });
const result = getErrorMetadata(error);
expect(result).toBeUndefined();
});
});

View File

@@ -0,0 +1,187 @@
/**
* Error handling utilities for Grok CLI provider
*/
import { APICallError, LoadAPIKeyError } from '@ai-sdk/provider';
import type { GrokCliErrorMetadata } from './types.js';
/**
* Parameters for creating API call errors
*/
interface CreateAPICallErrorParams {
/** Error message */
message: string;
/** Error code */
code?: string;
/** Process exit code */
exitCode?: number;
/** Standard error output */
stderr?: string;
/** Standard output */
stdout?: string;
/** Excerpt of the prompt */
promptExcerpt?: string;
/** Whether the error is retryable */
isRetryable?: boolean;
}
/**
* Parameters for creating authentication errors
*/
interface CreateAuthenticationErrorParams {
/** Error message */
message?: string;
}
/**
* Parameters for creating timeout errors
*/
interface CreateTimeoutErrorParams {
/** Error message */
message: string;
/** Excerpt of the prompt */
promptExcerpt?: string;
/** Timeout in milliseconds */
timeoutMs: number;
}
/**
* Parameters for creating installation errors
*/
interface CreateInstallationErrorParams {
/** Error message */
message?: string;
}
/**
* Create an API call error with Grok CLI specific metadata
*/
export function createAPICallError({
message,
code,
exitCode,
stderr,
stdout,
promptExcerpt,
isRetryable = false
}: CreateAPICallErrorParams): APICallError {
const metadata: GrokCliErrorMetadata = {
code,
exitCode,
stderr,
stdout,
promptExcerpt
};
return new APICallError({
message,
isRetryable,
url: 'grok-cli://command',
requestBodyValues: promptExcerpt ? { prompt: promptExcerpt } : undefined,
data: metadata
});
}
/**
* Create an authentication error
*/
export function createAuthenticationError({
message
}: CreateAuthenticationErrorParams): LoadAPIKeyError {
return new LoadAPIKeyError({
message:
message ||
'Authentication failed. Please ensure Grok CLI is properly configured with API key.'
});
}
/**
* Create a timeout error
*/
export function createTimeoutError({
message,
promptExcerpt,
timeoutMs
}: CreateTimeoutErrorParams): APICallError {
const metadata: GrokCliErrorMetadata & { timeoutMs: number } = {
code: 'TIMEOUT',
promptExcerpt,
timeoutMs
};
return new APICallError({
message,
isRetryable: true,
url: 'grok-cli://command',
requestBodyValues: promptExcerpt ? { prompt: promptExcerpt } : undefined,
data: metadata
});
}
/**
* Create a CLI installation error
*/
export function createInstallationError({
message
}: CreateInstallationErrorParams): APICallError {
return new APICallError({
message:
message ||
'Grok CLI is not installed or not found in PATH. Please install with: npm install -g @vibe-kit/grok-cli',
isRetryable: false,
url: 'grok-cli://installation',
requestBodyValues: undefined
});
}
/**
* Check if an error is an authentication error
*/
export function isAuthenticationError(
error: unknown
): error is LoadAPIKeyError {
if (error instanceof LoadAPIKeyError) return true;
if (error instanceof APICallError) {
const metadata = error.data as GrokCliErrorMetadata | undefined;
if (!metadata) return false;
return (
metadata.exitCode === 401 ||
metadata.code === 'AUTHENTICATION_ERROR' ||
metadata.code === 'UNAUTHORIZED'
);
}
return false;
}
/**
* Check if an error is a timeout error
*/
export function isTimeoutError(error: unknown): error is APICallError {
if (
error instanceof APICallError &&
(error.data as GrokCliErrorMetadata)?.code === 'TIMEOUT'
)
return true;
return false;
}
/**
* Check if an error is an installation error
*/
export function isInstallationError(error: unknown): error is APICallError {
if (error instanceof APICallError && error.url === 'grok-cli://installation')
return true;
return false;
}
/**
* Get error metadata from an error
*/
export function getErrorMetadata(
error: unknown
): GrokCliErrorMetadata | undefined {
if (error instanceof APICallError && error.data) {
return error.data as GrokCliErrorMetadata;
}
return undefined;
}

View File

@@ -1,53 +1,51 @@
/**
* @fileoverview Grok CLI Language Model implementation
* Grok CLI Language Model implementation for AI SDK v5
*/
import { spawn } from 'child_process';
import { promises as fs } from 'fs';
import { homedir } from 'os';
import { join } from 'path';
import type {
LanguageModelV2,
LanguageModelV2CallOptions,
LanguageModelV2CallWarning
} from '@ai-sdk/provider';
import { NoSuchModelError } from '@ai-sdk/provider';
import { generateId } from '@ai-sdk/provider-utils';
import {
createPromptFromMessages,
convertFromGrokCliResponse,
escapeShellArg
} from './message-converter.js';
import { extractJson } from './json-extractor.js';
import {
createAPICallError,
createAuthenticationError,
createInstallationError,
createTimeoutError
} from './errors.js';
import { spawn } from 'child_process';
import { promises as fs } from 'fs';
import { join } from 'path';
import { homedir } from 'os';
import { extractJson } from './json-extractor.js';
import {
convertFromGrokCliResponse,
createPromptFromMessages,
escapeShellArg
} from './message-converter.js';
import type {
GrokCliLanguageModelOptions,
GrokCliModelId,
GrokCliSettings
} from './types.js';
/**
* @typedef {import('./types.js').GrokCliSettings} GrokCliSettings
* @typedef {import('./types.js').GrokCliModelId} GrokCliModelId
* Grok CLI Language Model implementation for AI SDK v5
*/
export class GrokCliLanguageModel implements LanguageModelV2 {
readonly specificationVersion = 'v2' as const;
readonly defaultObjectGenerationMode = 'json' as const;
readonly supportsImageUrls = false;
readonly supportsStructuredOutputs = false;
readonly supportedUrls: Record<string, RegExp[]> = {};
/**
* @typedef {Object} GrokCliLanguageModelOptions
* @property {GrokCliModelId} id - Model ID
* @property {GrokCliSettings} [settings] - Model settings
*/
readonly modelId: GrokCliModelId;
readonly settings: GrokCliSettings;
export class GrokCliLanguageModel {
specificationVersion = 'v1';
defaultObjectGenerationMode = 'json';
supportsImageUrls = false;
supportsStructuredOutputs = false;
/** @type {GrokCliModelId} */
modelId;
/** @type {GrokCliSettings} */
settings;
/**
* @param {GrokCliLanguageModelOptions} options
*/
constructor(options) {
constructor(options: GrokCliLanguageModelOptions) {
this.modelId = options.id;
this.settings = options.settings ?? {};
@@ -64,15 +62,14 @@ export class GrokCliLanguageModel {
}
}
get provider() {
get provider(): string {
return 'grok-cli';
}
/**
* Check if Grok CLI is installed and available
* @returns {Promise<boolean>}
*/
async checkGrokCliInstallation() {
private async checkGrokCliInstallation(): Promise<boolean> {
return new Promise((resolve) => {
const child = spawn('grok', ['--version'], {
stdio: 'pipe'
@@ -85,9 +82,8 @@ export class GrokCliLanguageModel {
/**
* Get API key from settings or environment
* @returns {Promise<string|null>}
*/
async getApiKey() {
private async getApiKey(): Promise<string | null> {
// Check settings first
if (this.settings.apiKey) {
return this.settings.apiKey;
@@ -111,22 +107,32 @@ export class GrokCliLanguageModel {
/**
* Execute Grok CLI command
* @param {Array<string>} args - Command line arguments
* @param {Object} options - Execution options
* @returns {Promise<{stdout: string, stderr: string, exitCode: number}>}
*/
async executeGrokCli(args, options = {}) {
const timeout = options.timeout || this.settings.timeout || 120000; // 2 minutes default
private async executeGrokCli(
args: string[],
options: { timeout?: number; apiKey?: string } = {}
): Promise<{ stdout: string; stderr: string; exitCode: number }> {
// Default timeout based on model type
let defaultTimeout = 120000; // 2 minutes default
if (this.modelId.includes('grok-4')) {
defaultTimeout = 600000; // 10 minutes for grok-4 models (they seem to hang during setup)
}
const timeout = options.timeout ?? this.settings.timeout ?? defaultTimeout;
return new Promise((resolve, reject) => {
const child = spawn('grok', args, {
stdio: 'pipe',
cwd: this.settings.workingDirectory || process.cwd()
cwd: this.settings.workingDirectory || process.cwd(),
env:
options.apiKey === undefined
? process.env
: { ...process.env, GROK_CLI_API_KEY: options.apiKey }
});
let stdout = '';
let stderr = '';
let timeoutId;
let timeoutId: NodeJS.Timeout | undefined;
// Set up timeout
if (timeout > 0) {
@@ -142,24 +148,26 @@ export class GrokCliLanguageModel {
}, timeout);
}
child.stdout.on('data', (data) => {
stdout += data.toString();
child.stdout?.on('data', (data) => {
const chunk = data.toString();
stdout += chunk;
});
child.stderr.on('data', (data) => {
stderr += data.toString();
child.stderr?.on('data', (data) => {
const chunk = data.toString();
stderr += chunk;
});
child.on('error', (error) => {
if (timeoutId) clearTimeout(timeoutId);
if (error.code === 'ENOENT') {
if ((error as any).code === 'ENOENT') {
reject(createInstallationError({}));
} else {
reject(
createAPICallError({
message: `Failed to execute Grok CLI: ${error.message}`,
code: error.code,
code: (error as any).code,
stderr: error.message,
isRetryable: false
})
@@ -180,15 +188,18 @@ export class GrokCliLanguageModel {
}
/**
* Generate unsupported parameter warnings
* @param {Object} options - Generation options
* @returns {Array} Warnings array
* Generate comprehensive warnings for unsupported parameters and validation issues
*/
generateUnsupportedWarnings(options) {
const warnings = [];
const unsupportedParams = [];
private generateAllWarnings(
options: LanguageModelV2CallOptions,
prompt: string
): LanguageModelV2CallWarning[] {
const warnings: LanguageModelV2CallWarning[] = [];
const unsupportedParams: string[] = [];
// Grok CLI supports some parameters but not all AI SDK parameters
// Check for unsupported parameters
if (options.temperature !== undefined)
unsupportedParams.push('temperature');
if (options.topP !== undefined) unsupportedParams.push('topP');
if (options.topK !== undefined) unsupportedParams.push('topK');
if (options.presencePenalty !== undefined)
@@ -200,24 +211,51 @@ export class GrokCliLanguageModel {
if (options.seed !== undefined) unsupportedParams.push('seed');
if (unsupportedParams.length > 0) {
// Add a warning for each unsupported parameter
for (const param of unsupportedParams) {
warnings.push({
type: 'unsupported-setting',
setting: param,
setting: param as
| 'temperature'
| 'topP'
| 'topK'
| 'presencePenalty'
| 'frequencyPenalty'
| 'stopSequences'
| 'seed',
details: `Grok CLI does not support the ${param} parameter. It will be ignored.`
});
}
}
// Add model validation warnings if needed
if (!this.modelId || this.modelId.trim() === '') {
warnings.push({
type: 'other',
message: 'Model ID is empty or invalid'
});
}
// Add prompt validation
if (!prompt || prompt.trim() === '') {
warnings.push({
type: 'other',
message: 'Prompt is empty'
});
}
return warnings;
}
/**
* Generate text using Grok CLI
* @param {Object} options - Generation options
* @returns {Promise<Object>}
*/
async doGenerate(options) {
async doGenerate(options: LanguageModelV2CallOptions) {
// Handle abort signal early
if (options.abortSignal?.aborted) {
throw options.abortSignal.reason || new Error('Request aborted');
}
// Check CLI installation
const isInstalled = await this.checkGrokCliInstallation();
if (!isInstalled) {
@@ -234,7 +272,7 @@ export class GrokCliLanguageModel {
}
const prompt = createPromptFromMessages(options.prompt);
const warnings = this.generateUnsupportedWarnings(options);
const warnings = this.generateAllWarnings(options, prompt);
// Build command arguments
const args = ['--prompt', escapeShellArg(prompt)];
@@ -244,10 +282,11 @@ export class GrokCliLanguageModel {
args.push('--model', this.modelId);
}
// Add API key if available
if (apiKey) {
args.push('--api-key', apiKey);
}
// Skip API key parameter if it's likely already configured to avoid hanging
// The CLI seems to hang when trying to save API keys for grok-4 models
// if (apiKey) {
// args.push('--api-key', apiKey);
// }
// Add base URL if provided in settings
if (this.settings.baseURL) {
@@ -260,9 +299,7 @@ export class GrokCliLanguageModel {
}
try {
const result = await this.executeGrokCli(args, {
timeout: this.settings.timeout
});
const result = await this.executeGrokCli(args, { apiKey });
if (result.exitCode !== 0) {
// Handle authentication errors
@@ -290,19 +327,37 @@ export class GrokCliLanguageModel {
let text = response.text || '';
// Extract JSON if in object-json mode
if (options.mode?.type === 'object-json' && text) {
const isObjectJson = (
o: unknown
): o is { mode: { type: 'object-json' } } =>
!!o &&
typeof o === 'object' &&
'mode' in o &&
(o as any).mode?.type === 'object-json';
if (isObjectJson(options) && text) {
text = extractJson(text);
}
return {
text: text || undefined,
usage: response.usage || { promptTokens: 0, completionTokens: 0 },
finishReason: 'stop',
content: [
{
type: 'text' as const,
text: text || ''
}
],
usage: response.usage
? {
inputTokens: response.usage.promptTokens,
outputTokens: response.usage.completionTokens,
totalTokens: response.usage.totalTokens
}
: { inputTokens: 0, outputTokens: 0, totalTokens: 0 },
finishReason: 'stop' as const,
rawCall: {
rawPrompt: prompt,
rawSettings: args
},
warnings: warnings.length > 0 ? warnings : undefined,
warnings: warnings,
response: {
id: generateId(),
timestamp: new Date(),
@@ -314,20 +369,23 @@ export class GrokCliLanguageModel {
providerMetadata: {
'grok-cli': {
exitCode: result.exitCode,
stderr: result.stderr || undefined
...(result.stderr && { stderr: result.stderr })
}
}
};
} catch (error) {
// Re-throw our custom errors
if (error.name === 'APICallError' || error.name === 'LoadAPIKeyError') {
if (
(error as any).name === 'APICallError' ||
(error as any).name === 'LoadAPIKeyError'
) {
throw error;
}
// Wrap other errors
throw createAPICallError({
message: `Grok CLI execution failed: ${error.message}`,
code: error.code,
message: `Grok CLI execution failed: ${(error as Error).message}`,
code: (error as any).code,
promptExcerpt: prompt.substring(0, 200),
isRetryable: false
});
@@ -338,15 +396,39 @@ export class GrokCliLanguageModel {
* Stream text using Grok CLI
* Note: Grok CLI doesn't natively support streaming, so this simulates streaming
* by generating the full response and then streaming it in chunks
* @param {Object} options - Stream options
* @returns {Promise<Object>}
*/
async doStream(options) {
const warnings = this.generateUnsupportedWarnings(options);
async doStream(options: LanguageModelV2CallOptions) {
const prompt = createPromptFromMessages(options.prompt);
const warnings = this.generateAllWarnings(options, prompt);
const stream = new ReadableStream({
start: async (controller) => {
let abortListener: (() => void) | undefined;
try {
// Handle abort signal
if (options.abortSignal?.aborted) {
throw options.abortSignal.reason || new Error('Request aborted');
}
// Set up abort listener
if (options.abortSignal) {
abortListener = () => {
controller.enqueue({
type: 'error',
error:
options.abortSignal?.reason || new Error('Request aborted')
});
controller.close();
};
options.abortSignal.addEventListener('abort', abortListener, {
once: true
});
}
// Emit stream-start with warnings
controller.enqueue({ type: 'stream-start', warnings });
// Generate the full response first
const result = await this.doGenerate(options);
@@ -359,20 +441,48 @@ export class GrokCliLanguageModel {
});
// Simulate streaming by chunking the text
const text = result.text || '';
const content = result.content || [];
const text =
content.length > 0 && content[0].type === 'text'
? content[0].text
: '';
const chunkSize = 50; // Characters per chunk
let textPartId: string | undefined;
// Emit text-start if we have content
if (text.length > 0) {
textPartId = generateId();
controller.enqueue({
type: 'text-start',
id: textPartId
});
}
for (let i = 0; i < text.length; i += chunkSize) {
// Check for abort during streaming
if (options.abortSignal?.aborted) {
throw options.abortSignal.reason || new Error('Request aborted');
}
const chunk = text.slice(i, i + chunkSize);
controller.enqueue({
type: 'text-delta',
textDelta: chunk
id: textPartId!,
delta: chunk
});
// Add small delay to simulate streaming
await new Promise((resolve) => setTimeout(resolve, 20));
}
// Close text part if opened
if (textPartId) {
controller.enqueue({
type: 'text-end',
id: textPartId
});
}
// Emit finish event
controller.enqueue({
type: 'finish',
@@ -388,19 +498,22 @@ export class GrokCliLanguageModel {
error
});
controller.close();
} finally {
// Clean up abort listener
if (options.abortSignal && abortListener) {
options.abortSignal.removeEventListener('abort', abortListener);
}
}
},
cancel: () => {
// Clean up if stream is cancelled
}
});
return {
stream,
rawCall: {
rawPrompt: createPromptFromMessages(options.prompt),
rawSettings: {}
},
warnings: warnings.length > 0 ? warnings : undefined,
request: {
body: createPromptFromMessages(options.prompt)
body: prompt
}
};
}

View File

@@ -0,0 +1,121 @@
/**
* Tests for Grok CLI provider
*/
import { NoSuchModelError } from '@ai-sdk/provider';
import { beforeEach, describe, expect, it, vi } from 'vitest';
import { GrokCliLanguageModel } from './grok-cli-language-model.js';
import { createGrokCli, grokCli } from './grok-cli-provider.js';
// Mock the GrokCliLanguageModel
vi.mock('./grok-cli-language-model.js', () => ({
GrokCliLanguageModel: vi.fn().mockImplementation((options) => ({
modelId: options.id,
settings: options.settings,
provider: 'grok-cli'
}))
}));
describe('createGrokCli', () => {
beforeEach(() => {
vi.clearAllMocks();
});
it('should create a provider with default settings', () => {
const provider = createGrokCli();
expect(typeof provider).toBe('function');
expect(typeof provider.languageModel).toBe('function');
expect(typeof provider.chat).toBe('function');
expect(typeof provider.textEmbeddingModel).toBe('function');
expect(typeof provider.imageModel).toBe('function');
});
it('should create a provider with custom default settings', () => {
const defaultSettings = {
timeout: 5000,
workingDirectory: '/custom/path'
};
const provider = createGrokCli({ defaultSettings });
const model = provider('grok-2-mini');
expect(GrokCliLanguageModel).toHaveBeenCalledWith({
id: 'grok-2-mini',
settings: defaultSettings
});
});
it('should create language models with merged settings', () => {
const defaultSettings = { timeout: 5000 };
const provider = createGrokCli({ defaultSettings });
const modelSettings = { apiKey: 'test-key' };
const model = provider('grok-2', modelSettings);
expect(GrokCliLanguageModel).toHaveBeenCalledWith({
id: 'grok-2',
settings: { timeout: 5000, apiKey: 'test-key' }
});
});
it('should create models via languageModel method', () => {
const provider = createGrokCli();
const model = provider.languageModel('grok-2-mini', { timeout: 1000 });
expect(GrokCliLanguageModel).toHaveBeenCalledWith({
id: 'grok-2-mini',
settings: { timeout: 1000 }
});
});
it('should create models via chat method (alias)', () => {
const provider = createGrokCli();
const model = provider.chat('grok-2');
expect(GrokCliLanguageModel).toHaveBeenCalledWith({
id: 'grok-2',
settings: {}
});
});
it('should throw error when called with new keyword', () => {
const provider = createGrokCli();
expect(() => {
// @ts-expect-error - intentionally testing invalid usage
new provider('grok-2');
}).toThrow(
'The Grok CLI model function cannot be called with the new keyword.'
);
});
it('should throw NoSuchModelError for textEmbeddingModel', () => {
const provider = createGrokCli();
expect(() => {
provider.textEmbeddingModel('test-model');
}).toThrow(NoSuchModelError);
});
it('should throw NoSuchModelError for imageModel', () => {
const provider = createGrokCli();
expect(() => {
provider.imageModel('test-model');
}).toThrow(NoSuchModelError);
});
});
describe('default grokCli provider', () => {
it('should be a pre-configured provider instance', () => {
expect(typeof grokCli).toBe('function');
expect(typeof grokCli.languageModel).toBe('function');
expect(typeof grokCli.chat).toBe('function');
});
it('should create models with default configuration', () => {
const model = grokCli('grok-2-mini');
expect(GrokCliLanguageModel).toHaveBeenCalledWith({
id: 'grok-2-mini',
settings: {}
});
});
});

View File

@@ -0,0 +1,108 @@
/**
* Grok CLI provider implementation for AI SDK v5
*/
import type { LanguageModelV2, ProviderV2 } from '@ai-sdk/provider';
import { NoSuchModelError } from '@ai-sdk/provider';
import { GrokCliLanguageModel } from './grok-cli-language-model.js';
import type { GrokCliModelId, GrokCliSettings } from './types.js';
/**
* Grok CLI provider interface that extends the AI SDK's ProviderV2
*/
export interface GrokCliProvider extends ProviderV2 {
/**
* Creates a language model instance for the specified model ID.
* This is a shorthand for calling `languageModel()`.
*/
(modelId: GrokCliModelId, settings?: GrokCliSettings): LanguageModelV2;
/**
* Creates a language model instance for text generation.
*/
languageModel(
modelId: GrokCliModelId,
settings?: GrokCliSettings
): LanguageModelV2;
/**
* Alias for `languageModel()` to maintain compatibility with AI SDK patterns.
*/
chat(modelId: GrokCliModelId, settings?: GrokCliSettings): LanguageModelV2;
textEmbeddingModel(modelId: string): never;
imageModel(modelId: string): never;
}
/**
* Configuration options for creating a Grok CLI provider instance
*/
export interface GrokCliProviderSettings {
/**
* Default settings to use for all models created by this provider.
* Individual model settings will override these defaults.
*/
defaultSettings?: GrokCliSettings;
}
/**
* Creates a Grok CLI provider instance with the specified configuration.
* The provider can be used to create language models for interacting with Grok models.
*/
export function createGrokCli(
options: GrokCliProviderSettings = {}
): GrokCliProvider {
const createModel = (
modelId: GrokCliModelId,
settings: GrokCliSettings = {}
): LanguageModelV2 => {
const mergedSettings = {
...options.defaultSettings,
...settings
};
return new GrokCliLanguageModel({
id: modelId,
settings: mergedSettings
});
};
const provider = function (
modelId: GrokCliModelId,
settings?: GrokCliSettings
) {
if (new.target) {
throw new Error(
'The Grok CLI model function cannot be called with the new keyword.'
);
}
return createModel(modelId, settings);
};
provider.languageModel = createModel;
provider.chat = createModel; // Alias for languageModel
// Add textEmbeddingModel method that throws NoSuchModelError
provider.textEmbeddingModel = (modelId: string) => {
throw new NoSuchModelError({
modelId,
modelType: 'textEmbeddingModel'
});
};
provider.imageModel = (modelId: string) => {
throw new NoSuchModelError({
modelId,
modelType: 'imageModel'
});
};
return provider as GrokCliProvider;
}
/**
* Default Grok CLI provider instance.
* Pre-configured provider for quick usage without custom settings.
*/
export const grokCli = createGrokCli();

View File

@@ -0,0 +1,64 @@
/**
* Provider exports for creating and configuring Grok CLI instances.
*/
/**
* Creates a new Grok CLI provider instance and the default provider instance.
*/
export { createGrokCli, grokCli } from './grok-cli-provider.js';
/**
* Type definitions for the Grok CLI provider.
*/
export type {
GrokCliProvider,
GrokCliProviderSettings
} from './grok-cli-provider.js';
/**
* Language model implementation for Grok CLI.
* This class implements the AI SDK's LanguageModelV2 interface.
*/
export { GrokCliLanguageModel } from './grok-cli-language-model.js';
/**
* Type definitions for Grok CLI language models.
*/
export type {
GrokCliModelId,
GrokCliLanguageModelOptions,
GrokCliSettings,
GrokCliMessage,
GrokCliResponse,
GrokCliErrorMetadata
} from './types.js';
/**
* Error handling utilities for Grok CLI.
* These functions help create and identify specific error types.
*/
export {
isAuthenticationError,
isTimeoutError,
isInstallationError,
getErrorMetadata,
createAPICallError,
createAuthenticationError,
createTimeoutError,
createInstallationError
} from './errors.js';
/**
* Message conversion utilities for Grok CLI communication.
*/
export {
convertToGrokCliMessages,
convertFromGrokCliResponse,
createPromptFromMessages,
escapeShellArg
} from './message-converter.js';
/**
* JSON extraction utilities for parsing Grok responses.
*/
export { extractJson } from './json-extractor.js';

View File

@@ -0,0 +1,81 @@
/**
* Tests for JSON extraction utilities
*/
import { describe, expect, it } from 'vitest';
import { extractJson } from './json-extractor.js';
describe('extractJson', () => {
it('should extract JSON from markdown code blocks', () => {
const text = '```json\n{"name": "test", "value": 42}\n```';
const result = extractJson(text);
expect(JSON.parse(result)).toEqual({ name: 'test', value: 42 });
});
it('should extract JSON from generic code blocks', () => {
const text = '```\n{"name": "test", "value": 42}\n```';
const result = extractJson(text);
expect(JSON.parse(result)).toEqual({ name: 'test', value: 42 });
});
it('should remove JavaScript variable declarations', () => {
const text = 'const result = {"name": "test", "value": 42};';
const result = extractJson(text);
expect(JSON.parse(result)).toEqual({ name: 'test', value: 42 });
});
it('should handle let variable declarations', () => {
const text = 'let data = {"name": "test", "value": 42};';
const result = extractJson(text);
expect(JSON.parse(result)).toEqual({ name: 'test', value: 42 });
});
it('should handle var variable declarations', () => {
const text = 'var config = {"name": "test", "value": 42};';
const result = extractJson(text);
expect(JSON.parse(result)).toEqual({ name: 'test', value: 42 });
});
it('should extract JSON arrays', () => {
const text = '[{"name": "test1"}, {"name": "test2"}]';
const result = extractJson(text);
expect(JSON.parse(result)).toEqual([{ name: 'test1' }, { name: 'test2' }]);
});
it('should convert JavaScript object literals to JSON', () => {
const text = "{name: 'test', value: 42}";
const result = extractJson(text);
expect(JSON.parse(result)).toEqual({ name: 'test', value: 42 });
});
it('should return valid JSON (canonical formatting)', () => {
const text = '{"name": "test", "value": 42}';
const result = extractJson(text);
expect(JSON.parse(result)).toEqual({ name: 'test', value: 42 });
});
it('should return original text when JSON parsing fails completely', () => {
const text = 'This is not JSON at all';
const result = extractJson(text);
expect(result).toBe('This is not JSON at all');
});
it('should handle complex nested objects', () => {
const text =
'```json\n{\n "user": {\n "name": "John",\n "age": 30\n },\n "items": [1, 2, 3]\n}\n```';
const result = extractJson(text);
expect(JSON.parse(result)).toEqual({
user: {
name: 'John',
age: 30
},
items: [1, 2, 3]
});
});
it('should handle mixed quotes in object literals', () => {
const text = `{name: "test", value: 'mixed quotes'}`;
const result = extractJson(text);
expect(JSON.parse(result)).toEqual({ name: 'test', value: 'mixed quotes' });
});
});

View File

@@ -0,0 +1,132 @@
/**
* Extract JSON from AI's response using a tolerant parser.
*
* The function removes common wrappers such as markdown fences or variable
* declarations and then attempts to parse the remaining text with
* `jsonc-parser`. If valid JSON (or JSONC) can be parsed, it is returned as a
* string via `JSON.stringify`. Otherwise the original text is returned.
*
* @param text - Raw text which may contain JSON
* @returns A valid JSON string if extraction succeeds, otherwise the original text
*/
import { parse, type ParseError } from 'jsonc-parser';
export function extractJson(text: string): string {
let content = text.trim();
// Strip ```json or ``` fences
const fenceMatch = /```(?:json)?\s*([\s\S]*?)\s*```/i.exec(content);
if (fenceMatch) {
content = fenceMatch[1];
}
// Strip variable declarations like `const foo =` or `let foo =`
const varMatch = /^\s*(?:const|let|var)\s+\w+\s*=\s*([\s\S]*)/i.exec(content);
if (varMatch) {
content = varMatch[1];
// Remove trailing semicolon if present
if (content.trim().endsWith(';')) {
content = content.trim().slice(0, -1);
}
}
// Find the first opening bracket
const firstObj = content.indexOf('{');
const firstArr = content.indexOf('[');
if (firstObj === -1 && firstArr === -1) {
return text;
}
const start =
firstArr === -1
? firstObj
: firstObj === -1
? firstArr
: Math.min(firstObj, firstArr);
content = content.slice(start);
// Try to parse the entire string with jsonc-parser
const tryParse = (value: string): string | undefined => {
const errors: ParseError[] = [];
try {
const result = parse(value, errors, { allowTrailingComma: true });
if (errors.length === 0) {
return JSON.stringify(result, null, 2);
}
} catch {
// ignore
}
return undefined;
};
const parsed = tryParse(content);
if (parsed !== undefined) {
return parsed;
}
// If parsing the full string failed, use a more efficient approach
// to find valid JSON boundaries
const openChar = content[0];
const closeChar = openChar === '{' ? '}' : ']';
// Find all potential closing positions by tracking nesting depth
const closingPositions: number[] = [];
let depth = 0;
let inString = false;
let escapeNext = false;
for (let i = 0; i < content.length; i++) {
const char = content[i];
if (escapeNext) {
escapeNext = false;
continue;
}
if (char === '\\') {
escapeNext = true;
continue;
}
if (char === '"' && !inString) {
inString = true;
continue;
}
if (char === '"' && inString) {
inString = false;
continue;
}
// Skip content inside strings
if (inString) continue;
if (char === openChar) {
depth++;
} else if (char === closeChar) {
depth--;
if (depth === 0) {
closingPositions.push(i + 1);
}
}
}
// Try parsing at each valid closing position, starting from the end
for (let i = closingPositions.length - 1; i >= 0; i--) {
const attempt = tryParse(content.slice(0, closingPositions[i]));
if (attempt !== undefined) {
return attempt;
}
}
// As a final fallback, try the original character-by-character approach
// but only for the last 1000 characters to limit performance impact
const searchStart = Math.max(0, content.length - 1000);
for (let end = content.length - 1; end > searchStart; end--) {
const attempt = tryParse(content.slice(0, end));
if (attempt !== undefined) {
return attempt;
}
}
return text;
}

View File

@@ -0,0 +1,163 @@
/**
* Tests for message conversion utilities
*/
import { describe, expect, it } from 'vitest';
import {
convertFromGrokCliResponse,
convertToGrokCliMessages,
createPromptFromMessages,
escapeShellArg
} from './message-converter.js';
describe('convertToGrokCliMessages', () => {
it('should convert string content messages', () => {
const messages = [
{ role: 'user', content: 'Hello, world!' },
{ role: 'assistant', content: 'Hi there!' }
];
const result = convertToGrokCliMessages(messages);
expect(result).toEqual([
{ role: 'user', content: 'Hello, world!' },
{ role: 'assistant', content: 'Hi there!' }
]);
});
it('should convert array content messages', () => {
const messages = [
{
role: 'user',
content: [
{ type: 'text', text: 'Hello' },
{ type: 'text', text: 'World' }
]
}
];
const result = convertToGrokCliMessages(messages);
expect(result).toEqual([{ role: 'user', content: 'Hello\nWorld' }]);
});
it('should convert object content messages', () => {
const messages = [
{
role: 'user',
content: { text: 'Hello from object' }
}
];
const result = convertToGrokCliMessages(messages);
expect(result).toEqual([{ role: 'user', content: 'Hello from object' }]);
});
});
describe('convertFromGrokCliResponse', () => {
it('should parse JSONL response format', () => {
const responseText = `{"role": "assistant", "content": "Hello there!", "usage": {"prompt_tokens": 10, "completion_tokens": 5, "total_tokens": 15}}`;
const result = convertFromGrokCliResponse(responseText);
expect(result).toEqual({
text: 'Hello there!',
usage: {
promptTokens: 10,
completionTokens: 5,
totalTokens: 15
}
});
});
it('should handle multiple lines in JSONL format', () => {
const responseText = `{"role": "user", "content": "Hello"}
{"role": "assistant", "content": "Hi there!", "usage": {"prompt_tokens": 5, "completion_tokens": 3}}`;
const result = convertFromGrokCliResponse(responseText);
expect(result).toEqual({
text: 'Hi there!',
usage: {
promptTokens: 5,
completionTokens: 3,
totalTokens: 0
}
});
});
it('should fallback to raw text when parsing fails', () => {
const responseText = 'Invalid JSON response';
const result = convertFromGrokCliResponse(responseText);
expect(result).toEqual({
text: 'Invalid JSON response',
usage: undefined
});
});
});
describe('createPromptFromMessages', () => {
it('should create formatted prompt from messages', () => {
const messages = [
{ role: 'system', content: 'You are a helpful assistant.' },
{ role: 'user', content: 'What is 2+2?' },
{ role: 'assistant', content: '2+2 equals 4.' }
];
const result = createPromptFromMessages(messages);
expect(result).toBe(
'System: You are a helpful assistant.\n\nUser: What is 2+2?\n\nAssistant: 2+2 equals 4.'
);
});
it('should handle custom role names', () => {
const messages = [{ role: 'custom', content: 'Custom message' }];
const result = createPromptFromMessages(messages);
expect(result).toBe('custom: Custom message');
});
it('should trim whitespace from message content', () => {
const messages = [
{ role: 'user', content: ' Hello with spaces ' },
{ role: 'assistant', content: '\n\nResponse with newlines\n\n' }
];
const result = createPromptFromMessages(messages);
expect(result).toBe(
'User: Hello with spaces\n\nAssistant: Response with newlines'
);
});
});
describe('escapeShellArg', () => {
it('should escape single quotes', () => {
const arg = "It's a test";
const result = escapeShellArg(arg);
expect(result).toBe("'It'\\''s a test'");
});
it('should handle strings without special characters', () => {
const arg = 'simple string';
const result = escapeShellArg(arg);
expect(result).toBe("'simple string'");
});
it('should convert non-string values to strings', () => {
const arg = 123;
const result = escapeShellArg(arg);
expect(result).toBe("'123'");
});
it('should handle empty strings', () => {
const arg = '';
const result = escapeShellArg(arg);
expect(result).toBe("''");
});
});

View File

@@ -1,17 +1,28 @@
/**
* @fileoverview Message format conversion utilities for Grok CLI provider
* Message format conversion utilities for Grok CLI provider
*/
import type { GrokCliMessage, GrokCliResponse } from './types.js';
/**
* @typedef {import('./types.js').GrokCliMessage} GrokCliMessage
* AI SDK message type (simplified interface)
*/
interface AISDKMessage {
role: string;
content:
| string
| Array<{ type: string; text?: string }>
| { text?: string; [key: string]: unknown };
}
/**
* Convert AI SDK messages to Grok CLI compatible format
* @param {Array<Object>} messages - AI SDK message array
* @returns {Array<GrokCliMessage>} Grok CLI compatible messages
* @param messages - AI SDK message array
* @returns Grok CLI compatible messages
*/
export function convertToGrokCliMessages(messages) {
export function convertToGrokCliMessages(
messages: AISDKMessage[]
): GrokCliMessage[] {
return messages.map((message) => {
// Handle different message content types
let content = '';
@@ -22,7 +33,7 @@ export function convertToGrokCliMessages(messages) {
// Handle multi-part content (text and images)
content = message.content
.filter((part) => part.type === 'text')
.map((part) => part.text)
.map((part) => part.text || '')
.join('\n');
} else if (message.content && typeof message.content === 'object') {
// Handle object content
@@ -38,10 +49,17 @@ export function convertToGrokCliMessages(messages) {
/**
* Convert Grok CLI response to AI SDK format
* @param {string} responseText - Raw response text from Grok CLI (JSONL format)
* @returns {Object} AI SDK compatible response object
* @param responseText - Raw response text from Grok CLI (JSONL format)
* @returns AI SDK compatible response object
*/
export function convertFromGrokCliResponse(responseText) {
export function convertFromGrokCliResponse(responseText: string): {
text: string;
usage?: {
promptTokens: number;
completionTokens: number;
totalTokens: number;
};
} {
try {
// Grok CLI outputs JSONL format - each line is a separate JSON message
const lines = responseText
@@ -50,10 +68,10 @@ export function convertFromGrokCliResponse(responseText) {
.filter((line) => line.trim());
// Parse each line as JSON and find assistant messages
const messages = [];
const messages: GrokCliResponse[] = [];
for (const line of lines) {
try {
const message = JSON.parse(line);
const message = JSON.parse(line) as GrokCliResponse;
messages.push(message);
} catch (parseError) {
// Skip invalid JSON lines
@@ -95,10 +113,10 @@ export function convertFromGrokCliResponse(responseText) {
/**
* Create a prompt string for Grok CLI from messages
* @param {Array<Object>} messages - AI SDK message array
* @returns {string} Formatted prompt string
* @param messages - AI SDK message array
* @returns Formatted prompt string
*/
export function createPromptFromMessages(messages) {
export function createPromptFromMessages(messages: AISDKMessage[]): string {
const grokMessages = convertToGrokCliMessages(messages);
// Create a conversation-style prompt
@@ -122,14 +140,14 @@ export function createPromptFromMessages(messages) {
/**
* Escape shell arguments for safe CLI execution
* @param {string} arg - Argument to escape
* @returns {string} Shell-escaped argument
* @param arg - Argument to escape
* @returns Shell-escaped argument
*/
export function escapeShellArg(arg) {
export function escapeShellArg(arg: string | unknown): string {
if (typeof arg !== 'string') {
arg = String(arg);
}
// Replace single quotes with '\''
return "'" + arg.replace(/'/g, "'\\''") + "'";
return "'" + (arg as string).replace(/'/g, "'\\''") + "'";
}

View File

@@ -0,0 +1,81 @@
/**
* Type definitions for Grok CLI provider
*/
/**
* Settings for configuring Grok CLI behavior
*/
export interface GrokCliSettings {
/** API key for Grok CLI */
apiKey?: string;
/** Base URL for Grok API */
baseURL?: string;
/** Default model to use */
model?: string;
/** Timeout in milliseconds */
timeout?: number;
/** Working directory for CLI commands */
workingDirectory?: string;
}
/**
* Model identifiers supported by Grok CLI
*/
export type GrokCliModelId = string;
/**
* Error metadata for Grok CLI operations
*/
export interface GrokCliErrorMetadata {
/** Error code */
code?: string;
/** Process exit code */
exitCode?: number;
/** Standard error output */
stderr?: string;
/** Standard output */
stdout?: string;
/** Excerpt of the prompt that caused the error */
promptExcerpt?: string;
/** Timeout value in milliseconds */
timeoutMs?: number;
}
/**
* Message format for Grok CLI communication
*/
export interface GrokCliMessage {
/** Message role (user, assistant, system) */
role: string;
/** Message content */
content: string;
}
/**
* Response format from Grok CLI
*/
export interface GrokCliResponse {
/** Message role */
role: string;
/** Response content */
content: string;
/** Token usage information */
usage?: {
/** Input tokens used */
prompt_tokens?: number;
/** Output tokens used */
completion_tokens?: number;
/** Total tokens used */
total_tokens?: number;
};
}
/**
* Configuration options for Grok CLI language model
*/
export interface GrokCliLanguageModelOptions {
/** Model identifier */
id: GrokCliModelId;
/** Model settings */
settings?: GrokCliSettings;
}

View File

@@ -0,0 +1,36 @@
{
"compilerOptions": {
"target": "ES2022",
"module": "ESNext",
"lib": ["ES2022"],
"declaration": true,
"declarationMap": true,
"sourceMap": true,
"outDir": "./dist",
"baseUrl": ".",
"rootDir": "./src",
"strict": true,
"noImplicitAny": true,
"strictNullChecks": true,
"strictFunctionTypes": true,
"strictBindCallApply": true,
"strictPropertyInitialization": true,
"noImplicitThis": true,
"alwaysStrict": true,
"noUnusedLocals": true,
"noUnusedParameters": true,
"noImplicitReturns": true,
"noFallthroughCasesInSwitch": true,
"esModuleInterop": true,
"skipLibCheck": true,
"forceConsistentCasingInFileNames": true,
"moduleResolution": "bundler",
"moduleDetection": "force",
"types": ["node"],
"resolveJsonModule": true,
"isolatedModules": true,
"allowImportingTsExtensions": false
},
"include": ["src/**/*"],
"exclude": ["node_modules", "dist", "tests", "**/*.test.ts", "**/*.spec.ts"]
}

View File

@@ -20,8 +20,7 @@
"typecheck": "tsc --noEmit"
},
"devDependencies": {
"dotenv-mono": "^1.5.1",
"typescript": "^5.7.3"
"typescript": "^5.9.2"
},
"dependencies": {
"tsup": "^8.5.0"

View File

@@ -43,9 +43,9 @@ export const baseConfig: Partial<UserConfig> = {
export function mergeConfig(
base: Partial<UserConfig>,
overrides: Partial<UserConfig>
): Partial<UserConfig> {
): UserConfig {
return {
...base,
...overrides
};
} as UserConfig;
}

View File

@@ -31,21 +31,13 @@
},
"dependencies": {
"@supabase/supabase-js": "^2.57.4",
"zod": "^3.23.8"
"zod": "^4.1.11"
},
"devDependencies": {
"@biomejs/biome": "^1.9.4",
"@tm/build-config": "*",
"@types/node": "^22.10.5",
"@vitest/coverage-v8": "^2.0.5",
"dotenv-mono": "^1.5.1",
"ts-node": "^10.9.2",
"tsup": "^8.5.0",
"typescript": "^5.7.3",
"vitest": "^2.1.8"
},
"engines": {
"node": ">=18.0.0"
"@vitest/coverage-v8": "^3.2.4",
"typescript": "^5.9.2",
"vitest": "^3.2.4"
},
"files": ["src", "README.md", "CHANGELOG.md"],
"keywords": ["task-management", "typescript", "ai", "prd", "parser"],

View File

@@ -33,6 +33,9 @@ export class TaskEntity implements Task {
tags?: string[];
assignee?: string;
complexity?: Task['complexity'];
recommendedSubtasks?: number;
expansionPrompt?: string;
complexityReasoning?: string;
constructor(data: Task | (Omit<Task, 'id'> & { id: number | string })) {
this.validate(data);
@@ -50,7 +53,7 @@ export class TaskEntity implements Task {
// Normalize subtask IDs to strings
this.subtasks = (data.subtasks || []).map((subtask) => ({
...subtask,
id: Number(subtask.id), // Keep subtask IDs as numbers per interface
id: String(subtask.id),
parentId: String(subtask.parentId)
}));
@@ -62,6 +65,9 @@ export class TaskEntity implements Task {
this.tags = data.tags;
this.assignee = data.assignee;
this.complexity = data.complexity;
this.recommendedSubtasks = data.recommendedSubtasks;
this.expansionPrompt = data.expansionPrompt;
this.complexityReasoning = data.complexityReasoning;
}
/**
@@ -246,7 +252,10 @@ export class TaskEntity implements Task {
actualEffort: this.actualEffort,
tags: this.tags,
assignee: this.assignee,
complexity: this.complexity
complexity: this.complexity,
recommendedSubtasks: this.recommendedSubtasks,
expansionPrompt: this.expansionPrompt,
complexityReasoning: this.complexityReasoning
};
}

View File

@@ -51,7 +51,8 @@ export const ERROR_CODES = {
INTERNAL_ERROR: 'INTERNAL_ERROR',
INVALID_INPUT: 'INVALID_INPUT',
NOT_IMPLEMENTED: 'NOT_IMPLEMENTED',
UNKNOWN_ERROR: 'UNKNOWN_ERROR'
UNKNOWN_ERROR: 'UNKNOWN_ERROR',
NOT_FOUND: 'NOT_FOUND'
} as const;
export type ErrorCode = (typeof ERROR_CODES)[keyof typeof ERROR_CODES];

View File

@@ -11,7 +11,9 @@ export {
type ListTasksResult,
type StartTaskOptions,
type StartTaskResult,
type ConflictCheckResult
type ConflictCheckResult,
type ExportTasksOptions,
type ExportResult
} from './task-master-core.js';
// Re-export types
@@ -61,3 +63,12 @@ export { getLogger, createLogger, setGlobalLogger } from './logger/index.js';
// Re-export executors
export * from './executors/index.js';
// Re-export reports
export {
ComplexityReportManager,
type ComplexityReport,
type ComplexityReportMetadata,
type ComplexityAnalysis,
type TaskComplexityData
} from './reports/index.js';

View File

@@ -3,7 +3,27 @@
* This file defines the contract for all storage implementations
*/
import type { Task, TaskMetadata } from '../types/index.js';
import type { Task, TaskMetadata, TaskStatus } from '../types/index.js';
/**
* Options for loading tasks from storage
*/
export interface LoadTasksOptions {
/** Filter tasks by status */
status?: TaskStatus;
/** Exclude subtasks from loaded tasks (default: false) */
excludeSubtasks?: boolean;
}
/**
* Result type for updateTaskStatus operations
*/
export interface UpdateStatusResult {
success: boolean;
oldStatus: TaskStatus;
newStatus: TaskStatus;
taskId: string;
}
/**
* Interface for storage operations on tasks
@@ -11,11 +31,12 @@ import type { Task, TaskMetadata } from '../types/index.js';
*/
export interface IStorage {
/**
* Load all tasks from storage, optionally filtered by tag
* Load all tasks from storage, optionally filtered by tag and other criteria
* @param tag - Optional tag to filter tasks by
* @param options - Optional filtering options (status, excludeSubtasks)
* @returns Promise that resolves to an array of tasks
*/
loadTasks(tag?: string): Promise<Task[]>;
loadTasks(tag?: string, options?: LoadTasksOptions): Promise<Task[]>;
/**
* Load a single task by ID
@@ -54,6 +75,19 @@ export interface IStorage {
tag?: string
): Promise<void>;
/**
* Update task or subtask status by ID
* @param taskId - ID of the task or subtask (e.g., "1" or "1.2")
* @param newStatus - New status to set
* @param tag - Optional tag context for the task
* @returns Promise that resolves to update result with old and new status
*/
updateTaskStatus(
taskId: string,
newStatus: TaskStatus,
tag?: string
): Promise<UpdateStatusResult>;
/**
* Delete a task by ID
* @param taskId - ID of the task to delete
@@ -182,7 +216,7 @@ export abstract class BaseStorage implements IStorage {
}
// Abstract methods that must be implemented by concrete classes
abstract loadTasks(tag?: string): Promise<Task[]>;
abstract loadTasks(tag?: string, options?: LoadTasksOptions): Promise<Task[]>;
abstract loadTask(taskId: string, tag?: string): Promise<Task | null>;
abstract saveTasks(tasks: Task[], tag?: string): Promise<void>;
abstract appendTasks(tasks: Task[], tag?: string): Promise<void>;
@@ -191,6 +225,11 @@ export abstract class BaseStorage implements IStorage {
updates: Partial<Task>,
tag?: string
): Promise<void>;
abstract updateTaskStatus(
taskId: string,
newStatus: TaskStatus,
tag?: string
): Promise<UpdateStatusResult>;
abstract deleteTask(taskId: string, tag?: string): Promise<void>;
abstract exists(tag?: string): Promise<boolean>;
abstract loadMetadata(tag?: string): Promise<TaskMetadata | null>;
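
A hedged sketch of how a caller might use the new options and status-update contract (the storage value stands for any IStorage implementation; the tag and status values are illustrative):

import type {
	IStorage,
	LoadTasksOptions,
	UpdateStatusResult
} from '../interfaces/storage.interface.js';

async function startFirstPendingTask(storage: IStorage): Promise<void> {
	// Filters are now pushed into the storage layer instead of being applied in memory.
	const options: LoadTasksOptions = { status: 'pending', excludeSubtasks: true };
	const pending = await storage.loadTasks('master', options);
	if (pending.length === 0) return;

	const result: UpdateStatusResult = await storage.updateTaskStatus(
		String(pending[0].id),
		'in-progress',
		'master'
	);
	console.log(`${result.taskId}: ${result.oldStatus} -> ${result.newStatus}`);
}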

View File

@@ -0,0 +1,148 @@
import { describe, it, expect, vi } from 'vitest';
import { TaskMapper } from './TaskMapper.js';
import type { Tables } from '../types/database.types.js';
type TaskRow = Tables<'tasks'>;
describe('TaskMapper', () => {
describe('extractMetadataField', () => {
it('should extract string field from metadata', () => {
const taskRow: TaskRow = {
id: '123',
display_id: '1',
title: 'Test Task',
description: 'Test description',
status: 'todo',
priority: 'medium',
parent_task_id: null,
subtask_position: 0,
created_at: new Date().toISOString(),
updated_at: new Date().toISOString(),
metadata: {
details: 'Some details',
testStrategy: 'Test with unit tests'
},
complexity: null,
assignee_id: null,
estimated_hours: null,
actual_hours: null,
due_date: null,
completed_at: null
};
const task = TaskMapper.mapDatabaseTaskToTask(taskRow, [], new Map());
expect(task.details).toBe('Some details');
expect(task.testStrategy).toBe('Test with unit tests');
});
it('should use default value when metadata field is missing', () => {
const taskRow: TaskRow = {
id: '123',
display_id: '1',
title: 'Test Task',
description: 'Test description',
status: 'todo',
priority: 'medium',
parent_task_id: null,
subtask_position: 0,
created_at: new Date().toISOString(),
updated_at: new Date().toISOString(),
metadata: {},
complexity: null,
assignee_id: null,
estimated_hours: null,
actual_hours: null,
due_date: null,
completed_at: null
};
const task = TaskMapper.mapDatabaseTaskToTask(taskRow, [], new Map());
expect(task.details).toBe('');
expect(task.testStrategy).toBe('');
});
it('should use default value when metadata is null', () => {
const taskRow: TaskRow = {
id: '123',
display_id: '1',
title: 'Test Task',
description: 'Test description',
status: 'todo',
priority: 'medium',
parent_task_id: null,
subtask_position: 0,
created_at: new Date().toISOString(),
updated_at: new Date().toISOString(),
metadata: null,
complexity: null,
assignee_id: null,
estimated_hours: null,
actual_hours: null,
due_date: null,
completed_at: null
};
const task = TaskMapper.mapDatabaseTaskToTask(taskRow, [], new Map());
expect(task.details).toBe('');
expect(task.testStrategy).toBe('');
});
it('should use default value and warn when metadata field has wrong type', () => {
const consoleWarnSpy = vi
.spyOn(console, 'warn')
.mockImplementation(() => {});
const taskRow: TaskRow = {
id: '123',
display_id: '1',
title: 'Test Task',
description: 'Test description',
status: 'todo',
priority: 'medium',
parent_task_id: null,
subtask_position: 0,
created_at: new Date().toISOString(),
updated_at: new Date().toISOString(),
metadata: {
details: 12345, // Wrong type: number instead of string
testStrategy: ['test1', 'test2'] // Wrong type: array instead of string
},
complexity: null,
assignee_id: null,
estimated_hours: null,
actual_hours: null,
due_date: null,
completed_at: null
};
const task = TaskMapper.mapDatabaseTaskToTask(taskRow, [], new Map());
// Should use empty string defaults when type doesn't match
expect(task.details).toBe('');
expect(task.testStrategy).toBe('');
// Should have logged warnings
expect(consoleWarnSpy).toHaveBeenCalledWith(
expect.stringContaining('Type mismatch in metadata field "details"')
);
expect(consoleWarnSpy).toHaveBeenCalledWith(
expect.stringContaining(
'Type mismatch in metadata field "testStrategy"'
)
);
consoleWarnSpy.mockRestore();
});
});
describe('mapStatus', () => {
it('should map database status to internal status', () => {
expect(TaskMapper.mapStatus('todo')).toBe('pending');
expect(TaskMapper.mapStatus('in_progress')).toBe('in-progress');
expect(TaskMapper.mapStatus('done')).toBe('done');
});
});
});

View File

@@ -2,22 +2,32 @@ import { Task, Subtask } from '../types/index.js';
import { Database, Tables } from '../types/database.types.js';
type TaskRow = Tables<'tasks'>;
type DependencyRow = Tables<'task_dependencies'>;
// Legacy type for backward compatibility
type DependencyRow = Tables<'task_dependencies'> & {
depends_on_task?: { display_id: string } | null;
depends_on_task_id?: string;
};
export class TaskMapper {
/**
* Maps database tasks to internal Task format
* @param dbTasks - Array of tasks from database
* @param dependencies - Either a Map of task_id to display_ids or legacy array format
*/
static mapDatabaseTasksToTasks(
dbTasks: TaskRow[],
dbDependencies: DependencyRow[]
dependencies: Map<string, string[]> | DependencyRow[]
): Task[] {
if (!dbTasks || dbTasks.length === 0) {
return [];
}
// Group dependencies by task_id
const dependenciesByTaskId = this.groupDependenciesByTaskId(dbDependencies);
// Handle both Map and array formats for backward compatibility
const dependenciesByTaskId =
dependencies instanceof Map
? dependencies
: this.groupDependenciesByTaskId(dependencies);
// Separate parent tasks and subtasks
const parentTasks = dbTasks.filter((t) => !t.parent_task_id);
@@ -43,21 +53,23 @@ export class TaskMapper {
): Task {
// Map subtasks
const subtasks: Subtask[] = dbSubtasks.map((subtask, index) => ({
id: index + 1, // Use numeric ID for subtasks
id: subtask.display_id || String(index + 1), // Use display_id if available (API storage), fallback to numeric (file storage)
parentId: dbTask.id,
title: subtask.title,
description: subtask.description || '',
status: this.mapStatus(subtask.status),
priority: this.mapPriority(subtask.priority),
dependencies: dependenciesByTaskId.get(subtask.id) || [],
details: (subtask.metadata as any)?.details || '',
testStrategy: (subtask.metadata as any)?.testStrategy || '',
details: this.extractMetadataField(subtask.metadata, 'details', ''),
testStrategy: this.extractMetadataField(
subtask.metadata,
'testStrategy',
''
),
createdAt: subtask.created_at,
updatedAt: subtask.updated_at,
assignee: subtask.assignee_id || undefined,
complexity: subtask.complexity
? this.mapComplexityToInternal(subtask.complexity)
: undefined
complexity: subtask.complexity ?? undefined
}));
return {
@@ -67,22 +79,25 @@ export class TaskMapper {
status: this.mapStatus(dbTask.status),
priority: this.mapPriority(dbTask.priority),
dependencies: dependenciesByTaskId.get(dbTask.id) || [],
details: (dbTask.metadata as any)?.details || '',
testStrategy: (dbTask.metadata as any)?.testStrategy || '',
details: this.extractMetadataField(dbTask.metadata, 'details', ''),
testStrategy: this.extractMetadataField(
dbTask.metadata,
'testStrategy',
''
),
subtasks,
createdAt: dbTask.created_at,
updatedAt: dbTask.updated_at,
assignee: dbTask.assignee_id || undefined,
complexity: dbTask.complexity
? this.mapComplexityToInternal(dbTask.complexity)
: undefined,
complexity: dbTask.complexity ?? undefined,
effort: dbTask.estimated_hours || undefined,
actualEffort: dbTask.actual_hours || undefined
};
}
/**
* Groups dependencies by task ID
* Groups dependencies by task ID (legacy method for backward compatibility)
* @deprecated Use DependencyFetcher.fetchDependenciesWithDisplayIds instead
*/
private static groupDependenciesByTaskId(
dependencies: DependencyRow[]
@@ -92,7 +107,14 @@ export class TaskMapper {
if (dependencies) {
for (const dep of dependencies) {
const deps = dependenciesByTaskId.get(dep.task_id) || [];
deps.push(dep.depends_on_task_id);
// Handle both old format (UUID string) and new format (object with display_id)
const dependencyId =
typeof dep.depends_on_task === 'object'
? dep.depends_on_task?.display_id
: dep.depends_on_task_id;
if (dependencyId) {
deps.push(dependencyId);
}
dependenciesByTaskId.set(dep.task_id, deps);
}
}
@@ -157,14 +179,38 @@ export class TaskMapper {
}
/**
* Maps numeric complexity to descriptive complexity
* Safely extracts a field from metadata JSON with runtime type validation
* @param metadata The metadata object (could be null or any type)
* @param field The field to extract
* @param defaultValue Default value if field doesn't exist
* @returns The extracted value if it matches the expected type, otherwise defaultValue
*/
private static mapComplexityToInternal(
complexity: number
): Task['complexity'] {
if (complexity <= 2) return 'simple';
if (complexity <= 5) return 'moderate';
if (complexity <= 8) return 'complex';
return 'very-complex';
private static extractMetadataField<T>(
metadata: unknown,
field: string,
defaultValue: T
): T {
if (!metadata || typeof metadata !== 'object') {
return defaultValue;
}
const value = (metadata as Record<string, unknown>)[field];
if (value === undefined) {
return defaultValue;
}
// Runtime type validation: ensure value matches the type of defaultValue
const expectedType = typeof defaultValue;
const actualType = typeof value;
if (expectedType !== actualType) {
console.warn(
`Type mismatch in metadata field "${field}": expected ${expectedType}, got ${actualType}. Using default value.`
);
return defaultValue;
}
return value as T;
}
}

View File

@@ -0,0 +1,185 @@
/**
* @fileoverview ComplexityReportManager - Handles loading and managing complexity analysis reports
* Follows the same pattern as ConfigManager and AuthManager
*/
import { promises as fs } from 'fs';
import path from 'path';
import type {
ComplexityReport,
ComplexityAnalysis,
TaskComplexityData
} from './types.js';
import { getLogger } from '../logger/index.js';
const logger = getLogger('ComplexityReportManager');
/**
* Manages complexity analysis reports
* Handles loading, caching, and providing complexity data for tasks
*/
export class ComplexityReportManager {
private projectRoot: string;
private reportCache: Map<string, ComplexityReport> = new Map();
constructor(projectRoot: string) {
this.projectRoot = projectRoot;
}
/**
* Get the path to the complexity report file for a given tag
*/
private getReportPath(tag?: string): string {
const reportsDir = path.join(this.projectRoot, '.taskmaster', 'reports');
const tagSuffix = tag && tag !== 'master' ? `_${tag}` : '';
return path.join(reportsDir, `task-complexity-report${tagSuffix}.json`);
}
/**
* Load complexity report for a given tag
* Results are cached to avoid repeated file reads
*/
async loadReport(tag?: string): Promise<ComplexityReport | null> {
const resolvedTag = tag || 'master';
const cacheKey = resolvedTag;
// Check cache first
if (this.reportCache.has(cacheKey)) {
return this.reportCache.get(cacheKey)!;
}
const reportPath = this.getReportPath(tag);
try {
// Check if file exists
await fs.access(reportPath);
// Read and parse the report
const content = await fs.readFile(reportPath, 'utf-8');
const report = JSON.parse(content) as ComplexityReport;
// Validate basic structure
if (!report.meta || !Array.isArray(report.complexityAnalysis)) {
logger.warn(
`Invalid complexity report structure at ${reportPath}, ignoring`
);
return null;
}
// Cache the report
this.reportCache.set(cacheKey, report);
logger.debug(
`Loaded complexity report for tag '${resolvedTag}' with ${report.complexityAnalysis.length} analyses`
);
return report;
} catch (error: any) {
if (error.code === 'ENOENT') {
// File doesn't exist - this is normal, not all projects have complexity reports
logger.debug(`No complexity report found for tag '${resolvedTag}'`);
return null;
}
// Other errors (parsing, permissions, etc.)
logger.warn(
`Failed to load complexity report for tag '${resolvedTag}': ${error.message}`
);
return null;
}
}
/**
* Get complexity data for a specific task ID
*/
async getComplexityForTask(
taskId: string | number,
tag?: string
): Promise<TaskComplexityData | null> {
const report = await this.loadReport(tag);
if (!report) {
return null;
}
// Find the analysis for this task
const analysis = report.complexityAnalysis.find(
(a) => String(a.taskId) === String(taskId)
);
if (!analysis) {
return null;
}
// Convert to TaskComplexityData format
return {
complexityScore: analysis.complexityScore,
recommendedSubtasks: analysis.recommendedSubtasks,
expansionPrompt: analysis.expansionPrompt,
complexityReasoning: analysis.complexityReasoning
};
}
/**
* Get complexity data for multiple tasks at once
* More efficient than calling getComplexityForTask multiple times
*/
async getComplexityForTasks(
taskIds: (string | number)[],
tag?: string
): Promise<Map<string, TaskComplexityData>> {
const result = new Map<string, TaskComplexityData>();
const report = await this.loadReport(tag);
if (!report) {
return result;
}
// Create a map for fast lookups
const analysisMap = new Map<string, ComplexityAnalysis>();
report.complexityAnalysis.forEach((analysis) => {
analysisMap.set(String(analysis.taskId), analysis);
});
// Map each task ID to its complexity data
taskIds.forEach((taskId) => {
const analysis = analysisMap.get(String(taskId));
if (analysis) {
result.set(String(taskId), {
complexityScore: analysis.complexityScore,
recommendedSubtasks: analysis.recommendedSubtasks,
expansionPrompt: analysis.expansionPrompt,
complexityReasoning: analysis.complexityReasoning
});
}
});
return result;
}
/**
* Clear the report cache
* Useful when reports are regenerated or modified externally
* @param tag - Specific tag to clear, or undefined to clear all cached reports
*/
clearCache(tag?: string): void {
if (tag) {
this.reportCache.delete(tag);
} else {
// Clear all cached reports
this.reportCache.clear();
}
}
/**
* Check if a complexity report exists for a tag
*/
async hasReport(tag?: string): Promise<boolean> {
const reportPath = this.getReportPath(tag);
try {
await fs.access(reportPath);
return true;
} catch {
return false;
}
}
}
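
A usage sketch (the project root, tag handling, and task IDs are illustrative; the manager only reads reports and caches them per tag):

import { ComplexityReportManager } from './complexity-report-manager.js';

async function printComplexity(projectRoot: string, taskIds: string[]) {
	const manager = new ComplexityReportManager(projectRoot);

	// Batch lookup reads the report once and resolves all IDs against it.
	const complexityById = await manager.getComplexityForTasks(taskIds); // defaults to the 'master' tag

	for (const id of taskIds) {
		const data = complexityById.get(id);
		console.log(`${id}: score=${data?.complexityScore ?? 'n/a'}`);
	}

	// Drop cached reports after regenerating them externally.
	manager.clearCache();
}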

View File

@@ -0,0 +1,11 @@
/**
* @fileoverview Reports module exports
*/
export { ComplexityReportManager } from './complexity-report-manager.js';
export type {
ComplexityReport,
ComplexityReportMetadata,
ComplexityAnalysis,
TaskComplexityData
} from './types.js';

View File

@@ -0,0 +1,65 @@
/**
* @fileoverview Type definitions for complexity analysis reports
*/
/**
* Analysis result for a single task
*/
export interface ComplexityAnalysis {
/** Task ID being analyzed */
taskId: string | number;
/** Task title */
taskTitle: string;
/** Complexity score (1-10 scale) */
complexityScore: number;
/** Recommended number of subtasks */
recommendedSubtasks: number;
/** AI-generated prompt for task expansion */
expansionPrompt: string;
/** Reasoning behind the complexity assessment */
complexityReasoning: string;
}
/**
* Metadata about the complexity report
*/
export interface ComplexityReportMetadata {
/** When the report was generated */
generatedAt: string;
/** Number of tasks analyzed in this run */
tasksAnalyzed: number;
/** Total number of tasks in the file */
totalTasks?: number;
/** Total analyses in the report (across all runs) */
analysisCount?: number;
/** Complexity threshold score used */
thresholdScore: number;
/** Project name */
projectName?: string;
/** Whether research mode was used */
usedResearch: boolean;
}
/**
* Complete complexity analysis report
*/
export interface ComplexityReport {
/** Report metadata */
meta: ComplexityReportMetadata;
/** Array of complexity analyses */
complexityAnalysis: ComplexityAnalysis[];
}
/**
* Complexity data to be attached to a Task
*/
export interface TaskComplexityData {
/** Complexity score (1-10 scale) */
complexityScore?: number;
/** Recommended number of subtasks */
recommendedSubtasks?: number;
/** AI-generated expansion prompt */
expansionPrompt?: string;
/** Reasoning behind the assessment */
complexityReasoning?: string;
}
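
For reference, a minimal object satisfying these types (all values invented for illustration):

import type { ComplexityReport } from './types.js';

const exampleReport: ComplexityReport = {
	meta: {
		generatedAt: '2025-10-06T12:00:00.000Z',
		tasksAnalyzed: 1,
		thresholdScore: 5,
		usedResearch: false
	},
	complexityAnalysis: [
		{
			taskId: 1,
			taskTitle: 'Implement export tasks',
			complexityScore: 7,
			recommendedSubtasks: 4,
			expansionPrompt:
				'Split into auth check, task transformation, bulk API call and result reporting.',
			complexityReasoning: 'Touches auth, local storage and a remote API.'
		}
	]
};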

View File

@@ -1,224 +0,0 @@
import { SupabaseClient } from '@supabase/supabase-js';
import { Task } from '../types/index.js';
import { Database } from '../types/database.types.js';
import { TaskMapper } from '../mappers/TaskMapper.js';
import { AuthManager } from '../auth/auth-manager.js';
import { z } from 'zod';
// Zod schema for task status validation
const TaskStatusSchema = z.enum([
'pending',
'in-progress',
'done',
'review',
'deferred',
'cancelled',
'blocked'
]);
// Zod schema for task updates
const TaskUpdateSchema = z
.object({
title: z.string().min(1).optional(),
description: z.string().optional(),
status: TaskStatusSchema.optional(),
priority: z.enum(['low', 'medium', 'high', 'critical']).optional(),
details: z.string().optional(),
testStrategy: z.string().optional()
})
.partial();
export class SupabaseTaskRepository {
constructor(private supabase: SupabaseClient<Database>) {}
async getTasks(_projectId?: string): Promise<Task[]> {
// Get the current context to determine briefId
const authManager = AuthManager.getInstance();
const context = authManager.getContext();
if (!context || !context.briefId) {
throw new Error(
'No brief selected. Please select a brief first using: tm context brief'
);
}
// Get all tasks for the brief using the exact query structure
const { data: tasks, error } = await this.supabase
.from('tasks')
.select(`
*,
document:document_id (
id,
document_name,
title,
description
)
`)
.eq('brief_id', context.briefId)
.order('position', { ascending: true })
.order('subtask_position', { ascending: true })
.order('created_at', { ascending: true });
if (error) {
throw new Error(`Failed to fetch tasks: ${error.message}`);
}
if (!tasks || tasks.length === 0) {
return [];
}
// Get all dependencies for these tasks
const taskIds = tasks.map((t: any) => t.id);
const { data: depsData, error: depsError } = await this.supabase
.from('task_dependencies')
.select('*')
.in('task_id', taskIds);
if (depsError) {
throw new Error(
`Failed to fetch task dependencies: ${depsError.message}`
);
}
// Use mapper to convert to internal format
return TaskMapper.mapDatabaseTasksToTasks(tasks, depsData || []);
}
async getTask(_projectId: string, taskId: string): Promise<Task | null> {
// Get the current context to determine briefId (projectId not used in Supabase context)
const authManager = AuthManager.getInstance();
const context = authManager.getContext();
if (!context || !context.briefId) {
throw new Error(
'No brief selected. Please select a brief first using: tm context brief'
);
}
const { data, error } = await this.supabase
.from('tasks')
.select('*')
.eq('brief_id', context.briefId)
.eq('display_id', taskId.toUpperCase())
.single();
if (error) {
if (error.code === 'PGRST116') {
return null; // Not found
}
throw new Error(`Failed to fetch task: ${error.message}`);
}
// Get dependencies for this task
const { data: depsData } = await this.supabase
.from('task_dependencies')
.select('*')
.eq('task_id', taskId);
// Get subtasks if this is a parent task
const { data: subtasksData } = await this.supabase
.from('tasks')
.select('*')
.eq('parent_task_id', taskId)
.order('subtask_position', { ascending: true });
// Create dependency map
const dependenciesByTaskId = new Map<string, string[]>();
if (depsData) {
dependenciesByTaskId.set(
taskId,
depsData.map(
(d: Database['public']['Tables']['task_dependencies']['Row']) =>
d.depends_on_task_id
)
);
}
// Use mapper to convert single task
return TaskMapper.mapDatabaseTaskToTask(
data,
subtasksData || [],
dependenciesByTaskId
);
}
async updateTask(
projectId: string,
taskId: string,
updates: Partial<Task>
): Promise<Task> {
// Get the current context to determine briefId
const authManager = AuthManager.getInstance();
const context = authManager.getContext();
if (!context || !context.briefId) {
throw new Error(
'No brief selected. Please select a brief first using: tm context brief'
);
}
// Validate updates using Zod schema
try {
TaskUpdateSchema.parse(updates);
} catch (error) {
if (error instanceof z.ZodError) {
const errorMessages = error.errors
.map((err) => `${err.path.join('.')}: ${err.message}`)
.join(', ');
throw new Error(`Invalid task update data: ${errorMessages}`);
}
throw error;
}
// Convert Task fields to database fields - only include fields that actually exist in the database
const dbUpdates: any = {};
if (updates.title !== undefined) dbUpdates.title = updates.title;
if (updates.description !== undefined)
dbUpdates.description = updates.description;
if (updates.status !== undefined)
dbUpdates.status = this.mapStatusToDatabase(updates.status);
if (updates.priority !== undefined) dbUpdates.priority = updates.priority;
// Skip fields that don't exist in database schema: details, testStrategy, etc.
// Update the task
const { error } = await this.supabase
.from('tasks')
.update(dbUpdates)
.eq('brief_id', context.briefId)
.eq('display_id', taskId.toUpperCase());
if (error) {
throw new Error(`Failed to update task: ${error.message}`);
}
// Return the updated task by fetching it
const updatedTask = await this.getTask(projectId, taskId);
if (!updatedTask) {
throw new Error(`Failed to retrieve updated task ${taskId}`);
}
return updatedTask;
}
/**
* Maps internal status to database status
*/
private mapStatusToDatabase(
status: string
): Database['public']['Enums']['task_status'] {
switch (status) {
case 'pending':
return 'todo';
case 'in-progress':
case 'in_progress': // Accept both formats
return 'in_progress';
case 'done':
return 'done';
default:
throw new Error(
`Invalid task status: ${status}. Valid statuses are: pending, in-progress, done`
);
}
}
}

View File

@@ -0,0 +1,68 @@
import { SupabaseClient } from '@supabase/supabase-js';
import { Database } from '../../types/database.types.js';
import { DependencyWithDisplayId } from '../../types/repository-types.js';
/**
* Handles fetching and processing of task dependencies with display_ids
*/
export class DependencyFetcher {
constructor(private supabase: SupabaseClient<Database>) {}
/**
* Fetches dependencies for given task IDs with display_ids joined
* @param taskIds Array of task IDs to fetch dependencies for
* @returns Map of task ID to array of dependency display_ids
*/
async fetchDependenciesWithDisplayIds(
taskIds: string[]
): Promise<Map<string, string[]>> {
if (!taskIds || taskIds.length === 0) {
return new Map();
}
const { data, error } = await this.supabase
.from('task_dependencies')
.select(`
task_id,
depends_on_task:tasks!task_dependencies_depends_on_task_id_fkey (
display_id
)
`)
.in('task_id', taskIds);
if (error) {
throw new Error(`Failed to fetch task dependencies: ${error.message}`);
}
return this.processDependencyData(data as DependencyWithDisplayId[]);
}
/**
* Processes raw dependency data into a map structure
*/
private processDependencyData(
dependencies: DependencyWithDisplayId[]
): Map<string, string[]> {
const dependenciesByTaskId = new Map<string, string[]>();
if (!dependencies) {
return dependenciesByTaskId;
}
for (const dep of dependencies) {
if (!dep.task_id) continue;
const currentDeps = dependenciesByTaskId.get(dep.task_id) || [];
// Extract display_id from the joined object
const displayId = dep.depends_on_task?.display_id;
if (displayId) {
currentDeps.push(displayId);
}
dependenciesByTaskId.set(dep.task_id, currentDeps);
}
return dependenciesByTaskId;
}
}
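
A small usage sketch (the Supabase client and task UUIDs are assumed to come from the surrounding storage layer):

import { SupabaseClient } from '@supabase/supabase-js';
import { Database } from '../../types/database.types.js';
import { DependencyFetcher } from './dependency-fetcher.js';

async function logDependencies(
	supabase: SupabaseClient<Database>,
	taskIds: string[]
) {
	const fetcher = new DependencyFetcher(supabase);
	const byTaskId = await fetcher.fetchDependenciesWithDisplayIds(taskIds);
	for (const [taskId, displayIds] of byTaskId) {
		console.log(`${taskId} depends on: ${displayIds.join(', ')}`);
	}
}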

View File

@@ -0,0 +1,5 @@
/**
* Supabase repository implementations
*/
export { SupabaseTaskRepository } from './supabase-task-repository.js';
export { DependencyFetcher } from './dependency-fetcher.js';

View File

@@ -0,0 +1,275 @@
import { SupabaseClient } from '@supabase/supabase-js';
import { Task } from '../../types/index.js';
import { Database, Json } from '../../types/database.types.js';
import { TaskMapper } from '../../mappers/TaskMapper.js';
import { AuthManager } from '../../auth/auth-manager.js';
import { DependencyFetcher } from './dependency-fetcher.js';
import {
TaskWithRelations,
TaskDatabaseUpdate
} from '../../types/repository-types.js';
import { LoadTasksOptions } from '../../interfaces/storage.interface.js';
import { z } from 'zod';
// Zod schema for task status validation
const TaskStatusSchema = z.enum([
'pending',
'in-progress',
'done',
'review',
'deferred',
'cancelled',
'blocked'
]);
// Zod schema for task updates
const TaskUpdateSchema = z
.object({
title: z.string().min(1).optional(),
description: z.string().optional(),
status: TaskStatusSchema.optional(),
priority: z.enum(['low', 'medium', 'high', 'critical']).optional(),
details: z.string().optional(),
testStrategy: z.string().optional()
})
.partial();
export class SupabaseTaskRepository {
private dependencyFetcher: DependencyFetcher;
private authManager: AuthManager;
constructor(private supabase: SupabaseClient<Database>) {
this.dependencyFetcher = new DependencyFetcher(supabase);
this.authManager = AuthManager.getInstance();
}
/**
* Gets the current brief ID from auth context
* @throws {Error} If no brief is selected
*/
private getBriefIdOrThrow(): string {
const context = this.authManager.getContext();
if (!context?.briefId) {
throw new Error(
'No brief selected. Please select a brief first using: tm context brief'
);
}
return context.briefId;
}
async getTasks(
_projectId?: string,
options?: LoadTasksOptions
): Promise<Task[]> {
const briefId = this.getBriefIdOrThrow();
// Build query with filters
let query = this.supabase
.from('tasks')
.select(`
*,
document:document_id (
id,
document_name,
title,
description
)
`)
.eq('brief_id', briefId);
// Apply status filter at database level if specified
if (options?.status) {
const dbStatus = this.mapStatusToDatabase(options.status);
query = query.eq('status', dbStatus);
}
// Apply subtask exclusion at database level if specified
if (options?.excludeSubtasks) {
// Only fetch parent tasks (where parent_task_id is null)
query = query.is('parent_task_id', null);
}
// Execute query with ordering
const { data: tasks, error } = await query
.order('position', { ascending: true })
.order('subtask_position', { ascending: true })
.order('created_at', { ascending: true });
if (error) {
throw new Error(`Failed to fetch tasks: ${error.message}`);
}
if (!tasks || tasks.length === 0) {
return [];
}
// Type-safe task ID extraction
const typedTasks = tasks as TaskWithRelations[];
const taskIds = typedTasks.map((t) => t.id);
const dependenciesMap =
await this.dependencyFetcher.fetchDependenciesWithDisplayIds(taskIds);
// Use mapper to convert to internal format
return TaskMapper.mapDatabaseTasksToTasks(tasks, dependenciesMap);
}
async getTask(_projectId: string, taskId: string): Promise<Task | null> {
const briefId = this.getBriefIdOrThrow();
const { data, error } = await this.supabase
.from('tasks')
.select('*')
.eq('brief_id', briefId)
.eq('display_id', taskId.toUpperCase())
.single();
if (error) {
if (error.code === 'PGRST116') {
return null; // Not found
}
throw new Error(`Failed to fetch task: ${error.message}`);
}
// Get subtasks if this is a parent task
const { data: subtasksData } = await this.supabase
.from('tasks')
.select('*')
.eq('parent_task_id', data.id)
.order('subtask_position', { ascending: true });
// Get all task IDs (parent + subtasks) to fetch dependencies
const allTaskIds = [data.id, ...(subtasksData?.map((st) => st.id) || [])];
// Fetch dependencies using the dedicated fetcher
const dependenciesByTaskId =
await this.dependencyFetcher.fetchDependenciesWithDisplayIds(allTaskIds);
// Use mapper to convert single task
return TaskMapper.mapDatabaseTaskToTask(
data,
subtasksData || [],
dependenciesByTaskId
);
}
async updateTask(
projectId: string,
taskId: string,
updates: Partial<Task>
): Promise<Task> {
const briefId = this.getBriefIdOrThrow();
// Validate updates using Zod schema
try {
TaskUpdateSchema.parse(updates);
} catch (error) {
if (error instanceof z.ZodError) {
const errorMessages = error.issues
.map((err) => `${err.path.join('.')}: ${err.message}`)
.join(', ');
throw new Error(`Invalid task update data: ${errorMessages}`);
}
throw error;
}
// Convert Task fields to database fields with proper typing
const dbUpdates: TaskDatabaseUpdate = {};
if (updates.title !== undefined) dbUpdates.title = updates.title;
if (updates.description !== undefined)
dbUpdates.description = updates.description;
if (updates.status !== undefined)
dbUpdates.status = this.mapStatusToDatabase(updates.status);
if (updates.priority !== undefined)
dbUpdates.priority = this.mapPriorityToDatabase(updates.priority);
// Handle metadata fields (details, testStrategy, etc.)
// Load existing metadata to preserve fields not being updated
const { data: existingMetadataRow, error: existingMetadataError } =
await this.supabase
.from('tasks')
.select('metadata')
.eq('brief_id', briefId)
.eq('display_id', taskId.toUpperCase())
.single();
if (existingMetadataError) {
throw new Error(
`Failed to load existing task metadata: ${existingMetadataError.message}`
);
}
const metadata: Record<string, unknown> = {
...((existingMetadataRow?.metadata as Record<string, unknown>) ?? {})
};
if (updates.details !== undefined) metadata.details = updates.details;
if (updates.testStrategy !== undefined)
metadata.testStrategy = updates.testStrategy;
if (Object.keys(metadata).length > 0) {
dbUpdates.metadata = metadata as Json;
}
// Update the task
const { error } = await this.supabase
.from('tasks')
.update(dbUpdates)
.eq('brief_id', briefId)
.eq('display_id', taskId.toUpperCase());
if (error) {
throw new Error(`Failed to update task: ${error.message}`);
}
// Return the updated task by fetching it
const updatedTask = await this.getTask(projectId, taskId);
if (!updatedTask) {
throw new Error(`Failed to retrieve updated task ${taskId}`);
}
return updatedTask;
}
/**
* Maps internal status to database status
*/
private mapStatusToDatabase(
status: string
): Database['public']['Enums']['task_status'] {
switch (status) {
case 'pending':
return 'todo';
case 'in-progress':
case 'in_progress': // Accept both formats
return 'in_progress';
case 'done':
return 'done';
default:
throw new Error(
`Invalid task status: ${status}. Valid statuses are: pending, in-progress, done`
);
}
}
/**
* Maps internal priority to database priority
* Task Master uses 'critical', database uses 'urgent'
*/
private mapPriorityToDatabase(
priority: string
): Database['public']['Enums']['task_priority'] {
switch (priority) {
case 'critical':
return 'urgent';
case 'low':
case 'medium':
case 'high':
return priority as Database['public']['Enums']['task_priority'];
default:
throw new Error(
`Invalid task priority: ${priority}. Valid priorities are: low, medium, high, critical`
);
}
}
}
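
From a caller's perspective, the new filter push-down looks like this (hedged sketch; the repository instance and status value are illustrative):

import type { SupabaseTaskRepository } from '../repositories/supabase/index.js';

// Status and subtask filters are now applied in the database query itself,
// so only matching parent tasks come back over the wire.
async function listPendingParents(repo: SupabaseTaskRepository) {
	return repo.getTasks(undefined, { status: 'pending', excludeSubtasks: true });
}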

View File

@@ -1,8 +1,9 @@
import { Task, TaskTag } from '../types/index.js';
import { LoadTasksOptions } from '../interfaces/storage.interface.js';
export interface TaskRepository {
// Task operations
getTasks(projectId: string): Promise<Task[]>;
getTasks(projectId: string, options?: LoadTasksOptions): Promise<Task[]>;
getTask(projectId: string, taskId: string): Promise<Task | null>;
createTask(projectId: string, task: Omit<Task, 'id'>): Promise<Task>;
updateTask(

View File

@@ -0,0 +1,496 @@
/**
* @fileoverview Export Service
* Core service for exporting tasks to external systems (e.g., Hamster briefs)
*/
import type { Task, TaskStatus } from '../types/index.js';
import type { UserContext } from '../auth/types.js';
import { ConfigManager } from '../config/config-manager.js';
import { AuthManager } from '../auth/auth-manager.js';
import { ERROR_CODES, TaskMasterError } from '../errors/task-master-error.js';
import { FileStorage } from '../storage/file-storage/index.js';
// Type definitions for the bulk API response
interface TaskImportResult {
externalId?: string;
index: number;
success: boolean;
taskId?: string;
error?: string;
validationErrors?: string[];
}
interface BulkTasksResponse {
dryRun: boolean;
totalTasks: number;
successCount: number;
failedCount: number;
skippedCount: number;
results: TaskImportResult[];
summary: {
message: string;
duration: number;
};
}
/**
* Options for exporting tasks
*/
export interface ExportTasksOptions {
/** Optional tag to export tasks from (uses active tag if not provided) */
tag?: string;
/** Brief ID to export to */
briefId?: string;
/** Organization ID (required if briefId is provided) */
orgId?: string;
/** Filter by task status */
status?: TaskStatus;
/** Exclude subtasks from export (default: false, i.e. subtasks are included unless set) */
excludeSubtasks?: boolean;
}
/**
* Result of the export operation
*/
export interface ExportResult {
/** Whether the export was successful */
success: boolean;
/** Number of tasks exported */
taskCount: number;
/** The brief ID tasks were exported to */
briefId: string;
/** The organization ID */
orgId: string;
/** Optional message */
message?: string;
/** Error details if export failed */
error?: {
code: string;
message: string;
};
}
/**
* Brief information from API
*/
export interface Brief {
id: string;
accountId: string;
createdAt: string;
name?: string;
}
/**
* ExportService handles task export to external systems
*/
export class ExportService {
private configManager: ConfigManager;
private authManager: AuthManager;
constructor(configManager: ConfigManager, authManager: AuthManager) {
this.configManager = configManager;
this.authManager = authManager;
}
/**
* Export tasks to a brief
*/
async exportTasks(options: ExportTasksOptions): Promise<ExportResult> {
// Validate authentication
if (!this.authManager.isAuthenticated()) {
throw new TaskMasterError(
'Authentication required for export',
ERROR_CODES.AUTHENTICATION_ERROR
);
}
// Get current context
const context = this.authManager.getContext();
// Determine org and brief IDs
let orgId = options.orgId || context?.orgId;
let briefId = options.briefId || context?.briefId;
// Validate we have necessary IDs
if (!orgId) {
throw new TaskMasterError(
'Organization ID is required for export. Use "tm context org" to select one.',
ERROR_CODES.MISSING_CONFIGURATION
);
}
if (!briefId) {
throw new TaskMasterError(
'Brief ID is required for export. Use "tm context brief" or provide --brief flag.',
ERROR_CODES.MISSING_CONFIGURATION
);
}
// Get tasks from the specified or active tag
const activeTag = this.configManager.getActiveTag();
const tag = options.tag || activeTag;
// Always read tasks from local file storage for export
// (we're exporting local tasks to a remote brief)
const fileStorage = new FileStorage(this.configManager.getProjectRoot());
await fileStorage.initialize();
// Load tasks with filters applied at storage layer
const filteredTasks = await fileStorage.loadTasks(tag, {
status: options.status,
excludeSubtasks: options.excludeSubtasks
});
// Get total count (without filters) for comparison
const allTasks = await fileStorage.loadTasks(tag);
const taskListResult = {
tasks: filteredTasks,
total: allTasks.length,
filtered: filteredTasks.length,
tag,
storageType: 'file' as const
};
if (taskListResult.tasks.length === 0) {
return {
success: false,
taskCount: 0,
briefId,
orgId,
message: 'No tasks found to export',
error: {
code: 'NO_TASKS',
message: 'No tasks match the specified criteria'
}
};
}
try {
// Call the export API with the original tasks
// performExport will handle the transformation based on the method used
await this.performExport(orgId, briefId, taskListResult.tasks);
return {
success: true,
taskCount: taskListResult.tasks.length,
briefId,
orgId,
message: `Successfully exported ${taskListResult.tasks.length} task(s) to brief`
};
} catch (error) {
const errorMessage =
error instanceof Error ? error.message : String(error);
return {
success: false,
taskCount: 0,
briefId,
orgId,
error: {
code: 'EXPORT_FAILED',
message: errorMessage
}
};
}
}
/**
* Export tasks from a brief ID or URL
*/
async exportFromBriefInput(briefInput: string): Promise<ExportResult> {
// Extract brief ID from input
const briefId = this.extractBriefId(briefInput);
if (!briefId) {
throw new TaskMasterError(
'Invalid brief ID or URL provided',
ERROR_CODES.VALIDATION_ERROR
);
}
// Fetch brief to get organization
const brief = await this.authManager.getBrief(briefId);
if (!brief) {
throw new TaskMasterError(
'Brief not found or you do not have access',
ERROR_CODES.NOT_FOUND
);
}
// Export with the resolved org and brief
return this.exportTasks({
orgId: brief.accountId,
briefId: brief.id
});
}
/**
* Validate export context before prompting
*/
async validateContext(): Promise<{
hasOrg: boolean;
hasBrief: boolean;
context: UserContext | null;
}> {
const context = this.authManager.getContext();
return {
hasOrg: !!context?.orgId,
hasBrief: !!context?.briefId,
context
};
}
/**
* Transform tasks for API bulk import format (flat structure)
*/
private transformTasksForBulkImport(tasks: Task[]): any[] {
const flatTasks: any[] = [];
// Process each task and its subtasks
tasks.forEach((task) => {
// Add parent task
flatTasks.push({
externalId: String(task.id),
title: task.title,
description: this.enrichDescription(task),
status: this.mapStatusForAPI(task.status),
priority: task.priority || 'medium',
dependencies: task.dependencies?.map(String) || [],
details: task.details,
testStrategy: task.testStrategy,
complexity: task.complexity,
metadata: {
complexity: task.complexity,
originalId: task.id,
originalDescription: task.description,
originalDetails: task.details,
originalTestStrategy: task.testStrategy
}
});
// Add subtasks if they exist
if (task.subtasks && task.subtasks.length > 0) {
task.subtasks.forEach((subtask) => {
flatTasks.push({
externalId: `${task.id}.${subtask.id}`,
parentExternalId: String(task.id),
title: subtask.title,
description: this.enrichDescription(subtask),
status: this.mapStatusForAPI(subtask.status),
priority: subtask.priority || 'medium',
dependencies:
subtask.dependencies?.map((dep) => {
// Convert subtask dependencies to full ID format
if (String(dep).includes('.')) {
return String(dep);
}
return `${task.id}.${dep}`;
}) || [],
details: subtask.details,
testStrategy: subtask.testStrategy,
complexity: subtask.complexity,
metadata: {
complexity: subtask.complexity,
originalId: subtask.id,
originalDescription: subtask.description,
originalDetails: subtask.details,
originalTestStrategy: subtask.testStrategy
}
});
});
}
});
return flatTasks;
}
/**
* Enrich task/subtask description with implementation details and test strategy
* Creates a comprehensive markdown-formatted description
*/
private enrichDescription(taskOrSubtask: Task | any): string {
const sections: string[] = [];
// Start with original description if it exists
if (taskOrSubtask.description) {
sections.push(taskOrSubtask.description);
}
// Add implementation details section
if (taskOrSubtask.details) {
sections.push('## Implementation Details\n');
sections.push(taskOrSubtask.details);
}
// Add test strategy section
if (taskOrSubtask.testStrategy) {
sections.push('## Test Strategy\n');
sections.push(taskOrSubtask.testStrategy);
}
// Join sections with double newlines for better markdown formatting
return sections.join('\n\n').trim() || 'No description provided';
}
/**
* Map internal status to API status format
*/
private mapStatusForAPI(status?: string): string {
switch (status) {
case 'pending':
return 'todo';
case 'in-progress':
return 'in_progress';
case 'done':
return 'done';
default:
return 'todo';
}
}
/**
* Perform the actual export API call
*/
private async performExport(
orgId: string,
briefId: string,
tasks: any[]
): Promise<void> {
// Check if we should use the API endpoint or direct Supabase
const useAPIEndpoint = process.env.TM_PUBLIC_BASE_DOMAIN;
if (useAPIEndpoint) {
// Use the new bulk import API endpoint
const apiUrl = `${process.env.TM_PUBLIC_BASE_DOMAIN}/ai/api/v1/briefs/${briefId}/tasks/bulk`;
// Transform tasks to flat structure for API
const flatTasks = this.transformTasksForBulkImport(tasks);
// Prepare request body
const requestBody = {
source: 'task-master-cli',
accountId: orgId,
options: {
dryRun: false,
stopOnError: false
},
tasks: flatTasks
};
// Get auth token
const credentials = this.authManager.getCredentials();
if (!credentials || !credentials.token) {
throw new Error('Not authenticated');
}
// Make API request
const response = await fetch(apiUrl, {
method: 'POST',
headers: {
'Content-Type': 'application/json',
Authorization: `Bearer ${credentials.token}`
},
body: JSON.stringify(requestBody)
});
if (!response.ok) {
const errorText = await response.text();
throw new Error(
`API request failed: ${response.status} - ${errorText}`
);
}
const result = (await response.json()) as BulkTasksResponse;
if (result.failedCount > 0) {
const failedTasks = result.results
.filter((r) => !r.success)
.map((r) => `${r.externalId}: ${r.error}`)
.join(', ');
console.warn(
`Warning: ${result.failedCount} tasks failed to import: ${failedTasks}`
);
}
console.log(
`Successfully exported ${result.successCount} of ${result.totalTasks} tasks to brief ${briefId}`
);
} else {
// Direct Supabase approach is no longer supported
// The extractTasks method has been removed from SupabaseTaskRepository
// as we now exclusively use the API endpoint for exports
throw new Error(
'Export API endpoint not configured. Please set TM_PUBLIC_BASE_DOMAIN environment variable to enable task export.'
);
}
}
/**
* Extract a brief ID from raw input (ID or URL)
*/
private extractBriefId(input: string): string | null {
const raw = input?.trim() ?? '';
if (!raw) return null;
const parseUrl = (s: string): URL | null => {
try {
return new URL(s);
} catch {}
try {
return new URL(`https://${s}`);
} catch {}
return null;
};
const fromParts = (path: string): string | null => {
const parts = path.split('/').filter(Boolean);
const briefsIdx = parts.lastIndexOf('briefs');
const candidate =
briefsIdx >= 0 && parts.length > briefsIdx + 1
? parts[briefsIdx + 1]
: parts[parts.length - 1];
return candidate?.trim() || null;
};
// Try to parse as URL
const url = parseUrl(raw);
if (url) {
const qId = url.searchParams.get('id') || url.searchParams.get('briefId');
const candidate = (qId || fromParts(url.pathname)) ?? null;
if (candidate) {
if (this.isLikelyId(candidate) || candidate.length >= 8) {
return candidate;
}
}
}
// Check if it looks like a path without scheme
if (raw.includes('/')) {
const candidate = fromParts(raw);
if (candidate && (this.isLikelyId(candidate) || candidate.length >= 8)) {
return candidate;
}
}
// Return as-is if it looks like an ID
if (this.isLikelyId(raw) || raw.length >= 8) {
return raw;
}
return null;
}
/**
* Check if a string looks like a brief ID (UUID-like)
*/
private isLikelyId(value: string): boolean {
const uuidRegex =
/^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$/;
const ulidRegex = /^[0-9A-HJKMNP-TV-Z]{26}$/i;
const slugRegex = /^[A-Za-z0-9_-]{16,}$/;
return (
uuidRegex.test(value) || ulidRegex.test(value) || slugRegex.test(value)
);
}
}
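
A usage sketch of the export flow (the service instance and brief URL are illustrative; TM_PUBLIC_BASE_DOMAIN must be set for the bulk API path to be used):

import { ExportService } from './export.service.js';

async function exportToBrief(service: ExportService, briefUrl: string) {
	// Accepts a raw brief ID or a URL ending in /briefs/<id>
	const result = await service.exportFromBriefInput(briefUrl);

	if (!result.success) {
		console.error(result.error?.message ?? 'Export failed');
		return;
	}
	console.log(result.message);

	// Or, using the current context's org/brief and only pending top-level tasks:
	// await service.exportTasks({ status: 'pending', excludeSubtasks: true });
}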

View File

@@ -5,4 +5,9 @@
export { TaskService } from './task-service.js';
export { OrganizationService } from './organization.service.js';
export { ExportService } from './export.service.js';
export type { Organization, Brief } from './organization.service.js';
export type {
ExportTasksOptions,
ExportResult
} from './export.service.js';

View File

@@ -14,6 +14,7 @@ import { ConfigManager } from '../config/config-manager.js';
import { StorageFactory } from '../storage/storage-factory.js';
import { TaskEntity } from '../entities/task.entity.js';
import { ERROR_CODES, TaskMasterError } from '../errors/task-master-error.js';
import { getLogger } from '../logger/factory.js';
/**
* Result returned by getTaskList
@@ -51,6 +52,7 @@ export class TaskService {
private configManager: ConfigManager;
private storage: IStorage;
private initialized = false;
private logger = getLogger('TaskService');
constructor(configManager: ConfigManager) {
this.configManager = configManager;
@@ -90,37 +92,76 @@ export class TaskService {
const tag = options.tag || activeTag;
try {
// Load raw tasks from storage - storage only knows about tags
const rawTasks = await this.storage.loadTasks(tag);
// Determine if we can push filters to storage layer
const canPushStatusFilter =
options.filter?.status &&
!options.filter.priority &&
!options.filter.tags &&
!options.filter.assignee &&
!options.filter.search &&
options.filter.hasSubtasks === undefined;
// Build storage-level options
const storageOptions: any = {};
// Push status filter to storage if it's the only filter
if (canPushStatusFilter) {
const statuses = Array.isArray(options.filter!.status)
? options.filter!.status
: [options.filter!.status];
// Only push single status to storage (multiple statuses need in-memory filtering)
if (statuses.length === 1) {
storageOptions.status = statuses[0];
}
}
// Push subtask exclusion to storage
if (options.includeSubtasks === false) {
storageOptions.excludeSubtasks = true;
}
// Load tasks from storage with pushed-down filters
const rawTasks = await this.storage.loadTasks(tag, storageOptions);
// Get total count without status filters, but preserve subtask exclusion
const baseOptions: any = {};
if (options.includeSubtasks === false) {
baseOptions.excludeSubtasks = true;
}
const allTasks =
storageOptions.status !== undefined
? await this.storage.loadTasks(tag, baseOptions)
: rawTasks;
// Convert to TaskEntity for business logic operations
const taskEntities = TaskEntity.fromArray(rawTasks);
// Apply filters if provided
// Apply remaining filters in-memory if needed
let filteredEntities = taskEntities;
if (options.filter) {
if (options.filter && !canPushStatusFilter) {
filteredEntities = this.applyFilters(taskEntities, options.filter);
} else if (
options.filter?.status &&
Array.isArray(options.filter.status) &&
options.filter.status.length > 1
) {
// Multiple statuses - filter in-memory
filteredEntities = this.applyFilters(taskEntities, options.filter);
}
// Convert back to plain objects
let tasks = filteredEntities.map((entity) => entity.toJSON());
// Handle subtasks option
if (options.includeSubtasks === false) {
tasks = tasks.map((task) => ({
...task,
subtasks: []
}));
}
const tasks = filteredEntities.map((entity) => entity.toJSON());
return {
tasks,
total: rawTasks.length,
total: allTasks.length,
filtered: filteredEntities.length,
tag: tag, // Return the actual tag being used (either explicitly provided or active tag)
storageType: this.getStorageType()
};
} catch (error) {
this.logger.error('Failed to get task list', error);
throw new TaskMasterError(
'Failed to get task list',
ERROR_CODES.INTERNAL_ERROR,
@@ -135,15 +176,28 @@ export class TaskService {
}
/**
* Get a single task by ID
* Get a single task by ID - delegates to storage layer
*/
async getTask(taskId: string, tag?: string): Promise<Task | null> {
const result = await this.getTaskList({
tag,
includeSubtasks: true
});
// Use provided tag or get active tag
const activeTag = tag || this.getActiveTag();
return result.tasks.find((t) => t.id === taskId) || null;
try {
// Delegate to storage layer which handles the specific logic for tasks vs subtasks
return await this.storage.loadTask(String(taskId), activeTag);
} catch (error) {
throw new TaskMasterError(
`Failed to get task ${taskId}`,
ERROR_CODES.STORAGE_ERROR,
{
operation: 'getTask',
resource: 'task',
taskId: String(taskId),
tag: activeTag
},
error as Error
);
}
}
/**
@@ -384,16 +438,6 @@ export class TaskService {
}
}
// Complexity filter
if (filter.complexity) {
const complexities = Array.isArray(filter.complexity)
? filter.complexity
: [filter.complexity];
if (!task.complexity || !complexities.includes(task.complexity)) {
return false;
}
}
// Search filter
if (filter.search) {
const searchLower = filter.search.toLowerCase();
@@ -446,7 +490,7 @@ export class TaskService {
}
/**
* Update task status
* Update task status - delegates to storage layer which handles storage-specific logic
*/
async updateTaskStatus(
taskId: string | number,
@@ -468,49 +512,28 @@ export class TaskService {
// Use provided tag or get active tag
const activeTag = tag || this.getActiveTag();
const taskIdStr = String(taskId);
// TODO: For now, assume it's a regular task and just try to update directly
// In the future, we can add subtask support if needed
if (taskIdStr.includes('.')) {
throw new TaskMasterError(
'Subtask status updates not yet supported in API storage',
ERROR_CODES.NOT_IMPLEMENTED
);
}
// Get the current task to get old status (simple, direct approach)
let currentTask: Task | null;
try {
// Try to get the task directly
currentTask = await this.storage.loadTask(taskIdStr, activeTag);
// Delegate to storage layer which handles the specific logic for tasks vs subtasks
return await this.storage.updateTaskStatus(
taskIdStr,
newStatus,
activeTag
);
} catch (error) {
throw new TaskMasterError(
`Failed to load task ${taskIdStr}`,
ERROR_CODES.TASK_NOT_FOUND,
{ taskId: taskIdStr },
`Failed to update task status for ${taskIdStr}`,
ERROR_CODES.STORAGE_ERROR,
{
operation: 'updateTaskStatus',
resource: 'task',
taskId: taskIdStr,
newStatus,
tag: activeTag
},
error as Error
);
}
if (!currentTask) {
throw new TaskMasterError(
`Task ${taskIdStr} not found`,
ERROR_CODES.TASK_NOT_FOUND
);
}
const oldStatus = currentTask.status;
// Simple, direct update - just change the status
await this.storage.updateTask(taskIdStr, { status: newStatus }, activeTag);
return {
success: true,
oldStatus,
newStatus,
taskId: taskIdStr
};
}
}
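
From a caller's point of view, the filter push-down behaves as follows (hedged sketch; the service and filter values are illustrative):

import { TaskService } from './task-service.js';

async function summarize(taskService: TaskService) {
	// A single-status filter with no other criteria is now pushed down to storage;
	// multi-status or combined filters still run in memory on TaskEntity objects.
	const single = await taskService.getTaskList({
		filter: { status: 'pending' },
		includeSubtasks: false
	});
	const multi = await taskService.getTaskList({
		filter: { status: ['pending', 'in-progress'] } // filtered in memory
	});
	console.log(
		`${single.filtered}/${single.total} pending, ${multi.filtered} pending or in progress`
	);
}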

View File

@@ -5,12 +5,19 @@
import type {
IStorage,
StorageStats
StorageStats,
UpdateStatusResult,
LoadTasksOptions
} from '../interfaces/storage.interface.js';
import type { Task, TaskMetadata, TaskTag } from '../types/index.js';
import type {
Task,
TaskMetadata,
TaskTag,
TaskStatus
} from '../types/index.js';
import { ERROR_CODES, TaskMasterError } from '../errors/task-master-error.js';
import { TaskRepository } from '../repositories/task-repository.interface.js';
import { SupabaseTaskRepository } from '../repositories/supabase-task-repository.js';
import { SupabaseTaskRepository } from '../repositories/supabase/index.js';
import { SupabaseClient } from '@supabase/supabase-js';
import { AuthManager } from '../auth/auth-manager.js';
@@ -140,7 +147,7 @@ export class ApiStorage implements IStorage {
* Load tasks from API
* In our system, the tag parameter represents a brief ID
*/
async loadTasks(tag?: string): Promise<Task[]> {
async loadTasks(tag?: string, options?: LoadTasksOptions): Promise<Task[]> {
await this.ensureInitialized();
try {
@@ -154,9 +161,9 @@ export class ApiStorage implements IStorage {
);
}
// Load tasks from the current brief context
// Load tasks from the current brief context with filters pushed to repository
const tasks = await this.retryOperation(() =>
this.repository.getTasks(this.projectId)
this.repository.getTasks(this.projectId, options)
);
// Update the tag cache with the loaded task IDs
@@ -485,6 +492,62 @@ export class ApiStorage implements IStorage {
}
}
/**
* Update task or subtask status by ID - for API storage
*/
async updateTaskStatus(
taskId: string,
newStatus: TaskStatus,
tag?: string
): Promise<UpdateStatusResult> {
await this.ensureInitialized();
try {
const existingTask = await this.retryOperation(() =>
this.repository.getTask(this.projectId, taskId)
);
if (!existingTask) {
throw new Error(`Task ${taskId} not found`);
}
const oldStatus = existingTask.status;
if (oldStatus === newStatus) {
return {
success: true,
oldStatus,
newStatus,
taskId
};
}
// Update the task/subtask status
await this.retryOperation(() =>
this.repository.updateTask(this.projectId, taskId, {
status: newStatus,
updatedAt: new Date().toISOString()
})
);
// Note: Parent status auto-adjustment is handled by the backend API service
// which has its own business logic for managing task relationships
return {
success: true,
oldStatus,
newStatus,
taskId
};
} catch (error) {
throw new TaskMasterError(
'Failed to update task status via API',
ERROR_CODES.STORAGE_ERROR,
{ operation: 'updateTaskStatus', taskId, newStatus, tag },
error as Error
);
}
}
/**
* Get all available tags
*/

View File

@@ -2,14 +2,17 @@
* @fileoverview Refactored file-based storage implementation for Task Master
*/
import type { Task, TaskMetadata } from '../../types/index.js';
import type { Task, TaskMetadata, TaskStatus } from '../../types/index.js';
import type {
IStorage,
StorageStats
StorageStats,
UpdateStatusResult,
LoadTasksOptions
} from '../../interfaces/storage.interface.js';
import { FormatHandler } from './format-handler.js';
import { FileOperations } from './file-operations.js';
import { PathResolver } from './path-resolver.js';
import { ComplexityReportManager } from '../../reports/complexity-report-manager.js';
/**
* File-based storage implementation using a single tasks.json file with separated concerns
@@ -18,11 +21,13 @@ export class FileStorage implements IStorage {
private formatHandler: FormatHandler;
private fileOps: FileOperations;
private pathResolver: PathResolver;
private complexityManager: ComplexityReportManager;
constructor(projectPath: string) {
this.formatHandler = new FormatHandler();
this.fileOps = new FileOperations();
this.pathResolver = new PathResolver(projectPath);
this.complexityManager = new ComplexityReportManager(projectPath);
}
/**
@@ -86,14 +91,33 @@ export class FileStorage implements IStorage {
/**
* Load tasks from the single tasks.json file for a specific tag
* Enriches tasks with complexity data from the complexity report
*/
async loadTasks(tag?: string): Promise<Task[]> {
async loadTasks(tag?: string, options?: LoadTasksOptions): Promise<Task[]> {
const filePath = this.pathResolver.getTasksPath();
const resolvedTag = tag || 'master';
try {
const rawData = await this.fileOps.readJson(filePath);
return this.formatHandler.extractTasks(rawData, resolvedTag);
let tasks = this.formatHandler.extractTasks(rawData, resolvedTag);
// Apply filters if provided
if (options) {
// Filter by status if specified
if (options.status) {
tasks = tasks.filter((task) => task.status === options.status);
}
// Exclude subtasks if specified
if (options.excludeSubtasks) {
tasks = tasks.map((task) => ({
...task,
subtasks: []
}));
}
}
return await this.enrichTasksWithComplexity(tasks, resolvedTag);
} catch (error: any) {
if (error.code === 'ENOENT') {
return []; // File doesn't exist, return empty array
@@ -104,9 +128,65 @@ export class FileStorage implements IStorage {
/**
* Load a single task by ID from the tasks.json file
* Handles both regular tasks and subtasks (with dotted notation like "1.2")
*/
async loadTask(taskId: string, tag?: string): Promise<Task | null> {
const tasks = await this.loadTasks(tag);
// Check if this is a subtask (contains a dot)
if (taskId.includes('.')) {
const [parentId, subtaskId] = taskId.split('.');
const parentTask = tasks.find((t) => String(t.id) === parentId);
if (!parentTask || !parentTask.subtasks) {
return null;
}
const subtask = parentTask.subtasks.find(
(st) => String(st.id) === subtaskId
);
if (!subtask) {
return null;
}
const toFullSubId = (maybeDotId: string | number): string => {
const depId = String(maybeDotId);
return depId.includes('.') ? depId : `${parentTask.id}.${depId}`;
};
const resolvedDependencies =
subtask.dependencies?.map((dep) => toFullSubId(dep)) ?? [];
// Return a Task-like object for the subtask with the full dotted ID
// Following the same pattern as findTaskById in utils.js
const subtaskResult = {
...subtask,
id: taskId, // Use the full dotted ID
title: subtask.title || `Subtask ${subtaskId}`,
description: subtask.description || '',
status: subtask.status || 'pending',
priority: subtask.priority || parentTask.priority || 'medium',
dependencies: resolvedDependencies,
details: subtask.details || '',
testStrategy: subtask.testStrategy || '',
subtasks: [],
tags: parentTask.tags || [],
assignee: subtask.assignee || parentTask.assignee,
complexity: subtask.complexity || parentTask.complexity,
createdAt: subtask.createdAt || parentTask.createdAt,
updatedAt: subtask.updatedAt || parentTask.updatedAt,
// Add reference to parent task for context (like utils.js does)
parentTask: {
id: parentTask.id,
title: parentTask.title,
status: parentTask.status
},
isSubtask: true
};
return subtaskResult;
}
// Handle regular task lookup
return tasks.find((task) => String(task.id) === String(taskId)) || null;
}
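// Illustrative usage sketch (not part of the original change; the project path and IDs are hypothetical):
//   const storage = new FileStorage('/path/to/project');
//   const subtask = await storage.loadTask('1.2');
//   // -> a Task-like object with id '1.2', isSubtask: true and a parentTask reference,
//   //    or null when task 1 has no subtask with id 2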
@@ -281,6 +361,159 @@ export class FileStorage implements IStorage {
await this.saveTasks(tasks, tag);
}
/**
* Update task or subtask status by ID - handles file storage logic with parent/subtask relationships
*/
async updateTaskStatus(
taskId: string,
newStatus: TaskStatus,
tag?: string
): Promise<UpdateStatusResult> {
const tasks = await this.loadTasks(tag);
// Check if this is a subtask (contains a dot)
if (taskId.includes('.')) {
return this.updateSubtaskStatusInFile(tasks, taskId, newStatus, tag);
}
// Handle regular task update
const taskIndex = tasks.findIndex((t) => String(t.id) === String(taskId));
if (taskIndex === -1) {
throw new Error(`Task ${taskId} not found`);
}
const oldStatus = tasks[taskIndex].status;
if (oldStatus === newStatus) {
return {
success: true,
oldStatus,
newStatus,
taskId: String(taskId)
};
}
tasks[taskIndex] = {
...tasks[taskIndex],
status: newStatus,
updatedAt: new Date().toISOString()
};
await this.saveTasks(tasks, tag);
return {
success: true,
oldStatus,
newStatus,
taskId: String(taskId)
};
}
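// Illustrative usage sketch (values are hypothetical):
//   const result = await storage.updateTaskStatus('3', 'in-progress', 'master');
//   // -> { success: true, oldStatus: 'pending', newStatus: 'in-progress', taskId: '3' }
//   // A dotted ID such as '3.1' is delegated to updateSubtaskStatusInFile instead.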
/**
* Update subtask status within file storage - handles parent status auto-adjustment
*/
private async updateSubtaskStatusInFile(
tasks: Task[],
subtaskId: string,
newStatus: TaskStatus,
tag?: string
): Promise<UpdateStatusResult> {
// Parse the subtask ID to get parent ID and subtask ID
const parts = subtaskId.split('.');
if (parts.length !== 2) {
throw new Error(
`Invalid subtask ID format: ${subtaskId}. Expected format: parentId.subtaskId`
);
}
const [parentId, subIdRaw] = parts;
const subId = subIdRaw.trim();
if (!/^\d+$/.test(subId)) {
throw new Error(
`Invalid subtask ID: ${subId}. Subtask ID must be a positive integer.`
);
}
const subtaskNumericId = Number(subId);
// Find the parent task
const parentTaskIndex = tasks.findIndex(
(t) => String(t.id) === String(parentId)
);
if (parentTaskIndex === -1) {
throw new Error(`Parent task ${parentId} not found`);
}
const parentTask = tasks[parentTaskIndex];
// Find the subtask within the parent task
const subtaskIndex = parentTask.subtasks.findIndex(
(st) => st.id === subtaskNumericId || String(st.id) === subId
);
if (subtaskIndex === -1) {
throw new Error(
`Subtask ${subtaskId} not found in parent task ${parentId}`
);
}
const oldStatus = parentTask.subtasks[subtaskIndex].status || 'pending';
if (oldStatus === newStatus) {
return {
success: true,
oldStatus,
newStatus,
taskId: subtaskId
};
}
const now = new Date().toISOString();
// Update the subtask status
parentTask.subtasks[subtaskIndex] = {
...parentTask.subtasks[subtaskIndex],
status: newStatus,
updatedAt: now
};
// Auto-adjust parent status based on subtask statuses
const subs = parentTask.subtasks;
let parentNewStatus = parentTask.status;
if (subs.length > 0) {
const norm = (s: any) => s.status || 'pending';
const isDoneLike = (s: any) => {
const st = norm(s);
return st === 'done' || st === 'completed';
};
const allDone = subs.every(isDoneLike);
const anyInProgress = subs.some((s) => norm(s) === 'in-progress');
const anyDone = subs.some(isDoneLike);
const allPending = subs.every((s) => norm(s) === 'pending');
if (allDone) parentNewStatus = 'done';
else if (anyInProgress || anyDone) parentNewStatus = 'in-progress';
else if (allPending) parentNewStatus = 'pending';
}
// Always bump updatedAt; update status only if changed
tasks[parentTaskIndex] = {
...parentTask,
...(parentNewStatus !== parentTask.status
? { status: parentNewStatus }
: {}),
updatedAt: now
};
await this.saveTasks(tasks, tag);
return {
success: true,
oldStatus,
newStatus,
taskId: subtaskId
};
}
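// Parent auto-adjustment sketch (illustrative values): with subtasks
// [done, in-progress, pending] the parent becomes 'in-progress';
// with [done, done, completed] it becomes 'done';
// with [pending, pending] it stays 'pending'.
//   await storage.updateTaskStatus('3.2', 'done'); // also bumps parent 3's updatedAt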
/**
* Delete a task
*/
@@ -386,6 +619,46 @@ export class FileStorage implements IStorage {
await this.saveTasks(tasks, targetTag);
}
/**
* Enrich tasks with complexity data from the complexity report
* Private helper method called by loadTasks()
*/
private async enrichTasksWithComplexity(
tasks: Task[],
tag: string
): Promise<Task[]> {
// Get all task IDs for bulk lookup
const taskIds = tasks.map((t) => t.id);
// Load complexity data for all tasks at once (more efficient)
const complexityMap = await this.complexityManager.getComplexityForTasks(
taskIds,
tag
);
// If no complexity data found, return tasks as-is
if (complexityMap.size === 0) {
return tasks;
}
// Enrich each task with its complexity data
return tasks.map((task) => {
const complexityData = complexityMap.get(String(task.id));
if (!complexityData) {
return task;
}
// Merge complexity data into the task
return {
...task,
complexity: complexityData.complexityScore,
recommendedSubtasks: complexityData.recommendedSubtasks,
expansionPrompt: complexityData.expansionPrompt,
complexityReasoning: complexityData.complexityReasoning
};
});
}
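// Enrichment sketch (illustrative): when the complexity report has an entry for a task,
// loadTasks() returns that task with complexity, recommendedSubtasks, expansionPrompt
// and complexityReasoning merged in; tasks without an entry are returned unchanged.
//   const [task] = await storage.loadTasks('master');
//   task.complexity; // e.g. 7, if the report scored it 7 (hypothetical value)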
}
// Export as default for convenience
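// Filtering sketch for the new LoadTasksOptions (illustrative; the project path is hypothetical):
//   const storage = new FileStorage('/path/to/project');
//   const pendingOnly = await storage.loadTasks('master', { status: 'pending' });
//   const withoutSubtasks = await storage.loadTasks('master', { excludeSubtasks: true });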


@@ -82,7 +82,7 @@ export class StorageFactory {
apiAccessToken: credentials.token,
apiEndpoint:
config.storage?.apiEndpoint ||
process.env.TM_PUBLIC_BASE_DOMAIN ||
'https://tryhamster.com/api'
};
config.storage = nextStorage;
@@ -112,7 +112,7 @@ export class StorageFactory {
apiAccessToken: credentials.token,
apiEndpoint:
config.storage?.apiEndpoint ||
process.env.TM_PUBLIC_BASE_DOMAIN ||
'https://tryhamster.com/api'
};
config.storage = nextStorage;
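// Endpoint resolution sketch (illustrative): explicit config wins, then the environment
// variable, then the hard-coded default.
//   process.env.TM_PUBLIC_BASE_DOMAIN = 'https://example.tryhamster.dev/api'; // hypothetical value
//   // with no config.storage.apiEndpoint set, apiEndpoint resolves to the value above;
//   // unset both and it falls back to 'https://tryhamster.com/api'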

Some files were not shown because too many files have changed in this diff.