Compare commits
40 Commits
task-maste...docs/auto-
| Author | SHA1 | Date |
|---|---|---|
|  | 1eb84f9660 |  |
|  | 7b5a7c4495 |  |
|  | caee040907 |  |
|  | 4b5473860b |  |
|  | b43b7ce201 |  |
|  | 86027f1ee4 |  |
|  | 4f984f8a69 |  |
|  | f7646f41b5 |  |
|  | 20004a39ea |  |
|  | f1393f47b1 |  |
|  | 738ec51c04 |  |
|  | c7418c4594 |  |
|  | 0747f1c772 |  |
|  | ffe24a2e35 |  |
|  | 604b94baa9 |  |
|  | 2ea4bb6a81 |  |
|  | 3e96387715 |  |
|  | 100c3dc47d |  |
|  | 986ac117ae |  |
|  | 18aa416035 |  |
|  | 0079b7defd |  |
|  | 0b2c6967c4 |  |
|  | c0682ac795 |  |
|  | 01a7faea8f |  |
|  | 814265cd33 |  |
|  | 9b7b2ca7b2 |  |
|  | 949f091179 |  |
|  | 32c2b03c23 |  |
|  | 3bfd999d81 |  |
|  | 9fa79eb026 |  |
|  | 875134247a |  |
|  | c2fc61ddb3 |  |
|  | aaacc3dae3 |  |
|  | 46cd5dc186 |  |
|  | 49a31be416 |  |
|  | 2b69936ee7 |  |
|  | b5fe723f8e |  |
|  | d67b81d25d |  |
|  | 66c05053c0 |  |
|  | d7ab4609aa |  |
.changeset/brave-lions-sing.md (new file, 11 lines)
@@ -0,0 +1,11 @@
---
"task-master-ai": minor
---

Add Codex CLI provider with OAuth authentication

- Added codex-cli provider for GPT-5 and GPT-5-Codex models (272K input / 128K output)
- OAuth-first authentication via `codex login` - no API key required
- Optional OPENAI_CODEX_API_KEY support
- Codebase analysis capabilities automatically enabled
- Command-specific settings and approval/sandbox modes
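A minimal usage sketch of the flow this changeset describes; the interactive provider-selection step via `task-master models --setup` is an assumption, not something the changeset spells out:

```bash
# OAuth-first flow: authenticate the Codex CLI, then point Task Master at it
codex login                  # sign in via your ChatGPT subscription; no API key required
task-master models --setup   # pick the codex-cli provider/models interactively (assumed step)
# Optional: export OPENAI_CODEX_API_KEY=... to use key-based auth instead
```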
.changeset/chore-fix-docs.md (new file, 5 lines)
@@ -0,0 +1,5 @@
---
"task-master-ai": patch
---

Improve `analyze-complexity` cli docs and `--research` flag documentation
.changeset/cursor-slash-commands.md (new file, 7 lines)
@@ -0,0 +1,7 @@
---
"task-master-ai": minor
---

Add Cursor IDE custom slash command support

Expose Task Master commands as Cursor slash commands by copying assets/claude/commands to .cursor/commands on profile add and cleaning up on remove.
.changeset/curvy-weeks-flow.md (new file, 5 lines)
@@ -0,0 +1,5 @@
---
"task-master-ai": patch
---

Change parent task back to "pending" when all subtasks are in "pending" state
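A small sketch of the rule this changeset describes (illustrative only, not the repo's actual implementation):

```typescript
// If every subtask is "pending", the parent task reverts to "pending";
// otherwise the parent keeps whatever status it already has.
interface SubtaskLike {
	status: string;
}

function deriveParentStatus(currentStatus: string, subtasks: SubtaskLike[]): string {
	if (subtasks.length > 0 && subtasks.every((s) => s.status === 'pending')) {
		return 'pending';
	}
	return currentStatus;
}
```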
.changeset/easy-spiders-wave.md (new file, 5 lines)
@@ -0,0 +1,5 @@
---
"task-master-ai": patch
---

Do a quick fix on build
.changeset/fix-mcp-connection-errors.md (new file, 5 lines)
@@ -0,0 +1,5 @@
---
"task-master-ai": patch
---

Fix MCP connection errors caused by deprecated generateTaskFiles calls. Resolves "Cannot read properties of null (reading 'toString')" errors when using MCP tools for task management operations.
.changeset/fix-mcp-default-tasks-path.md (new file, 5 lines)
@@ -0,0 +1,5 @@
---
"task-master-ai": patch
---

Fix MCP server error when file parameter not provided - now properly constructs default tasks.json path instead of failing with 'tasksJsonPath is required' error.
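An illustrative sketch of the fallback this fix describes; the exact default location is an assumption based on the `.taskmaster` directory layout referenced elsewhere in this PR:

```typescript
import path from 'node:path';

// If the MCP tool call omits the optional file parameter, fall back to the
// default tasks.json location instead of throwing "tasksJsonPath is required".
function resolveTasksJsonPath(projectRoot: string, file?: string): string {
	return file ?? path.join(projectRoot, '.taskmaster', 'tasks', 'tasks.json');
}
```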
.changeset/flat-cities-say.md (new file, 5 lines)
@@ -0,0 +1,5 @@
---
"task-master-ai": minor
---

Added api keys page on docs website: docs.task-master.dev/getting-started/api-keys
.changeset/forty-tables-invite.md (new file, 10 lines)
@@ -0,0 +1,10 @@
---
"task-master-ai": minor
---

Move to AI SDK v5:

- Works better with claude-code and gemini-cli as ai providers
- Improved openai model family compatibility
- Migrate ollama provider to v2
- Closes #1223, #1013, #1161, #1174
.changeset/gentle-cats-dance.md (new file, 30 lines)
@@ -0,0 +1,30 @@
---
"task-master-ai": minor
---

Migrate AI services to use generateObject for structured data generation

This update migrates all AI service calls from generateText to generateObject, ensuring more reliable and structured responses across all commands.

### Key Changes:

- **Unified AI Service**: Replaced separate generateText implementations with a single generateObjectService that handles structured data generation
- **JSON Mode Support**: Added proper JSON mode configuration for providers that support it (OpenAI, Anthropic, Google, Groq)
- **Schema Validation**: Integrated Zod schemas for all AI-generated content with automatic validation
- **Provider Compatibility**: Maintained compatibility with all existing providers while leveraging their native structured output capabilities
- **Improved Reliability**: Structured output generation reduces parsing errors and ensures consistent data formats

### Technical Improvements:

- Centralized provider configuration in `ai-providers-unified.js`
- Added `generateObject` support detection for each provider
- Implemented proper error handling for schema validation failures
- Maintained backward compatibility with existing prompt structures

### Bug Fixes:

- Fixed subtask ID numbering issue where AI was generating inconsistent IDs (101-105, 601-603) instead of sequential numbering (1, 2, 3...)
- Enhanced prompt instructions to enforce proper ID generation patterns
- Ensured subtasks display correctly as X.1, X.2, X.3 format

This migration improves the reliability and consistency of AI-generated content throughout the Task Master application.
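A minimal sketch of the generateText-to-generateObject pattern this changeset describes, using the Vercel AI SDK with a Zod schema; the model, schema, and prompt are illustrative, and the repo's actual generateObjectService wraps this generically per provider:

```typescript
import { generateObject } from 'ai';
import { openai } from '@ai-sdk/openai';
import { z } from 'zod';

// Schema validation: the response must match this shape or the call fails,
// which removes the manual JSON parsing the old generateText path needed.
const subtaskListSchema = z.object({
	subtasks: z.array(
		z.object({
			id: z.number().int().min(1), // sequential 1, 2, 3... (see Bug Fixes above)
			title: z.string(),
			description: z.string()
		})
	)
});

const { object } = await generateObject({
	model: openai('gpt-4o-mini'),
	schema: subtaskListSchema,
	prompt: 'Break the task "Add OAuth login" into three sequential subtasks.'
});

console.log(object.subtasks.map((s) => s.id)); // e.g. [1, 2, 3]
```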
.changeset/mcp-timeout-configuration.md (new file, 13 lines)
@@ -0,0 +1,13 @@
---
"task-master-ai": minor
---

Enhanced Roo Code profile with MCP timeout configuration for improved reliability during long-running AI operations. The Roo profile now automatically configures a 300-second timeout for MCP server operations, preventing timeouts during complex tasks like `parse-prd`, `expand-all`, `analyze-complexity`, and `research` operations. This change also replaces static MCP configuration files with programmatic generation for better maintainability.

**What's New:**
- 300-second timeout for MCP operations (up from default 60 seconds)
- Programmatic MCP configuration generation (replaces static asset files)
- Enhanced reliability for AI-powered operations
- Consistent with other AI coding assistant profiles

**Migration:** No user action required - existing Roo Code installations will automatically receive the enhanced MCP configuration on next initialization.
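A rough sketch of what the programmatically generated Roo MCP entry could look like; the `timeout` field name, its unit (seconds), and the file layout are assumptions based on this changeset, so check the config Task Master actually writes for the exact shape:

```json
{
  "mcpServers": {
    "task-master-ai": {
      "command": "npx",
      "args": ["-y", "task-master-ai"],
      "timeout": 300
    }
  }
}
```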
.changeset/petite-ideas-grab.md (new file, 5 lines)
@@ -0,0 +1,5 @@
---
"task-master-ai": patch
---

Fix Claude Code settings validation for pathToClaudeCodeExecutable
.changeset/pre.json (new file, 23 lines)
@@ -0,0 +1,23 @@
{
  "mode": "pre",
  "tag": "rc",
  "initialVersions": {
    "task-master-ai": "0.27.3",
    "docs": "0.0.4",
    "extension": "0.25.4"
  },
  "changesets": [
    "chore-fix-docs",
    "cursor-slash-commands",
    "curvy-weeks-flow",
    "easy-spiders-wave",
    "flat-cities-say",
    "forty-tables-invite",
    "gentle-cats-dance",
    "mcp-timeout-configuration",
    "petite-ideas-grab",
    "silly-pandas-find",
    "sweet-maps-rule",
    "whole-pigs-say"
  ]
}
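For context, this file is what the standard @changesets/cli pre-release workflow maintains; a sketch of how an `rc` cycle like this one is typically driven (plain changesets CLI commands, not repo-specific scripts):

```bash
npx changeset pre enter rc   # writes .changeset/pre.json with "mode": "pre" and "tag": "rc"
npx changeset version        # versions packages as 0.28.0-rc.x while pre mode is active
npx changeset pre exit       # leave pre-release mode before cutting the final release
```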
.changeset/silly-pandas-find.md (new file, 5 lines)
@@ -0,0 +1,5 @@
---
"task-master-ai": patch
---

Fix sonar deep research model failing, should be called `sonar-deep-research`
.changeset/sweet-maps-rule.md (new file, 5 lines)
@@ -0,0 +1,5 @@
---
"task-master-ai": minor
---

Upgrade grok-cli ai provider to ai sdk v5
.changeset/whole-pigs-say.md (new file, 8 lines)
@@ -0,0 +1,8 @@
---
"task-master-ai": patch
---

Fix complexity score not showing for `task-master show` and `task-master list`

- Added complexity score on "next task" when running `task-master list`
- Added colors to complexity to reflect complexity (easy, medium, hard)
.github/workflows/ci.yml (vendored, 3 lines changed)
@@ -6,9 +6,6 @@ on:
       - main
       - next
   pull_request:
-    branches:
-      - main
-      - next
   workflow_dispatch:
 
 concurrency:
.github/workflows/extension-ci.yml (vendored, 5 lines changed)
@@ -41,8 +41,7 @@ jobs:
           restore-keys: |
             ${{ runner.os }}-node-
 
-      - name: Install Extension Dependencies
-        working-directory: apps/extension
+      - name: Install Monorepo Dependencies
         run: npm ci
         timeout-minutes: 5
 
@@ -68,7 +67,6 @@ jobs:
             ${{ runner.os }}-node-
 
       - name: Install if cache miss
-        working-directory: apps/extension
         run: npm ci
         timeout-minutes: 3
 
@@ -100,7 +98,6 @@ jobs:
             ${{ runner.os }}-node-
 
       - name: Install if cache miss
-        working-directory: apps/extension
         run: npm ci
         timeout-minutes: 3
 
.github/workflows/extension-release.yml (vendored, 3 lines changed)
@@ -31,8 +31,7 @@ jobs:
           restore-keys: |
             ${{ runner.os }}-node-
 
-      - name: Install Extension Dependencies
-        working-directory: apps/extension
+      - name: Install Monorepo Dependencies
         run: npm ci
         timeout-minutes: 5
 
CHANGELOG.md (72 lines changed)
@@ -1,5 +1,77 @@
# task-master-ai

## 0.28.0-rc.1

### Patch Changes

- [#1274](https://github.com/eyaltoledano/claude-task-master/pull/1274) [`4f984f8`](https://github.com/eyaltoledano/claude-task-master/commit/4f984f8a6965da9f9c7edd60ddfd6560ac022917) Thanks [@Crunchyman-ralph](https://github.com/Crunchyman-ralph)! - Do a quick fix on build

## 0.28.0-rc.0

### Minor Changes

- [#1215](https://github.com/eyaltoledano/claude-task-master/pull/1215) [`0079b7d`](https://github.com/eyaltoledano/claude-task-master/commit/0079b7defdad550811f704c470fdd01955d91d4d) Thanks [@joedanz](https://github.com/joedanz)! - Add Cursor IDE custom slash command support

  Expose Task Master commands as Cursor slash commands by copying assets/claude/commands to .cursor/commands on profile add and cleaning up on remove.

- [#1246](https://github.com/eyaltoledano/claude-task-master/pull/1246) [`18aa416`](https://github.com/eyaltoledano/claude-task-master/commit/18aa416035f44345bde1c7321490345733a5d042) Thanks [@Crunchyman-ralph](https://github.com/Crunchyman-ralph)! - Added api keys page on docs website: docs.task-master.dev/getting-started/api-keys

- [#1246](https://github.com/eyaltoledano/claude-task-master/pull/1246) [`18aa416`](https://github.com/eyaltoledano/claude-task-master/commit/18aa416035f44345bde1c7321490345733a5d042) Thanks [@Crunchyman-ralph](https://github.com/Crunchyman-ralph)! - Move to AI SDK v5:
  - Works better with claude-code and gemini-cli as ai providers
  - Improved openai model family compatibility
  - Migrate ollama provider to v2
  - Closes #1223, #1013, #1161, #1174

- [#1262](https://github.com/eyaltoledano/claude-task-master/pull/1262) [`738ec51`](https://github.com/eyaltoledano/claude-task-master/commit/738ec51c049a295a12839b2dfddaf05e23b8fede) Thanks [@Crunchyman-ralph](https://github.com/Crunchyman-ralph)! - Migrate AI services to use generateObject for structured data generation

  This update migrates all AI service calls from generateText to generateObject, ensuring more reliable and structured responses across all commands.

  ### Key Changes:

  - **Unified AI Service**: Replaced separate generateText implementations with a single generateObjectService that handles structured data generation
  - **JSON Mode Support**: Added proper JSON mode configuration for providers that support it (OpenAI, Anthropic, Google, Groq)
  - **Schema Validation**: Integrated Zod schemas for all AI-generated content with automatic validation
  - **Provider Compatibility**: Maintained compatibility with all existing providers while leveraging their native structured output capabilities
  - **Improved Reliability**: Structured output generation reduces parsing errors and ensures consistent data formats

  ### Technical Improvements:

  - Centralized provider configuration in `ai-providers-unified.js`
  - Added `generateObject` support detection for each provider
  - Implemented proper error handling for schema validation failures
  - Maintained backward compatibility with existing prompt structures

  ### Bug Fixes:

  - Fixed subtask ID numbering issue where AI was generating inconsistent IDs (101-105, 601-603) instead of sequential numbering (1, 2, 3...)
  - Enhanced prompt instructions to enforce proper ID generation patterns
  - Ensured subtasks display correctly as X.1, X.2, X.3 format

  This migration improves the reliability and consistency of AI-generated content throughout the Task Master application.

- [#1112](https://github.com/eyaltoledano/claude-task-master/pull/1112) [`d67b81d`](https://github.com/eyaltoledano/claude-task-master/commit/d67b81d25ddd927fabb6f5deb368e8993519c541) Thanks [@olssonsten](https://github.com/olssonsten)! - Enhanced Roo Code profile with MCP timeout configuration for improved reliability during long-running AI operations. The Roo profile now automatically configures a 300-second timeout for MCP server operations, preventing timeouts during complex tasks like `parse-prd`, `expand-all`, `analyze-complexity`, and `research` operations. This change also replaces static MCP configuration files with programmatic generation for better maintainability.

  **What's New:**

  - 300-second timeout for MCP operations (up from default 60 seconds)
  - Programmatic MCP configuration generation (replaces static asset files)
  - Enhanced reliability for AI-powered operations
  - Consistent with other AI coding assistant profiles

  **Migration:** No user action required - existing Roo Code installations will automatically receive the enhanced MCP configuration on next initialization.

- [#1246](https://github.com/eyaltoledano/claude-task-master/pull/1246) [`986ac11`](https://github.com/eyaltoledano/claude-task-master/commit/986ac117aee00bcd3e6830a0f76e1ad6d10e0bca) Thanks [@Crunchyman-ralph](https://github.com/Crunchyman-ralph)! - Upgrade grok-cli ai provider to ai sdk v5

### Patch Changes

- [#1235](https://github.com/eyaltoledano/claude-task-master/pull/1235) [`aaacc3d`](https://github.com/eyaltoledano/claude-task-master/commit/aaacc3dae36247b4de72b2d2697f49e5df6d01e3) Thanks [@Crunchyman-ralph](https://github.com/Crunchyman-ralph)! - Improve `analyze-complexity` cli docs and `--research` flag documentation

- [#1251](https://github.com/eyaltoledano/claude-task-master/pull/1251) [`0b2c696`](https://github.com/eyaltoledano/claude-task-master/commit/0b2c6967c4605c33a100cff16f6ce8ff09ad06f0) Thanks [@Crunchyman-ralph](https://github.com/Crunchyman-ralph)! - Change parent task back to "pending" when all subtasks are in "pending" state

- [#1172](https://github.com/eyaltoledano/claude-task-master/pull/1172) [`b5fe723`](https://github.com/eyaltoledano/claude-task-master/commit/b5fe723f8ead928e9f2dbde13b833ee70ac3382d) Thanks [@jujax](https://github.com/jujax)! - Fix Claude Code settings validation for pathToClaudeCodeExecutable

- [#1192](https://github.com/eyaltoledano/claude-task-master/pull/1192) [`2b69936`](https://github.com/eyaltoledano/claude-task-master/commit/2b69936ee7b34346d6de5175af20e077359e2e2a) Thanks [@nukunga](https://github.com/nukunga)! - Fix sonar deep research model failing, should be called `sonar-deep-research`

- [#1270](https://github.com/eyaltoledano/claude-task-master/pull/1270) [`20004a3`](https://github.com/eyaltoledano/claude-task-master/commit/20004a39ea848f747e1ff48981bfe176554e4055) Thanks [@Crunchyman-ralph](https://github.com/Crunchyman-ralph)! - Fix complexity score not showing for `task-master show` and `task-master list`
  - Added complexity score on "next task" when running `task-master list`
  - Added colors to complexity to reflect complexity (easy, medium, hard)

## 0.27.3

### Patch Changes
CLAUDE.md (22 lines changed)
@@ -4,6 +4,28 @@
**Import Task Master's development workflow commands and guidelines, treat as if import is in the main CLAUDE.md file.**
@./.taskmaster/CLAUDE.md

## Test Guidelines

### Synchronous Tests
- **NEVER use async/await in test functions** unless testing actual asynchronous operations
- Use synchronous top-level imports instead of dynamic `await import()`
- Test bodies should be synchronous whenever possible
- Example:

```javascript
// ✅ CORRECT - Synchronous imports
import { MyClass } from '../src/my-class.js';

it('should verify behavior', () => {
	expect(new MyClass().property).toBe(value);
});

// ❌ INCORRECT - Async imports
it('should verify behavior', async () => {
	const { MyClass } = await import('../src/my-class.js');
	expect(new MyClass().property).toBe(value);
});
```

## Changeset Guidelines

- When creating changesets, remember that it's user-facing, meaning we don't have to get into the specifics of the code, but rather mention what the end-user is getting or fixing from this changeset.
README.md (19 lines changed)
@@ -60,6 +60,19 @@ The following documentation is also available in the `docs` directory:
 
 > **Note:** After clicking the link, you'll still need to add your API keys to the configuration. The link installs the MCP server with placeholder keys that you'll need to replace with your actual API keys.
 
+#### Claude Code Quick Install
+
+For Claude Code users:
+
+```bash
+claude mcp add taskmaster-ai -- npx -y task-master-ai
+```
+
+Don't forget to add your API keys to the configuration:
+- in the root .env of your Project
+- in the "env" section of your mcp config for taskmaster-ai
+
 ## Requirements
 
 Taskmaster utilizes AI across several commands, and those require a separate API key. You can use a variety of models from different AI providers provided you add your API keys. For example, if you want to use Claude 3.7, you'll need an Anthropic API key.
@@ -75,8 +88,9 @@ At least one (1) of the following is required:
 - xAI API Key (for research or main model)
 - OpenRouter API Key (for research or main model)
 - Claude Code (no API key required - requires Claude Code CLI)
+- Codex CLI (OAuth via ChatGPT subscription - requires Codex CLI)
 
-Using the research model is optional but highly recommended. You will need at least ONE API key (unless using Claude Code). Adding all API keys enables you to seamlessly switch between model providers at will.
+Using the research model is optional but highly recommended. You will need at least ONE API key (unless using Claude Code or Codex CLI with OAuth). Adding all API keys enables you to seamlessly switch between model providers at will.
 
 ## Quick Start
 
@@ -92,10 +106,11 @@ MCP (Model Control Protocol) lets you run Task Master directly from your editor.
 | | Project | `<project_folder>/.cursor/mcp.json` | `<project_folder>\.cursor\mcp.json` | `mcpServers` |
 | **Windsurf** | Global | `~/.codeium/windsurf/mcp_config.json` | `%USERPROFILE%\.codeium\windsurf\mcp_config.json` | `mcpServers` |
 | **VS Code** | Project | `<project_folder>/.vscode/mcp.json` | `<project_folder>\.vscode\mcp.json` | `servers` |
+| **Q CLI** | Global | `~/.aws/amazonq/mcp.json` | | `mcpServers` |
 
 ##### Manual Configuration
 
-###### Cursor & Windsurf (`mcpServers`)
+###### Cursor & Windsurf & Q Developer CLI (`mcpServers`)
 
 ```json
 {
@@ -35,7 +35,7 @@
     "@types/inquirer": "^9.0.3",
     "@types/node": "^22.10.5",
     "tsx": "^4.20.4",
-    "typescript": "^5.7.3",
+    "typescript": "^5.9.2",
     "vitest": "^2.1.8"
   },
   "engines": {
@@ -281,9 +281,14 @@ export class ListTasksCommand extends Command {
 		const priorityBreakdown = getPriorityBreakdown(tasks);
 
 		// Find next task following the same logic as findNextTask
-		const nextTask = this.findNextTask(tasks);
+		const nextTaskInfo = this.findNextTask(tasks);
 
-		// Display dashboard boxes
+		// Get the full task object with complexity data already included
+		const nextTask = nextTaskInfo
+			? tasks.find((t) => String(t.id) === String(nextTaskInfo.id))
+			: undefined;
+
+		// Display dashboard boxes (nextTask already has complexity from storage enrichment)
 		displayDashboards(
 			taskStats,
 			subtaskStats,
@@ -303,14 +308,16 @@ export class ListTasksCommand extends Command {
 
 		// Display recommended next task section immediately after table
 		if (nextTask) {
-			// Find the full task object to get description
-			const fullTask = tasks.find((t) => String(t.id) === String(nextTask.id));
-			const description = fullTask ? getTaskDescription(fullTask) : undefined;
+			const description = getTaskDescription(nextTask);
 
 			displayRecommendedNextTask({
-				...nextTask,
-				status: 'pending', // Next task is typically pending
-				description
+				id: nextTask.id,
+				title: nextTask.title,
+				priority: nextTask.priority,
+				status: nextTask.status,
+				dependencies: nextTask.dependencies,
+				description,
+				complexity: nextTask.complexity as number | undefined
 			});
 		} else {
 			displayRecommendedNextTask(undefined);
@@ -6,6 +6,7 @@
 import chalk from 'chalk';
 import boxen from 'boxen';
 import type { Task, TaskPriority } from '@tm/core/types';
+import { getComplexityWithColor } from '../../utils/ui.js';
 
 /**
  * Statistics for task collection
@@ -479,7 +480,7 @@ export function displayDependencyDashboard(
 			? chalk.cyan(nextTask.dependencies.join(', '))
 			: chalk.gray('None')
 	}\n` +
-		`Complexity: ${nextTask?.complexity || chalk.gray('N/A')}`;
+		`Complexity: ${nextTask?.complexity !== undefined ? getComplexityWithColor(nextTask.complexity) : chalk.gray('N/A')}`;
 
 	return content;
 }
|||||||
@@ -6,6 +6,7 @@
|
|||||||
import chalk from 'chalk';
|
import chalk from 'chalk';
|
||||||
import boxen from 'boxen';
|
import boxen from 'boxen';
|
||||||
import type { Task } from '@tm/core/types';
|
import type { Task } from '@tm/core/types';
|
||||||
|
import { getComplexityWithColor } from '../../utils/ui.js';
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Next task display options
|
* Next task display options
|
||||||
@@ -17,6 +18,7 @@ export interface NextTaskDisplayOptions {
|
|||||||
status?: string;
|
status?: string;
|
||||||
dependencies?: (string | number)[];
|
dependencies?: (string | number)[];
|
||||||
description?: string;
|
description?: string;
|
||||||
|
complexity?: number;
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
@@ -82,6 +84,11 @@ export function displayRecommendedNextTask(
|
|||||||
: chalk.cyan(task.dependencies.join(', '));
|
: chalk.cyan(task.dependencies.join(', '));
|
||||||
content.push(`Dependencies: ${depsDisplay}`);
|
content.push(`Dependencies: ${depsDisplay}`);
|
||||||
|
|
||||||
|
// Complexity with color and label
|
||||||
|
if (typeof task.complexity === 'number') {
|
||||||
|
content.push(`Complexity: ${getComplexityWithColor(task.complexity)}`);
|
||||||
|
}
|
||||||
|
|
||||||
// Description if available
|
// Description if available
|
||||||
if (task.description) {
|
if (task.description) {
|
||||||
content.push('');
|
content.push('');
|
||||||
|
|||||||
@@ -9,7 +9,11 @@ import Table from 'cli-table3';
|
|||||||
import { marked, MarkedExtension } from 'marked';
|
import { marked, MarkedExtension } from 'marked';
|
||||||
import { markedTerminal } from 'marked-terminal';
|
import { markedTerminal } from 'marked-terminal';
|
||||||
import type { Task } from '@tm/core/types';
|
import type { Task } from '@tm/core/types';
|
||||||
import { getStatusWithColor, getPriorityWithColor } from '../../utils/ui.js';
|
import {
|
||||||
|
getStatusWithColor,
|
||||||
|
getPriorityWithColor,
|
||||||
|
getComplexityWithColor
|
||||||
|
} from '../../utils/ui.js';
|
||||||
|
|
||||||
// Configure marked to use terminal renderer with subtle colors
|
// Configure marked to use terminal renderer with subtle colors
|
||||||
marked.use(
|
marked.use(
|
||||||
@@ -108,7 +112,9 @@ export function displayTaskProperties(task: Task): void {
|
|||||||
getStatusWithColor(task.status),
|
getStatusWithColor(task.status),
|
||||||
getPriorityWithColor(task.priority),
|
getPriorityWithColor(task.priority),
|
||||||
deps,
|
deps,
|
||||||
'N/A',
|
typeof task.complexity === 'number'
|
||||||
|
? getComplexityWithColor(task.complexity)
|
||||||
|
: chalk.gray('N/A'),
|
||||||
task.description || ''
|
task.description || ''
|
||||||
].join('\n');
|
].join('\n');
|
||||||
|
|
||||||
|
|||||||
@@ -158,10 +158,18 @@ export function displayUpgradeNotification(
|
|||||||
export async function performAutoUpdate(
|
export async function performAutoUpdate(
|
||||||
latestVersion: string
|
latestVersion: string
|
||||||
): Promise<boolean> {
|
): Promise<boolean> {
|
||||||
if (process.env.TASKMASTER_SKIP_AUTO_UPDATE === '1' || process.env.CI) {
|
if (
|
||||||
console.log(
|
process.env.TASKMASTER_SKIP_AUTO_UPDATE === '1' ||
|
||||||
chalk.dim('Skipping auto-update (TASKMASTER_SKIP_AUTO_UPDATE/CI).')
|
process.env.CI ||
|
||||||
);
|
process.env.NODE_ENV === 'test'
|
||||||
|
) {
|
||||||
|
const reason =
|
||||||
|
process.env.TASKMASTER_SKIP_AUTO_UPDATE === '1'
|
||||||
|
? 'TASKMASTER_SKIP_AUTO_UPDATE=1'
|
||||||
|
: process.env.CI
|
||||||
|
? 'CI environment'
|
||||||
|
: 'NODE_ENV=test';
|
||||||
|
console.log(chalk.dim(`Skipping auto-update (${reason})`));
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
const spinner = ora({
|
const spinner = ora({
|
||||||
|
|||||||
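The skip paths added above can be exercised from a shell like this (messages per the code in this hunk):

```bash
TASKMASTER_SKIP_AUTO_UPDATE=1 task-master list   # "Skipping auto-update (TASKMASTER_SKIP_AUTO_UPDATE=1)"
CI=true task-master list                         # "Skipping auto-update (CI environment)"
NODE_ENV=test task-master list                   # "Skipping auto-update (NODE_ENV=test)"
```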
@@ -84,7 +84,23 @@ export function getPriorityWithColor(priority: TaskPriority): string {
 }
 
 /**
- * Get colored complexity display
+ * Get complexity color and label based on score thresholds
+ */
+function getComplexityLevel(score: number): {
+	color: (text: string) => string;
+	label: string;
+} {
+	if (score >= 7) {
+		return { color: chalk.hex('#CC0000'), label: 'High' };
+	} else if (score >= 4) {
+		return { color: chalk.hex('#FF8800'), label: 'Medium' };
+	} else {
+		return { color: chalk.green, label: 'Low' };
+	}
+}
+
+/**
+ * Get colored complexity display with dot indicator (simple format)
  */
 export function getComplexityWithColor(complexity: number | string): string {
 	const score =
@@ -94,13 +110,20 @@ export function getComplexityWithColor(complexity: number | string): string {
 		return chalk.gray('N/A');
 	}
 
-	if (score >= 8) {
-		return chalk.red.bold(`${score} (High)`);
-	} else if (score >= 5) {
-		return chalk.yellow(`${score} (Medium)`);
-	} else {
-		return chalk.green(`${score} (Low)`);
-	}
+	const { color } = getComplexityLevel(score);
+	return color(`● ${score}`);
+}
+
+/**
+ * Get colored complexity display with /10 format (for dashboards)
+ */
+export function getComplexityWithScore(complexity: number | undefined): string {
+	if (typeof complexity !== 'number') {
+		return chalk.gray('N/A');
+	}
+
+	const { color, label } = getComplexityLevel(complexity);
+	return color(`${complexity}/10 (${label})`);
 }
 
 /**
@@ -323,8 +346,12 @@ export function createTaskTable(
 	}
 
 	if (showComplexity) {
-		// Show N/A if no complexity score
-		row.push(chalk.gray('N/A'));
+		// Show complexity score from report if available
+		if (typeof task.complexity === 'number') {
+			row.push(getComplexityWithColor(task.complexity));
+		} else {
+			row.push(chalk.gray('N/A'));
+		}
 	}
 
 	table.push(row);
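A quick usage sketch of the two helpers introduced in this hunk (import path is illustrative; thresholds follow getComplexityLevel above):

```typescript
import { getComplexityWithColor, getComplexityWithScore } from './utils/ui.js'; // path illustrative

console.log(getComplexityWithColor(8)); // red "● 8"    (>= 7 is High)
console.log(getComplexityWithColor(5)); // orange "● 5" (>= 4 is Medium)
console.log(getComplexityWithScore(3)); // green "3/10 (Low)"
console.log(getComplexityWithScore(undefined)); // gray "N/A"
```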
@@ -1,22 +1,24 @@
 # Task Master Documentation
 
-Welcome to the Task Master documentation. Use the links below to navigate to the information you need:
+Welcome to the Task Master documentation. This documentation site provides comprehensive guides for getting started with Task Master.
 
 ## Getting Started
 
-- [Configuration Guide](archive/configuration.md) - Set up environment variables and customize Task Master
-- [Tutorial](archive/ctutorial.md) - Step-by-step guide to getting started with Task Master
+- [Quick Start Guide](/getting-started/quick-start) - Complete setup and first-time usage guide
+- [Requirements](/getting-started/quick-start/requirements) - What you need to get started
+- [Installation](/getting-started/quick-start/installation) - How to install Task Master
 
-## Reference
+## Core Capabilities
 
-- [Command Reference](archive/ccommand-reference.md) - Complete list of all available commands
-- [Task Structure](archive/ctask-structure.md) - Understanding the task format and features
+- [MCP Tools](/capabilities/mcp) - Model Control Protocol integration
+- [CLI Commands](/capabilities/cli-root-commands) - Command line interface reference
+- [Task Structure](/capabilities/task-structure) - Understanding tasks and subtasks
 
-## Examples & Licensing
+## Best Practices
 
-- [Example Interactions](archive/cexamples.md) - Common Cursor AI interaction examples
-- [Licensing Information](archive/clicensing.md) - Detailed information about the license
+- [Advanced Configuration](/best-practices/configuration-advanced) - Detailed configuration options
+- [Advanced Tasks](/best-practices/advanced-tasks) - Working with complex task structures
 
 ## Need More Help?
 
-If you can't find what you're looking for in these docs, please check the [main README](../README.md) or visit our [GitHub repository](https://github.com/eyaltoledano/claude-task-master).
+If you can't find what you're looking for in these docs, please check the root README.md or visit our [GitHub repository](https://github.com/eyaltoledano/claude-task-master).
@@ -156,7 +156,7 @@ sidebarTitle: "CLI Commands"
   # Use an alternative tasks file
   task-master analyze-complexity --file=custom-tasks.json
 
-  # Use Perplexity AI for research-backed complexity analysis
+  # Use your configured research model for research-backed complexity analysis
   task-master analyze-complexity --research
   ```
 </Accordion>
@@ -85,7 +85,7 @@ The CLI is organized into a series of commands, each with its own set of options
 ### 4. Project and Configuration
 
 - **`init`**: Initializes a new project.
-- **`generate`**: Generates individual task files.
+- **`generate`**: Generates individual task files from tasks.json. Run this manually after task operations to create readable text files for each task.
 - **`migrate`**: Migrates a project to the new directory structure.
 - **`research`**: Performs AI-powered research.
   - `--query <query>`: The research query.
@@ -123,7 +123,7 @@ The core functionalities can be categorized as follows:
 
 ### 1. Task and Subtask Management
 
-These functions are the bread and butter of the application, allowing for the creation, modification, and deletion of tasks and subtasks.
+These functions are the bread and butter of the application, allowing for the creation, modification, and deletion of tasks and subtasks. Note: As of v0.27.3, these operations no longer automatically generate individual task files - use the `generate` command manually when needed.
 
 - **`addTask(prompt, dependencies, priority)`**: Creates a new task using an AI-powered prompt to generate the title, description, details, and test strategy. It can also be used to create a task manually by providing the task data directly.
 - **`addSubtask(parentId, existingTaskId, newSubtaskData)`**: Adds a subtask to a parent task. It can either convert an existing task into a subtask or create a new subtask from scratch.
@@ -167,7 +167,7 @@ These functions are crucial for managing the relationships between tasks.
 
 These functions are for managing the project and its configuration.
 
-- **`generateTaskFiles()`**: Generates individual task files from `tasks.json`.
+- **`generateTaskFiles()`**: Generates individual task files from `tasks.json`. This is now a manual operation - task management operations no longer automatically generate these files.
 - **`migrateProject()`**: Migrates the project to the new `.taskmaster` directory structure.
 - **`performResearch(query, options)`**: Performs AI-powered research with project context.
 
@@ -225,7 +225,7 @@ The MCP tools can be categorized in the same way as the core functionalities:
 ### 5. Project and Configuration
 
 - **`initialize_project`**: Initializes a new project.
-- **`generate`**: Generates individual task files.
+- **`generate`**: Generates individual task files from tasks.json. Run this manually when you want to create readable text files for each task.
 - **`models`**: Manages AI model configurations.
 - **`research`**: Performs AI-powered research.
 
@@ -32,6 +32,7 @@
             "getting-started/quick-start/execute-quick"
           ]
         },
+        "getting-started/api-keys",
         "getting-started/faq",
         "getting-started/contribute"
       ]
apps/docs/getting-started/api-keys.mdx (new file, 267 lines)
@@ -0,0 +1,267 @@
# API Keys Configuration

Task Master supports multiple AI providers through environment variables. This page lists all available API keys and their configuration requirements.

## Required API Keys

> **Note**: At least one required API key must be configured for Task Master to function.
>
> "Required: Yes" below means "required to use that specific provider," not "required globally." You only need at least one provider configured.

### ANTHROPIC_API_KEY (Recommended)
- **Provider**: Anthropic Claude models
- **Format**: `sk-ant-api03-...`
- **Required**: ✅ **Yes**
- **Models**: Claude 3.5 Sonnet, Claude 3 Haiku, Claude 3 Opus
- **Get Key**: [Anthropic Console](https://console.anthropic.com/)

```bash
ANTHROPIC_API_KEY="sk-ant-api03-your-key-here"
```

### PERPLEXITY_API_KEY (Highly Recommended for Research)
- **Provider**: Perplexity AI (Research features)
- **Format**: `pplx-...`
- **Required**: ✅ **Yes**
- **Purpose**: Enables research-backed task expansions and updates
- **Models**: Perplexity Sonar models
- **Get Key**: [Perplexity API](https://www.perplexity.ai/settings/api)

```bash
PERPLEXITY_API_KEY="pplx-your-key-here"
```

### OPENAI_API_KEY
- **Provider**: OpenAI GPT models
- **Format**: `sk-proj-...` or `sk-...`
- **Required**: ✅ **Yes**
- **Models**: GPT-4, GPT-4 Turbo, GPT-3.5 Turbo, O1 models
- **Get Key**: [OpenAI Platform](https://platform.openai.com/api-keys)

```bash
OPENAI_API_KEY="sk-proj-your-key-here"
```

### GOOGLE_API_KEY
- **Provider**: Google Gemini models
- **Format**: Various formats
- **Required**: ✅ **Yes**
- **Models**: Gemini Pro, Gemini Flash, Gemini Ultra
- **Get Key**: [Google AI Studio](https://aistudio.google.com/app/apikey)
- **Alternative**: Use `GOOGLE_APPLICATION_CREDENTIALS` for service account (Google Vertex)

```bash
GOOGLE_API_KEY="your-google-api-key-here"
```

### GROQ_API_KEY
- **Provider**: Groq (High-performance inference)
- **Required**: ✅ **Yes**
- **Models**: Llama models, Mixtral models (via Groq)
- **Get Key**: [Groq Console](https://console.groq.com/keys)

```bash
GROQ_API_KEY="your-groq-key-here"
```

### OPENROUTER_API_KEY
- **Provider**: OpenRouter (Multiple model access)
- **Required**: ✅ **Yes**
- **Models**: Access to various models through single API
- **Get Key**: [OpenRouter](https://openrouter.ai/keys)

```bash
OPENROUTER_API_KEY="your-openrouter-key-here"
```

### AZURE_OPENAI_API_KEY
- **Provider**: Azure OpenAI Service
- **Required**: ✅ **Yes**
- **Requirements**: Also requires `AZURE_OPENAI_ENDPOINT` configuration
- **Models**: GPT models via Azure
- **Get Key**: [Azure Portal](https://portal.azure.com/)

```bash
AZURE_OPENAI_API_KEY="your-azure-key-here"
```

### XAI_API_KEY
- **Provider**: xAI (Grok) models
- **Required**: ✅ **Yes**
- **Models**: Grok models
- **Get Key**: [xAI Console](https://console.x.ai/)

```bash
XAI_API_KEY="your-xai-key-here"
```

## Optional API Keys

> **Note**: These API keys are optional - providers will work without them or use alternative authentication methods.

### AWS_ACCESS_KEY_ID (Bedrock)
- **Provider**: AWS Bedrock
- **Required**: ❌ **No** (uses AWS credential chain)
- **Models**: Claude models via AWS Bedrock
- **Authentication**: Uses AWS credential chain (profiles, IAM roles, etc.)
- **Get Key**: [AWS Console](https://console.aws.amazon.com/iam/)

```bash
# Optional - AWS credential chain is preferred
AWS_ACCESS_KEY_ID="your-aws-access-key"
AWS_SECRET_ACCESS_KEY="your-aws-secret-key"
```

### CLAUDE_CODE_API_KEY
- **Provider**: Claude Code CLI
- **Required**: ❌ **No** (uses OAuth tokens)
- **Purpose**: Integration with local Claude Code CLI
- **Authentication**: Uses OAuth tokens, no API key needed

```bash
# Not typically needed
CLAUDE_CODE_API_KEY="not-usually-required"
```

### GEMINI_API_KEY
- **Provider**: Gemini CLI
- **Required**: ❌ **No** (uses OAuth authentication)
- **Purpose**: Integration with Gemini CLI
- **Authentication**: Primarily uses OAuth via CLI, API key is optional

```bash
# Optional - OAuth via CLI is preferred
GEMINI_API_KEY="your-gemini-key-here"
```

### GROK_CLI_API_KEY
- **Provider**: Grok CLI
- **Required**: ❌ **No** (can use CLI config)
- **Purpose**: Integration with Grok CLI
- **Authentication**: Can use Grok CLI's own config file

```bash
# Optional - CLI config is preferred
GROK_CLI_API_KEY="your-grok-cli-key"
```

### OLLAMA_API_KEY
- **Provider**: Ollama (Local/Remote)
- **Required**: ❌ **No** (local installation doesn't need key)
- **Purpose**: For remote Ollama servers that require authentication
- **Requirements**: Only needed for remote servers with authentication
- **Note**: Not needed for local Ollama installations

```bash
# Only needed for remote Ollama servers
OLLAMA_API_KEY="your-ollama-api-key-here"
```

### GITHUB_API_KEY
- **Provider**: GitHub (Import/Export features)
- **Format**: `ghp_...` or `github_pat_...`
- **Required**: ❌ **No** (for GitHub features only)
- **Purpose**: GitHub import/export features
- **Get Key**: [GitHub Settings](https://github.com/settings/tokens)

```bash
GITHUB_API_KEY="ghp-your-github-key-here"
```

## Configuration Methods

### Method 1: Environment File (.env)
Create a `.env` file in your project root:

```bash
# Copy from .env.example
cp .env.example .env

# Edit with your keys
vim .env
```

### Method 2: System Environment Variables
```bash
export ANTHROPIC_API_KEY="your-key-here"
export PERPLEXITY_API_KEY="your-key-here"
# ... other keys
```

### Method 3: MCP Server Configuration
For Claude Code integration, configure keys in `.mcp.json`:

```json
{
  "mcpServers": {
    "task-master-ai": {
      "command": "npx",
      "args": ["-y", "task-master-ai"],
      "env": {
        "ANTHROPIC_API_KEY": "your-key-here",
        "PERPLEXITY_API_KEY": "your-key-here",
        "OPENAI_API_KEY": "your-key-here"
      }
    }
  }
}
```

## Key Requirements

### Minimum Requirements
- **At least one** AI provider key is required
- **ANTHROPIC_API_KEY** is recommended as the primary provider
- **PERPLEXITY_API_KEY** is highly recommended for research features

### Provider-Specific Requirements
- **Azure OpenAI**: Requires both `AZURE_OPENAI_API_KEY` and `AZURE_OPENAI_ENDPOINT` configuration
- **Google Vertex**: Requires `VERTEX_PROJECT_ID` and `VERTEX_LOCATION` environment variables
- **AWS Bedrock**: Uses AWS credential chain (profiles, IAM roles, etc.) instead of API keys
- **Ollama**: Only needs API key for remote servers with authentication
- **CLI Providers**: Gemini CLI, Grok CLI, and Claude Code use OAuth/CLI config instead of API keys

## Model Configuration

After setting up API keys, configure which models to use:

```bash
# Interactive model setup
task-master models --setup

# Set specific models
task-master models --set-main claude-3-5-sonnet-20241022
task-master models --set-research perplexity-llama-3.1-sonar-large-128k-online
task-master models --set-fallback gpt-4o-mini
```

## Security Best Practices

1. **Never commit API keys** to version control
2. **Use .env files** and add them to `.gitignore`
3. **Rotate keys regularly** especially if compromised
4. **Use minimal permissions** for service accounts
5. **Monitor usage** to detect unauthorized access

## Troubleshooting

### Key Validation
```bash
# Check if keys are properly configured
task-master models

# Test specific provider
task-master add-task --prompt="test task" --model=claude-3-5-sonnet-20241022
```

### Common Issues
- **Invalid key format**: Check the expected format for each provider
- **Insufficient permissions**: Ensure keys have necessary API access
- **Rate limits**: Some providers have usage limits
- **Regional restrictions**: Some models may not be available in all regions

### Getting Help
If you encounter issues with API key configuration:
- Check the [FAQ](/getting-started/faq) for common solutions
- Join our [Discord community](https://discord.gg/fWJkU7rf) for support
- Report issues on [GitHub](https://github.com/eyaltoledano/claude-task-master/issues)
@@ -108,5 +108,5 @@ You don’t need to configure everything up front. Most settings can be left as
 </Accordion>
 
 <Note>
-For advanced configuration options and detailed customization, see our [Advanced Configuration Guide](/docs/best-practices/configuration-advanced) page.
+For advanced configuration options and detailed customization, see our [Advanced Configuration Guide](/best-practices/configuration-advanced) page.
 </Note>
@@ -56,4 +56,4 @@ If you ran into problems and had to debug errors you can create new rules as you
 
 By now you have all you need to get started executing code faster and smarter with Task Master.
 
-If you have any questions please check out [Frequently Asked Questions](/docs/getting-started/faq)
+If you have any questions please check out [Frequently Asked Questions](/getting-started/faq)
|||||||
@@ -30,6 +30,19 @@ cursor://anysphere.cursor-deeplink/mcp/install?name=taskmaster-ai&config=eyJjb21
|
|||||||
```
|
```
|
||||||
|
|
||||||
> **Note:** After clicking the link, you'll still need to add your API keys to the configuration. The link installs the MCP server with placeholder keys that you'll need to replace with your actual API keys.
|
> **Note:** After clicking the link, you'll still need to add your API keys to the configuration. The link installs the MCP server with placeholder keys that you'll need to replace with your actual API keys.
|
||||||
|
|
||||||
|
### Claude Code Quick Install
|
||||||
|
|
||||||
|
For Claude Code users:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
claude mcp add taskmaster-ai -- npx -y task-master-ai
|
||||||
|
```
|
||||||
|
|
||||||
|
Don't forget to add your API keys to the configuration:
|
||||||
|
- in the root .env of your Project
|
||||||
|
- in the "env" section of your mcp config for taskmaster-ai
|
||||||
|
|
||||||
</Accordion>
|
</Accordion>
|
||||||
## Installation Options
|
## Installation Options
|
||||||
|
|
||||||
|
|||||||
@@ -6,13 +6,13 @@ sidebarTitle: "Quick Start"
|
|||||||
This guide is for new users who want to start using Task Master with minimal setup time.
|
This guide is for new users who want to start using Task Master with minimal setup time.
|
||||||
|
|
||||||
It covers:
|
It covers:
|
||||||
- [Requirements](/docs/getting-started/quick-start/requirements): You will need Node.js and an AI model API Key.
|
- [Requirements](/getting-started/quick-start/requirements): You will need Node.js and an AI model API Key.
|
||||||
- [Installation](/docs/getting-started/quick-start/installation): How to Install Task Master.
|
- [Installation](/getting-started/quick-start/installation): How to Install Task Master.
|
||||||
- [Configuration](/docs/getting-started/quick-start/configuration-quick): Setting up your API Key, MCP, and more.
|
- [Configuration](/getting-started/quick-start/configuration-quick): Setting up your API Key, MCP, and more.
|
||||||
- [PRD](/docs/getting-started/quick-start/prd-quick): Writing and parsing your first PRD.
|
- [PRD](/getting-started/quick-start/prd-quick): Writing and parsing your first PRD.
|
||||||
- [Task Setup](/docs/getting-started/quick-start/tasks-quick): Preparing your tasks for execution.
|
- [Task Setup](/getting-started/quick-start/tasks-quick): Preparing your tasks for execution.
|
||||||
- [Executing Tasks](/docs/getting-started/quick-start/execute-quick): Using Task Master to execute tasks.
|
- [Executing Tasks](/getting-started/quick-start/execute-quick): Using Task Master to execute tasks.
|
||||||
- [Rules & Context](/docs/getting-started/quick-start/rules-quick): Learn how and why to build context in your project over time.
|
- [Rules & Context](/getting-started/quick-start/rules-quick): Learn how and why to build context in your project over time.
|
||||||
|
|
||||||
<Tip>
|
<Tip>
|
||||||
By the end of this guide, you'll have everything you need to begin working productively with Task Master.
|
By the end of this guide, you'll have everything you need to begin working productively with Task Master.
|
||||||
|
|||||||
@@ -61,9 +61,25 @@ Task Master can provide a complexity report which can be helpful to read before
|
|||||||
Can you analyze the complexity of our tasks to help me understand which ones need to be broken down further?
|
Can you analyze the complexity of our tasks to help me understand which ones need to be broken down further?
|
||||||
```
|
```
|
||||||
|
|
||||||
|
The agent will use the `analyze_project_complexity` MCP tool, or you can run it directly with the CLI command:
|
||||||
|
```bash
|
||||||
|
task-master analyze-complexity
|
||||||
|
```
|
||||||
|
|
||||||
|
For more comprehensive analysis using your configured research model, you can use:
|
||||||
|
```bash
|
||||||
|
task-master analyze-complexity --research
|
||||||
|
```
|
||||||
|
|
||||||
|
<Tip>
|
||||||
|
The `--research` flag uses whatever research model you have configured in `.taskmaster/config.json` (configurable via `task-master models --setup`) for research-backed complexity analysis, providing more informed recommendations.
|
||||||
|
</Tip>
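If you'd rather point the research role at a specific model instead of going through the interactive setup, the same command accepts role flags (the model below is only an example; some providers also need a provider flag, as shown in the model configuration docs):

```bash
task-master models --set-research sonar-pro
```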
|
||||||
|
|
||||||
You can view the report in a friendly table using:
|
You can view the report in a friendly table using:
|
||||||
```
|
```
|
||||||
Can you show me the complexity report in a more readable format?
|
Can you show me the complexity report in a more readable format?
|
||||||
```
|
```
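The CLI equivalent is the `complexity-report` command used elsewhere in these docs:

```bash
task-master complexity-report
```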
|
||||||
|
|
||||||
<Check>Now you are ready to begin [executing tasks](/docs/getting-started/quick-start/execute-quick)</Check>
|
For more detailed CLI options, see the [Analyze Task Complexity](/capabilities/cli-root-commands#analyze-task-complexity) section.
|
||||||
|
|
||||||
|
<Check>Now you are ready to begin [executing tasks](/getting-started/quick-start/execute-quick)</Check>
|
||||||
@@ -4,7 +4,7 @@ Welcome to v1 of the Task Master Docs. Expect weekly updates as we expand and re
|
|||||||
|
|
||||||
We've organized the docs into three sections depending on your experience level and goals:
|
We've organized the docs into three sections depending on your experience level and goals:
|
||||||
|
|
||||||
### Getting Started - Jump in to [Quick Start](/docs/getting-started/quick-start)
|
### Getting Started - Jump in to [Quick Start](/getting-started/quick-start)
|
||||||
Designed for first-time users. Get set up, create your first PRD, and run your first task.
|
Designed for first-time users. Get set up, create your first PRD, and run your first task.
|
||||||
|
|
||||||
### Best Practices
|
### Best Practices
|
||||||
|
|||||||
@@ -1,5 +1,12 @@
|
|||||||
# Change Log
|
# Change Log
|
||||||
|
|
||||||
|
## 0.25.5-rc.0
|
||||||
|
|
||||||
|
### Patch Changes
|
||||||
|
|
||||||
|
- Updated dependencies [[`aaacc3d`](https://github.com/eyaltoledano/claude-task-master/commit/aaacc3dae36247b4de72b2d2697f49e5df6d01e3), [`0079b7d`](https://github.com/eyaltoledano/claude-task-master/commit/0079b7defdad550811f704c470fdd01955d91d4d), [`0b2c696`](https://github.com/eyaltoledano/claude-task-master/commit/0b2c6967c4605c33a100cff16f6ce8ff09ad06f0), [`18aa416`](https://github.com/eyaltoledano/claude-task-master/commit/18aa416035f44345bde1c7321490345733a5d042), [`18aa416`](https://github.com/eyaltoledano/claude-task-master/commit/18aa416035f44345bde1c7321490345733a5d042), [`738ec51`](https://github.com/eyaltoledano/claude-task-master/commit/738ec51c049a295a12839b2dfddaf05e23b8fede), [`d67b81d`](https://github.com/eyaltoledano/claude-task-master/commit/d67b81d25ddd927fabb6f5deb368e8993519c541), [`b5fe723`](https://github.com/eyaltoledano/claude-task-master/commit/b5fe723f8ead928e9f2dbde13b833ee70ac3382d), [`2b69936`](https://github.com/eyaltoledano/claude-task-master/commit/2b69936ee7b34346d6de5175af20e077359e2e2a), [`986ac11`](https://github.com/eyaltoledano/claude-task-master/commit/986ac117aee00bcd3e6830a0f76e1ad6d10e0bca), [`20004a3`](https://github.com/eyaltoledano/claude-task-master/commit/20004a39ea848f747e1ff48981bfe176554e4055)]:
|
||||||
|
- task-master-ai@0.28.0-rc.0
|
||||||
|
|
||||||
## 0.25.4
|
## 0.25.4
|
||||||
|
|
||||||
### Patch Changes
|
### Patch Changes
|
||||||
|
|||||||
@@ -3,7 +3,7 @@
|
|||||||
"private": true,
|
"private": true,
|
||||||
"displayName": "TaskMaster",
|
"displayName": "TaskMaster",
|
||||||
"description": "A visual Kanban board interface for TaskMaster projects in VS Code",
|
"description": "A visual Kanban board interface for TaskMaster projects in VS Code",
|
||||||
"version": "0.25.4",
|
"version": "0.25.5-rc.0",
|
||||||
"publisher": "Hamster",
|
"publisher": "Hamster",
|
||||||
"icon": "assets/icon.png",
|
"icon": "assets/icon.png",
|
||||||
"engines": {
|
"engines": {
|
||||||
@@ -240,7 +240,7 @@
|
|||||||
"check-types": "tsc --noEmit"
|
"check-types": "tsc --noEmit"
|
||||||
},
|
},
|
||||||
"dependencies": {
|
"dependencies": {
|
||||||
"task-master-ai": "0.27.3"
|
"task-master-ai": "*"
|
||||||
},
|
},
|
||||||
"devDependencies": {
|
"devDependencies": {
|
||||||
"@dnd-kit/core": "^6.3.1",
|
"@dnd-kit/core": "^6.3.1",
|
||||||
@@ -276,7 +276,8 @@
|
|||||||
"react-dom": "^19.0.0",
|
"react-dom": "^19.0.0",
|
||||||
"tailwind-merge": "^3.3.1",
|
"tailwind-merge": "^3.3.1",
|
||||||
"tailwindcss": "4.1.11",
|
"tailwindcss": "4.1.11",
|
||||||
"typescript": "^5.7.3"
|
"typescript": "^5.9.2",
|
||||||
|
"@tm/core": "*"
|
||||||
},
|
},
|
||||||
"overrides": {
|
"overrides": {
|
||||||
"glob@<8": "^10.4.5",
|
"glob@<8": "^10.4.5",
|
||||||
|
|||||||
@@ -2,7 +2,7 @@
|
|||||||
"name": "task-master-hamster",
|
"name": "task-master-hamster",
|
||||||
"displayName": "Taskmaster AI",
|
"displayName": "Taskmaster AI",
|
||||||
"description": "A visual Kanban board interface for Taskmaster projects in VS Code",
|
"description": "A visual Kanban board interface for Taskmaster projects in VS Code",
|
||||||
"version": "0.23.1",
|
"version": "0.25.3",
|
||||||
"publisher": "Hamster",
|
"publisher": "Hamster",
|
||||||
"icon": "assets/icon.png",
|
"icon": "assets/icon.png",
|
||||||
"engines": {
|
"engines": {
|
||||||
|
|||||||
@@ -5,7 +5,6 @@
|
|||||||
"outDir": "out",
|
"outDir": "out",
|
||||||
"lib": ["ES2022", "DOM"],
|
"lib": ["ES2022", "DOM"],
|
||||||
"sourceMap": true,
|
"sourceMap": true,
|
||||||
"rootDir": "src",
|
|
||||||
"strict": true /* enable all strict type-checking options */,
|
"strict": true /* enable all strict type-checking options */,
|
||||||
"moduleResolution": "Node",
|
"moduleResolution": "Node",
|
||||||
"esModuleInterop": true,
|
"esModuleInterop": true,
|
||||||
@@ -21,8 +20,10 @@
|
|||||||
"@/*": ["./src/*"],
|
"@/*": ["./src/*"],
|
||||||
"@/components/*": ["./src/components/*"],
|
"@/components/*": ["./src/components/*"],
|
||||||
"@/lib/*": ["./src/lib/*"],
|
"@/lib/*": ["./src/lib/*"],
|
||||||
"@tm/core": ["../core/src"]
|
"@tm/core": ["../../packages/tm-core/src/index.ts"],
|
||||||
|
"@tm/core/*": ["../../packages/tm-core/src/*"]
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
"include": ["src/**/*"],
|
||||||
"exclude": ["node_modules", ".vscode-test", "out", "dist"]
|
"exclude": ["node_modules", ".vscode-test", "out", "dist"]
|
||||||
}
|
}
|
||||||
|
|||||||
231
docs/claude-code-integration.md
Normal file

@@ -0,0 +1,231 @@
|
|||||||
|
# TODO: Move to apps/docs inside our documentation website
|
||||||
|
|
||||||
|
# Claude Code Integration Guide
|
||||||
|
|
||||||
|
This guide covers how to use Task Master with Claude Code AI SDK integration for enhanced AI-powered development workflows.
|
||||||
|
|
||||||
|
## Overview
|
||||||
|
|
||||||
|
Claude Code integration allows Task Master to leverage the Claude Code CLI for AI operations without requiring direct API keys. The integration uses OAuth tokens managed by the Claude Code CLI itself.
|
||||||
|
|
||||||
|
## Authentication Setup
|
||||||
|
|
||||||
|
The Claude Code provider uses token authentication managed by the Claude Code CLI.
|
||||||
|
|
||||||
|
### Prerequisites
|
||||||
|
|
||||||
|
1. **Install Claude Code CLI** (if not already installed):
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Installation method depends on your system
|
||||||
|
# Follow Claude Code documentation for installation
|
||||||
|
```
|
||||||
|
|
||||||
|
2. **Set up OAuth token** using Claude Code CLI:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
claude setup-token
|
||||||
|
```
|
||||||
|
|
||||||
|
This command will:
|
||||||
|
- Guide you through OAuth authentication
|
||||||
|
- Store the token securely for CLI usage
|
||||||
|
- Enable Task Master to use Claude Code without manual API key configuration
|
||||||
|
|
||||||
|
### Authentication Priority
|
||||||
|
|
||||||
|
Task Master will attempt authentication in this order:
|
||||||
|
|
||||||
|
1. **Environment Variable** (optional): `CLAUDE_CODE_OAUTH_TOKEN`
|
||||||
|
- Useful for CI/CD environments or when you want to override the default token
|
||||||
|
- Not required if you've set up the CLI token
|
||||||
|
|
||||||
|
2. **Claude Code CLI Token** (recommended): Token managed by `claude setup-token`
|
||||||
|
- Automatically used when available
|
||||||
|
- Most convenient for local development
|
||||||
|
|
||||||
|
3. **Fallback**: Error if neither is available
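As a small illustration of that ordering, you can override the CLI-managed token for a single run by setting the environment variable inline (the token value and command below are placeholders):

```bash
CLAUDE_CODE_OAUTH_TOKEN="token-from-your-ci-secrets" task-master parse-prd prd.txt
```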
|
||||||
|
|
||||||
|
## Configuration
|
||||||
|
|
||||||
|
### Basic Configuration
|
||||||
|
|
||||||
|
Add Claude Code to your Task Master configuration:
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
// In your .taskmaster/config.json or via task-master models command
|
||||||
|
{
|
||||||
|
"models": {
|
||||||
|
"main": "claude-code:sonnet", // Use Claude Code with Sonnet
|
||||||
|
"research": "perplexity-llama-3.1-sonar-large-128k-online",
|
||||||
|
"fallback": "claude-code:opus" // Use Claude Code with Opus as fallback
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Supported Models
|
||||||
|
|
||||||
|
- `claude-code:sonnet` - Claude 3.5 Sonnet via Claude Code CLI
|
||||||
|
- `claude-code:opus` - Claude 3 Opus via Claude Code CLI
|
||||||
|
|
||||||
|
### Environment Variables (Optional)
|
||||||
|
|
||||||
|
While not required, you can optionally set:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
export CLAUDE_CODE_OAUTH_TOKEN="your_oauth_token_here"
|
||||||
|
```
|
||||||
|
|
||||||
|
This is only needed in specific scenarios like:
|
||||||
|
|
||||||
|
- CI/CD pipelines
|
||||||
|
- Docker containers
|
||||||
|
- When you want to use a different token than the CLI default
|
||||||
|
|
||||||
|
## Usage Examples
|
||||||
|
|
||||||
|
### Basic Task Operations
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Use Claude Code for task operations
|
||||||
|
task-master add-task --prompt="Implement user authentication system" --research
|
||||||
|
task-master expand --id=1 --research
|
||||||
|
task-master update-task --id=1.1 --prompt="Add JWT token validation"
|
||||||
|
```
|
||||||
|
|
||||||
|
### Model Configuration Commands
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Set Claude Code as main model
|
||||||
|
task-master models --set-main claude-code:sonnet
|
||||||
|
|
||||||
|
# Use interactive setup
|
||||||
|
task-master models --setup
|
||||||
|
# Then select "claude-code" from the provider list
|
||||||
|
```
|
||||||
|
|
||||||
|
## Troubleshooting
|
||||||
|
|
||||||
|
### Common Issues
|
||||||
|
|
||||||
|
#### 1. "Claude Code CLI not available" Error
|
||||||
|
|
||||||
|
**Problem**: Task Master cannot connect to Claude Code CLI.
|
||||||
|
|
||||||
|
**Solutions**:
|
||||||
|
|
||||||
|
- Ensure Claude Code CLI is installed and in your PATH
|
||||||
|
- Run `claude setup-token` to configure authentication
|
||||||
|
- Verify Claude Code CLI works: `claude --help`
|
||||||
|
|
||||||
|
#### 2. Authentication Failures
|
||||||
|
|
||||||
|
**Problem**: Token authentication is failing.
|
||||||
|
|
||||||
|
**Solutions**:
|
||||||
|
|
||||||
|
- Re-run `claude setup-token` to refresh your OAuth token
|
||||||
|
- Check if your token has expired
|
||||||
|
- Verify Claude Code CLI can authenticate: try a simple `claude` command
|
||||||
|
|
||||||
|
#### 3. Model Not Available
|
||||||
|
|
||||||
|
**Problem**: Specified Claude Code model is not supported.
|
||||||
|
|
||||||
|
**Solutions**:
|
||||||
|
|
||||||
|
- Use supported models: `sonnet` or `opus`
|
||||||
|
- Check model availability: `task-master models --list`
|
||||||
|
- Verify your Claude Code CLI has access to the requested model
|
||||||
|
|
||||||
|
### Debug Steps
|
||||||
|
|
||||||
|
1. **Test Claude Code CLI directly**:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
claude --help
|
||||||
|
# Should show help without errors
|
||||||
|
```
|
||||||
|
|
||||||
|
2. **Test authentication**:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
claude setup-token --verify
|
||||||
|
# Should confirm token is valid
|
||||||
|
```
|
||||||
|
|
||||||
|
3. **Test Task Master integration**:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
task-master models --test claude-code:sonnet
|
||||||
|
# Should successfully connect and test the model
|
||||||
|
```
|
||||||
|
|
||||||
|
4. **Check logs**:
|
||||||
|
- Task Master logs will show detailed error messages
|
||||||
|
- Use `--verbose` flag for more detailed output
|
||||||
|
|
||||||
|
### Environment-Specific Configuration
|
||||||
|
|
||||||
|
#### Docker/Containers
|
||||||
|
|
||||||
|
When running in Docker, you'll need to:
|
||||||
|
|
||||||
|
1. Install Claude Code CLI in your container
|
||||||
|
2. Set up authentication via environment variable:
|
||||||
|
|
||||||
|
```dockerfile
|
||||||
|
ENV CLAUDE_CODE_OAUTH_TOKEN="your_token_here"
|
||||||
|
```
|
||||||
|
|
||||||
|
#### CI/CD Pipelines
|
||||||
|
|
||||||
|
For automated environments:
|
||||||
|
|
||||||
|
1. Set up a service account token or use environment variables
|
||||||
|
2. Ensure Claude Code CLI is available in the pipeline environment
|
||||||
|
3. Configure authentication before running Task Master commands
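A minimal GitHub Actions sketch under those assumptions (the npm package name `@anthropic-ai/claude-code` and the secret name are illustrative; adjust them to however you install the CLI and store credentials):

```yaml
- name: Install Claude Code CLI and Task Master
  run: npm install -g @anthropic-ai/claude-code task-master-ai

- name: Analyze task complexity with Claude Code
  run: task-master analyze-complexity
  env:
    CLAUDE_CODE_OAUTH_TOKEN: ${{ secrets.CLAUDE_CODE_OAUTH_TOKEN }}
```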
|
||||||
|
|
||||||
|
## Integration with AI SDK
|
||||||
|
|
||||||
|
Task Master's Claude Code integration uses the official `ai-sdk-provider-claude-code` package, providing:
|
||||||
|
|
||||||
|
- **Streaming Support**: Real-time token streaming for interactive experiences
|
||||||
|
- **Full AI SDK Compatibility**: Works with generateText, streamText, and other AI SDK functions
|
||||||
|
- **Automatic Error Handling**: Graceful degradation when Claude Code is unavailable
|
||||||
|
- **Type Safety**: Full TypeScript support with proper type definitions
|
||||||
|
|
||||||
|
### Example AI SDK Usage
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
import { generateText } from 'ai';
|
||||||
|
import { ClaudeCodeProvider } from './src/ai-providers/claude-code.js';
|
||||||
|
|
||||||
|
const provider = new ClaudeCodeProvider();
|
||||||
|
const client = provider.getClient();
|
||||||
|
|
||||||
|
const result = await generateText({
|
||||||
|
model: client('sonnet'),
|
||||||
|
messages: [
|
||||||
|
{ role: 'user', content: 'Hello Claude!' }
|
||||||
|
]
|
||||||
|
});
|
||||||
|
|
||||||
|
console.log(result.text);
|
||||||
|
```
|
||||||
|
|
||||||
|
## Security Notes
|
||||||
|
|
||||||
|
- OAuth tokens are managed securely by Claude Code CLI
|
||||||
|
- No API keys need to be stored in your project files
|
||||||
|
- Tokens are automatically refreshed by the Claude Code CLI
|
||||||
|
- Environment variables should only be used in secure environments
|
||||||
|
|
||||||
|
## Getting Help
|
||||||
|
|
||||||
|
If you encounter issues:
|
||||||
|
|
||||||
|
1. Check the Claude Code CLI documentation
|
||||||
|
2. Verify your authentication setup with `claude setup-token --verify`
|
||||||
|
3. Review Task Master logs for detailed error messages
|
||||||
|
4. Open an issue with both Task Master and Claude Code version information
|
||||||
@@ -383,6 +383,12 @@ task-master models --set-main=my-local-llama --ollama
|
|||||||
# Set a custom OpenRouter model for the research role
|
# Set a custom OpenRouter model for the research role
|
||||||
task-master models --set-research=google/gemini-pro --openrouter
|
task-master models --set-research=google/gemini-pro --openrouter
|
||||||
|
|
||||||
|
# Set Codex CLI model for the main role (uses ChatGPT subscription via OAuth)
|
||||||
|
task-master models --set-main=gpt-5-codex --codex-cli
|
||||||
|
|
||||||
|
# Set Codex CLI model for the fallback role
|
||||||
|
task-master models --set-fallback=gpt-5 --codex-cli
|
||||||
|
|
||||||
# Run interactive setup to configure models, including custom ones
|
# Run interactive setup to configure models, including custom ones
|
||||||
task-master models --setup
|
task-master models --setup
|
||||||
```
|
```
|
||||||
|
|||||||
@@ -235,6 +235,60 @@ node scripts/init.js
|
|||||||
- "MCP provider requires session context" → Ensure running in MCP environment
|
- "MCP provider requires session context" → Ensure running in MCP environment
|
||||||
- See the [MCP Provider Guide](./mcp-provider-guide.md) for detailed troubleshooting
|
- See the [MCP Provider Guide](./mcp-provider-guide.md) for detailed troubleshooting
|
||||||
|
|
||||||
|
### MCP Timeout Configuration
|
||||||
|
|
||||||
|
Long-running AI operations in taskmaster-ai can exceed the default 60-second MCP timeout. Operations like `parse_prd`, `expand_task`, `research`, and `analyze_project_complexity` may take 2-5 minutes to complete.
|
||||||
|
|
||||||
|
#### Adding Timeout Configuration
|
||||||
|
|
||||||
|
Add a `timeout` parameter to your MCP configuration to extend the timeout limit. The timeout configuration works identically across MCP clients including Cursor, Windsurf, and RooCode:
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"mcpServers": {
|
||||||
|
"task-master-ai": {
|
||||||
|
"command": "npx",
|
||||||
|
"args": ["-y", "--package=task-master-ai", "task-master-ai"],
|
||||||
|
"timeout": 300,
|
||||||
|
"env": {
|
||||||
|
"ANTHROPIC_API_KEY": "your-anthropic-api-key"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
**Configuration Details:**
|
||||||
|
- **`timeout: 300`** - Sets timeout to 300 seconds (5 minutes)
|
||||||
|
- **Value range**: 1-3600 seconds (1 second to 1 hour)
|
||||||
|
- **Recommended**: 300 seconds provides sufficient time for most AI operations
|
||||||
|
- **Format**: Integer value in seconds (not milliseconds)
|
||||||
|
|
||||||
|
#### Automatic Setup
|
||||||
|
|
||||||
|
When adding taskmaster rules for supported editors, the timeout configuration is automatically included:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Automatically includes timeout configuration
|
||||||
|
task-master rules add cursor
|
||||||
|
task-master rules add roo
|
||||||
|
task-master rules add windsurf
|
||||||
|
task-master rules add vscode
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Troubleshooting Timeouts
|
||||||
|
|
||||||
|
If you're still experiencing timeout errors:
|
||||||
|
|
||||||
|
1. **Verify configuration**: Check that `timeout: 300` is present in your MCP config
|
||||||
|
2. **Restart editor**: Restart your editor after making configuration changes
|
||||||
|
3. **Increase timeout**: For very complex operations, try `timeout: 600` (10 minutes)
|
||||||
|
4. **Check API keys**: Ensure required API keys are properly configured
|
||||||
|
|
||||||
|
**Expected behavior:**
|
||||||
|
- **Before fix**: Operations fail after 60 seconds with `MCP request timed out after 60000ms`
|
||||||
|
- **After fix**: Operations complete successfully within the configured timeout limit
|
||||||
|
|
||||||
### Google Vertex AI Configuration
|
### Google Vertex AI Configuration
|
||||||
|
|
||||||
Google Vertex AI is Google Cloud's enterprise AI platform and requires specific configuration:
|
Google Vertex AI is Google Cloud's enterprise AI platform and requires specific configuration:
|
||||||
@@ -375,3 +429,153 @@ Azure OpenAI provides enterprise-grade OpenAI models through Microsoft's Azure c
|
|||||||
- Verify the deployment name matches your configuration exactly (case-sensitive)
|
- Verify the deployment name matches your configuration exactly (case-sensitive)
|
||||||
- Ensure the model deployment is in a "Succeeded" state in Azure OpenAI Studio
|
- Ensure the model deployment is in a "Succeeded" state in Azure OpenAI Studio
|
||||||
- Ensure you're not getting rate limited by `maxTokens`; maintain an appropriate Tokens per Minute (TPM) rate limit in your deployment.
|
- Ensure you're not getting rate limited by `maxTokens`; maintain an appropriate Tokens per Minute (TPM) rate limit in your deployment.
|
||||||
|
|
||||||
|
### Codex CLI Provider
|
||||||
|
|
||||||
|
The Codex CLI provider integrates Task Master with OpenAI's Codex CLI, allowing you to use ChatGPT subscription models via OAuth authentication.
|
||||||
|
|
||||||
|
1. **Prerequisites**:
|
||||||
|
- Node.js >= 18
|
||||||
|
- Codex CLI >= 0.42.0 (>= 0.44.0 recommended)
|
||||||
|
- ChatGPT subscription: Plus, Pro, Business, Edu, or Enterprise (for OAuth access to GPT-5 models)
|
||||||
|
|
||||||
|
2. **Installation**:
|
||||||
|
```bash
|
||||||
|
npm install -g @openai/codex
|
||||||
|
```
|
||||||
|
|
||||||
|
3. **Authentication** (OAuth - Primary Method):
|
||||||
|
```bash
|
||||||
|
codex login
|
||||||
|
```
|
||||||
|
This will open a browser window for OAuth authentication with your ChatGPT account. Once authenticated, Task Master will automatically use these credentials.
|
||||||
|
|
||||||
|
4. **Optional API Key Method**:
|
||||||
|
While OAuth is the primary and recommended authentication method, you can optionally set an OpenAI API key:
|
||||||
|
```bash
|
||||||
|
# In .env file
|
||||||
|
OPENAI_CODEX_API_KEY=sk-your-openai-api-key-here
|
||||||
|
```
|
||||||
|
**Note**: The API key will only be injected if explicitly provided. OAuth is always preferred.
|
||||||
|
|
||||||
|
5. **Configuration**:
|
||||||
|
```json
|
||||||
|
// In .taskmaster/config.json
|
||||||
|
{
|
||||||
|
"models": {
|
||||||
|
"main": {
|
||||||
|
"provider": "codex-cli",
|
||||||
|
"modelId": "gpt-5-codex",
|
||||||
|
"maxTokens": 128000,
|
||||||
|
"temperature": 0.2
|
||||||
|
},
|
||||||
|
"fallback": {
|
||||||
|
"provider": "codex-cli",
|
||||||
|
"modelId": "gpt-5",
|
||||||
|
"maxTokens": 128000,
|
||||||
|
"temperature": 0.2
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"codexCli": {
|
||||||
|
"allowNpx": true,
|
||||||
|
"skipGitRepoCheck": true,
|
||||||
|
"approvalMode": "on-failure",
|
||||||
|
"sandboxMode": "workspace-write"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
6. **Available Models**:
|
||||||
|
- `gpt-5` - Latest GPT-5 model (272K max input, 128K max output)
|
||||||
|
- `gpt-5-codex` - GPT-5 optimized for agentic software engineering (272K max input, 128K max output)
|
||||||
|
|
||||||
|
7. **Codex CLI Settings (`codexCli` section)**:
|
||||||
|
|
||||||
|
The `codexCli` section in your configuration file supports the following options:
|
||||||
|
|
||||||
|
- **`allowNpx`** (boolean, default: `false`): Allow fallback to `npx @openai/codex` if CLI not found on PATH
|
||||||
|
- **`skipGitRepoCheck`** (boolean, default: `false`): Skip git repository safety check (recommended for CI/non-repo usage)
|
||||||
|
- **`approvalMode`** (string): Control command execution approval
|
||||||
|
- `"untrusted"`: Require approval for all commands
|
||||||
|
- `"on-failure"`: Only require approval after a command fails (default)
|
||||||
|
- `"on-request"`: Approve only when explicitly requested
|
||||||
|
- `"never"`: Never require approval (not recommended)
|
||||||
|
- **`sandboxMode`** (string): Control filesystem access
|
||||||
|
- `"read-only"`: Read-only access
|
||||||
|
- `"workspace-write"`: Allow writes to workspace (default)
|
||||||
|
- `"danger-full-access"`: Full filesystem access (use with caution)
|
||||||
|
- **`codexPath`** (string, optional): Custom path to codex CLI executable
|
||||||
|
- **`cwd`** (string, optional): Working directory for Codex CLI execution
|
||||||
|
- **`fullAuto`** (boolean, optional): Fully automatic mode (equivalent to `--full-auto` flag)
|
||||||
|
- **`dangerouslyBypassApprovalsAndSandbox`** (boolean, optional): Bypass all safety checks (dangerous!)
|
||||||
|
- **`color`** (string, optional): Color handling - `"always"`, `"never"`, or `"auto"`
|
||||||
|
- **`outputLastMessageFile`** (string, optional): Write last agent message to specified file
|
||||||
|
- **`verbose`** (boolean, optional): Enable verbose logging
|
||||||
|
- **`env`** (object, optional): Additional environment variables for Codex CLI
|
||||||
|
|
||||||
|
8. **Command-Specific Settings** (optional):
|
||||||
|
You can override settings for specific Task Master commands:
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"codexCli": {
|
||||||
|
"allowNpx": true,
|
||||||
|
"approvalMode": "on-failure",
|
||||||
|
"commandSpecific": {
|
||||||
|
"parse-prd": {
|
||||||
|
"approvalMode": "never",
|
||||||
|
"verbose": true
|
||||||
|
},
|
||||||
|
"expand": {
|
||||||
|
"sandboxMode": "read-only"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
9. **Codebase Features**:
|
||||||
|
The Codex CLI provider is codebase-capable, meaning it can analyze and interact with your project files. Codebase analysis features are automatically enabled when using `codex-cli` as your provider and `enableCodebaseAnalysis` is set to `true` in your global configuration (default).
|
||||||
|
|
||||||
|
10. **Setup Commands**:
|
||||||
|
```bash
|
||||||
|
# Set Codex CLI for main role
|
||||||
|
task-master models --set-main gpt-5-codex --codex-cli
|
||||||
|
|
||||||
|
# Set Codex CLI for fallback role
|
||||||
|
task-master models --set-fallback gpt-5 --codex-cli
|
||||||
|
|
||||||
|
# Verify configuration
|
||||||
|
task-master models
|
||||||
|
```
|
||||||
|
|
||||||
|
11. **Troubleshooting**:
|
||||||
|
|
||||||
|
**"codex: command not found" error:**
|
||||||
|
- Install Codex CLI globally: `npm install -g @openai/codex`
|
||||||
|
- Verify installation: `codex --version`
|
||||||
|
- Alternatively, enable `allowNpx: true` in your codexCli configuration
|
||||||
|
|
||||||
|
**"Not logged in" errors:**
|
||||||
|
- Run `codex login` to authenticate with your ChatGPT account
|
||||||
|
- Verify authentication status: `codex` (opens interactive CLI)
|
||||||
|
|
||||||
|
**"Old version" warnings:**
|
||||||
|
- Check version: `codex --version`
|
||||||
|
- Upgrade: `npm install -g @openai/codex@latest`
|
||||||
|
- Minimum version: 0.42.0, recommended: >= 0.44.0
|
||||||
|
|
||||||
|
**"Model not available" errors:**
|
||||||
|
- Only `gpt-5` and `gpt-5-codex` are available via OAuth subscription
|
||||||
|
- Verify your ChatGPT subscription is active
|
||||||
|
- For other OpenAI models, use the standard `openai` provider with an API key
|
||||||
|
|
||||||
|
**API key not being used:**
|
||||||
|
- API key is only injected when explicitly provided
|
||||||
|
- OAuth authentication is always preferred
|
||||||
|
- If you want to use an API key, ensure `OPENAI_CODEX_API_KEY` is set in your `.env` file
|
||||||
|
|
||||||
|
12. **Important Notes**:
|
||||||
|
- OAuth subscription required for model access (no API key needed for basic operation)
|
||||||
|
- Limited to OAuth-available models only (`gpt-5` and `gpt-5-codex`)
|
||||||
|
- Pricing information is not available for OAuth models (shows as "Unknown" in cost calculations)
|
||||||
|
- See [Codex CLI Provider Documentation](./providers/codex-cli.md) for more details
|
||||||
|
|||||||
463
docs/examples/codex-cli-usage.md
Normal file
@@ -0,0 +1,463 @@
|
|||||||
|
# Codex CLI Provider Usage Examples
|
||||||
|
|
||||||
|
This guide provides practical examples of using Task Master with the Codex CLI provider.
|
||||||
|
|
||||||
|
## Prerequisites
|
||||||
|
|
||||||
|
Before using these examples, ensure you have:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# 1. Codex CLI installed
|
||||||
|
npm install -g @openai/codex
|
||||||
|
|
||||||
|
# 2. Authenticated with ChatGPT
|
||||||
|
codex login
|
||||||
|
|
||||||
|
# 3. Codex CLI configured as your provider
|
||||||
|
task-master models --set-main gpt-5-codex --codex-cli
|
||||||
|
```
|
||||||
|
|
||||||
|
## Example 1: Basic Task Creation
|
||||||
|
|
||||||
|
Use Codex CLI to create tasks from a simple description:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Add a task with AI-powered enhancement
|
||||||
|
task-master add-task --prompt="Implement user authentication with JWT" --research
|
||||||
|
```
|
||||||
|
|
||||||
|
**What happens**:
|
||||||
|
1. Task Master sends your prompt to GPT-5-Codex via the CLI
|
||||||
|
2. The AI analyzes your request and generates a detailed task
|
||||||
|
3. The task is added to your `.taskmaster/tasks/tasks.json`
|
||||||
|
4. OAuth credentials are automatically used (no API key needed)
|
||||||
|
|
||||||
|
## Example 2: Parsing a Product Requirements Document
|
||||||
|
|
||||||
|
Create a comprehensive task list from a PRD:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Create your PRD
|
||||||
|
cat > my-feature.txt <<EOF
|
||||||
|
# User Profile Feature
|
||||||
|
|
||||||
|
## Requirements
|
||||||
|
1. Users can view their profile
|
||||||
|
2. Users can edit their information
|
||||||
|
3. Profile pictures can be uploaded
|
||||||
|
4. Email verification required
|
||||||
|
|
||||||
|
## Technical Constraints
|
||||||
|
- Use React for frontend
|
||||||
|
- Node.js/Express backend
|
||||||
|
- PostgreSQL database
|
||||||
|
EOF
|
||||||
|
|
||||||
|
# Parse with Codex CLI
|
||||||
|
task-master parse-prd my-feature.txt --num-tasks 12
|
||||||
|
```
|
||||||
|
|
||||||
|
**What happens**:
|
||||||
|
1. GPT-5-Codex reads and analyzes your PRD
|
||||||
|
2. Generates structured tasks with dependencies
|
||||||
|
3. Creates subtasks for complex items
|
||||||
|
4. Saves everything to `.taskmaster/tasks/`
|
||||||
|
|
||||||
|
## Example 3: Expanding Tasks with Research
|
||||||
|
|
||||||
|
Break down a complex task into detailed subtasks:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# First, show your current tasks
|
||||||
|
task-master list
|
||||||
|
|
||||||
|
# Expand a specific task (e.g., task 1.2)
|
||||||
|
task-master expand --id=1.2 --research --force
|
||||||
|
```
|
||||||
|
|
||||||
|
**What happens**:
|
||||||
|
1. Codex CLI uses GPT-5 for research-level analysis
|
||||||
|
2. Breaks down the task into logical subtasks
|
||||||
|
3. Adds implementation details and test strategies
|
||||||
|
4. Updates the task with dependency information
|
||||||
|
|
||||||
|
## Example 4: Analyzing Project Complexity
|
||||||
|
|
||||||
|
Get AI-powered insights into your project's task complexity:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Analyze all tasks
|
||||||
|
task-master analyze-complexity --research
|
||||||
|
|
||||||
|
# View the complexity report
|
||||||
|
task-master complexity-report
|
||||||
|
```
|
||||||
|
|
||||||
|
**What happens**:
|
||||||
|
1. GPT-5 analyzes each task's scope and requirements
|
||||||
|
2. Assigns complexity scores and estimates subtask counts
|
||||||
|
3. Generates a detailed report
|
||||||
|
4. Saves to `.taskmaster/reports/task-complexity-report.json`
|
||||||
|
|
||||||
|
## Example 5: Using Custom Codex CLI Settings
|
||||||
|
|
||||||
|
Configure Codex CLI behavior for different commands:
|
||||||
|
|
||||||
|
```json
|
||||||
|
// In .taskmaster/config.json
|
||||||
|
{
|
||||||
|
"models": {
|
||||||
|
"main": {
|
||||||
|
"provider": "codex-cli",
|
||||||
|
"modelId": "gpt-5-codex",
|
||||||
|
"maxTokens": 128000,
|
||||||
|
"temperature": 0.2
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"codexCli": {
|
||||||
|
"allowNpx": true,
|
||||||
|
"approvalMode": "on-failure",
|
||||||
|
"sandboxMode": "workspace-write",
|
||||||
|
"commandSpecific": {
|
||||||
|
"parse-prd": {
|
||||||
|
"verbose": true,
|
||||||
|
"approvalMode": "never"
|
||||||
|
},
|
||||||
|
"expand": {
|
||||||
|
"sandboxMode": "read-only",
|
||||||
|
"verbose": true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Now parse-prd runs with verbose output and no approvals
|
||||||
|
task-master parse-prd requirements.txt
|
||||||
|
|
||||||
|
# Expand runs with read-only mode
|
||||||
|
task-master expand --id=2.1
|
||||||
|
```
|
||||||
|
|
||||||
|
## Example 6: Workflow - Building a Feature End-to-End
|
||||||
|
|
||||||
|
Complete workflow from PRD to implementation tracking:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Step 1: Initialize project
|
||||||
|
task-master init
|
||||||
|
|
||||||
|
# Step 2: Set up Codex CLI
|
||||||
|
task-master models --set-main gpt-5-codex --codex-cli
|
||||||
|
task-master models --set-fallback gpt-5 --codex-cli
|
||||||
|
|
||||||
|
# Step 3: Create PRD
|
||||||
|
cat > feature-prd.txt <<EOF
|
||||||
|
# Authentication System
|
||||||
|
|
||||||
|
Implement a complete authentication system with:
|
||||||
|
- User registration
|
||||||
|
- Email verification
|
||||||
|
- Password reset
|
||||||
|
- Two-factor authentication
|
||||||
|
- Session management
|
||||||
|
EOF
|
||||||
|
|
||||||
|
# Step 4: Parse PRD into tasks
|
||||||
|
task-master parse-prd feature-prd.txt --num-tasks 8
|
||||||
|
|
||||||
|
# Step 5: Analyze complexity
|
||||||
|
task-master analyze-complexity --research
|
||||||
|
|
||||||
|
# Step 6: Expand complex tasks
|
||||||
|
task-master expand --all --research
|
||||||
|
|
||||||
|
# Step 7: Start working
|
||||||
|
task-master next
|
||||||
|
# Shows: Task 1.1: User registration database schema
|
||||||
|
|
||||||
|
# Step 8: Mark completed as you work
|
||||||
|
task-master set-status --id=1.1 --status=done
|
||||||
|
|
||||||
|
# Step 9: Continue to next task
|
||||||
|
task-master next
|
||||||
|
```
|
||||||
|
|
||||||
|
## Example 7: Multi-Role Configuration
|
||||||
|
|
||||||
|
Use Codex CLI for main tasks, Perplexity for research:
|
||||||
|
|
||||||
|
```json
|
||||||
|
// In .taskmaster/config.json
|
||||||
|
{
|
||||||
|
"models": {
|
||||||
|
"main": {
|
||||||
|
"provider": "codex-cli",
|
||||||
|
"modelId": "gpt-5-codex",
|
||||||
|
"maxTokens": 128000,
|
||||||
|
"temperature": 0.2
|
||||||
|
},
|
||||||
|
"research": {
|
||||||
|
"provider": "perplexity",
|
||||||
|
"modelId": "sonar-pro",
|
||||||
|
"maxTokens": 8700,
|
||||||
|
"temperature": 0.1
|
||||||
|
},
|
||||||
|
"fallback": {
|
||||||
|
"provider": "codex-cli",
|
||||||
|
"modelId": "gpt-5",
|
||||||
|
"maxTokens": 128000,
|
||||||
|
"temperature": 0.2
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Main task operations use GPT-5-Codex
|
||||||
|
task-master add-task --prompt="Build REST API endpoint"
|
||||||
|
|
||||||
|
# Research operations use Perplexity
|
||||||
|
task-master analyze-complexity --research
|
||||||
|
|
||||||
|
# Fallback to GPT-5 if needed
|
||||||
|
task-master expand --id=3.2 --force
|
||||||
|
```
|
||||||
|
|
||||||
|
## Example 8: Troubleshooting Common Issues
|
||||||
|
|
||||||
|
### Issue: Codex CLI not found
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Check if Codex is installed
|
||||||
|
codex --version
|
||||||
|
|
||||||
|
# If not found, install globally
|
||||||
|
npm install -g @openai/codex
|
||||||
|
|
||||||
|
# Or enable the npx fallback by editing .taskmaster/config.json and merging
# this into it (appending a second JSON object with `cat >>` would leave the
# file as invalid JSON):
#
#   "codexCli": {
#     "allowNpx": true
#   }
|
||||||
|
```
|
||||||
|
|
||||||
|
### Issue: Not authenticated
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Check auth status
|
||||||
|
codex
|
||||||
|
# Use /about command to see auth info
|
||||||
|
|
||||||
|
# Re-authenticate if needed
|
||||||
|
codex login
|
||||||
|
```
|
||||||
|
|
||||||
|
### Issue: Want more verbose output
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Enable verbose mode by merging this into the "codexCli" section of
# .taskmaster/config.json (edit the existing file rather than appending to it):
#
#   "codexCli": {
#     "verbose": true
#   }
|
||||||
|
|
||||||
|
# Or for specific commands
|
||||||
|
task-master parse-prd my-prd.txt
|
||||||
|
# (verbose output shows detailed Codex CLI interactions)
|
||||||
|
```
|
||||||
|
|
||||||
|
## Example 9: CI/CD Integration
|
||||||
|
|
||||||
|
Use Codex CLI in automated workflows:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
# .github/workflows/task-analysis.yml
|
||||||
|
name: Analyze Task Complexity
|
||||||
|
|
||||||
|
on:
|
||||||
|
push:
|
||||||
|
paths:
|
||||||
|
- '.taskmaster/**'
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
analyze:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v4
|
||||||
|
|
||||||
|
- name: Setup Node.js
|
||||||
|
uses: actions/setup-node@v4
|
||||||
|
with:
|
||||||
|
node-version: '20'
|
||||||
|
|
||||||
|
- name: Install Task Master
|
||||||
|
run: npm install -g task-master-ai
|
||||||
|
|
||||||
|
- name: Configure Codex CLI
|
||||||
|
run: |
|
||||||
|
npm install -g @openai/codex
|
||||||
|
echo "${{ secrets.OPENAI_CODEX_API_KEY }}" > ~/.codex-auth
|
||||||
|
env:
|
||||||
|
OPENAI_CODEX_API_KEY: ${{ secrets.OPENAI_CODEX_API_KEY }}
|
||||||
|
|
||||||
|
- name: Configure Task Master
|
||||||
|
run: |
|
||||||
|
cat > .taskmaster/config.json <<EOF
|
||||||
|
{
|
||||||
|
"models": {
|
||||||
|
"main": {
|
||||||
|
"provider": "codex-cli",
|
||||||
|
"modelId": "gpt-5"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"codexCli": {
|
||||||
|
"allowNpx": true,
|
||||||
|
"skipGitRepoCheck": true,
|
||||||
|
"approvalMode": "never",
|
||||||
|
"fullAuto": true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
EOF
|
||||||
|
|
||||||
|
- name: Analyze Complexity
|
||||||
|
run: task-master analyze-complexity --research
|
||||||
|
|
||||||
|
- name: Upload Report
|
||||||
|
uses: actions/upload-artifact@v3
|
||||||
|
with:
|
||||||
|
name: complexity-report
|
||||||
|
path: .taskmaster/reports/task-complexity-report.json
|
||||||
|
```
|
||||||
|
|
||||||
|
## Best Practices
|
||||||
|
|
||||||
|
### 1. Use OAuth for Development
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# For local development, use OAuth (no API key needed)
|
||||||
|
codex login
|
||||||
|
task-master models --set-main gpt-5-codex --codex-cli
|
||||||
|
```
|
||||||
|
|
||||||
|
### 2. Configure Approval Modes Appropriately
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"codexCli": {
|
||||||
|
"approvalMode": "on-failure", // Safe default
|
||||||
|
"sandboxMode": "workspace-write" // Restricts to project directory
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### 3. Use Command-Specific Settings
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"codexCli": {
|
||||||
|
"commandSpecific": {
|
||||||
|
"parse-prd": {
|
||||||
|
"approvalMode": "never", // PRD parsing is safe
|
||||||
|
"verbose": true
|
||||||
|
},
|
||||||
|
"expand": {
|
||||||
|
"approvalMode": "on-request", // More cautious for task expansion
|
||||||
|
"verbose": false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### 4. Leverage Codebase Analysis
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"global": {
|
||||||
|
"enableCodebaseAnalysis": true // Let Codex analyze your code
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### 5. Handle Errors Gracefully
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Always configure a fallback model
|
||||||
|
task-master models --set-fallback gpt-5 --codex-cli
|
||||||
|
|
||||||
|
# Or use a different provider as fallback
|
||||||
|
task-master models --set-fallback claude-3-5-sonnet
|
||||||
|
```
|
||||||
|
|
||||||
|
## Next Steps
|
||||||
|
|
||||||
|
- Read the [Codex CLI Provider Documentation](../providers/codex-cli.md)
|
||||||
|
- Explore [Configuration Options](../configuration.md#codex-cli-provider)
|
||||||
|
- Check out [Command Reference](../command-reference.md)
|
||||||
|
- Learn about [Task Structure](../task-structure.md)
|
||||||
|
|
||||||
|
## Common Patterns
|
||||||
|
|
||||||
|
### Pattern: Daily Development Workflow
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Morning: Review tasks
|
||||||
|
task-master list
|
||||||
|
|
||||||
|
# Get next task
|
||||||
|
task-master next
|
||||||
|
|
||||||
|
# Work on task...
|
||||||
|
|
||||||
|
# Update task with notes
|
||||||
|
task-master update-subtask --id=2.3 --prompt="Implemented authentication middleware"
|
||||||
|
|
||||||
|
# Mark complete
|
||||||
|
task-master set-status --id=2.3 --status=done
|
||||||
|
|
||||||
|
# Repeat
|
||||||
|
```
|
||||||
|
|
||||||
|
### Pattern: Feature Planning
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Write feature spec
|
||||||
|
vim new-feature.txt
|
||||||
|
|
||||||
|
# Generate tasks
|
||||||
|
task-master parse-prd new-feature.txt --num-tasks 10
|
||||||
|
|
||||||
|
# Analyze and expand
|
||||||
|
task-master analyze-complexity --research
|
||||||
|
task-master expand --all --research --force
|
||||||
|
|
||||||
|
# Review and adjust
|
||||||
|
task-master list
|
||||||
|
```
|
||||||
|
|
||||||
|
### Pattern: Sprint Planning
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Parse sprint requirements
|
||||||
|
task-master parse-prd sprint-requirements.txt
|
||||||
|
|
||||||
|
# Analyze complexity
|
||||||
|
task-master analyze-complexity --research
|
||||||
|
|
||||||
|
# View report
|
||||||
|
task-master complexity-report
|
||||||
|
|
||||||
|
# Adjust task estimates based on complexity scores
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
For more examples and advanced usage, see the [full documentation](https://docs.task-master.dev).
|
||||||
@@ -1,4 +1,4 @@
|
|||||||
# Available Models as of September 19, 2025
|
# Available Models as of October 5, 2025
|
||||||
|
|
||||||
## Main Models
|
## Main Models
|
||||||
|
|
||||||
@@ -10,6 +10,8 @@
|
|||||||
| anthropic | claude-3-5-sonnet-20241022 | 0.49 | 3 | 15 |
|
| anthropic | claude-3-5-sonnet-20241022 | 0.49 | 3 | 15 |
|
||||||
| claude-code | opus | 0.725 | 0 | 0 |
|
| claude-code | opus | 0.725 | 0 | 0 |
|
||||||
| claude-code | sonnet | 0.727 | 0 | 0 |
|
| claude-code | sonnet | 0.727 | 0 | 0 |
|
||||||
|
| codex-cli | gpt-5 | 0.749 | 0 | 0 |
|
||||||
|
| codex-cli | gpt-5-codex | 0.749 | 0 | 0 |
|
||||||
| mcp | mcp-sampling | — | 0 | 0 |
|
| mcp | mcp-sampling | — | 0 | 0 |
|
||||||
| gemini-cli | gemini-2.5-pro | 0.72 | 0 | 0 |
|
| gemini-cli | gemini-2.5-pro | 0.72 | 0 | 0 |
|
||||||
| gemini-cli | gemini-2.5-flash | 0.71 | 0 | 0 |
|
| gemini-cli | gemini-2.5-flash | 0.71 | 0 | 0 |
|
||||||
@@ -100,6 +102,8 @@
|
|||||||
| ----------- | -------------------------------------------- | --------- | ---------- | ----------- |
|
| ----------- | -------------------------------------------- | --------- | ---------- | ----------- |
|
||||||
| claude-code | opus | 0.725 | 0 | 0 |
|
| claude-code | opus | 0.725 | 0 | 0 |
|
||||||
| claude-code | sonnet | 0.727 | 0 | 0 |
|
| claude-code | sonnet | 0.727 | 0 | 0 |
|
||||||
|
| codex-cli | gpt-5 | 0.749 | 0 | 0 |
|
||||||
|
| codex-cli | gpt-5-codex | 0.749 | 0 | 0 |
|
||||||
| mcp | mcp-sampling | — | 0 | 0 |
|
| mcp | mcp-sampling | — | 0 | 0 |
|
||||||
| gemini-cli | gemini-2.5-pro | 0.72 | 0 | 0 |
|
| gemini-cli | gemini-2.5-pro | 0.72 | 0 | 0 |
|
||||||
| gemini-cli | gemini-2.5-flash | 0.71 | 0 | 0 |
|
| gemini-cli | gemini-2.5-flash | 0.71 | 0 | 0 |
|
||||||
@@ -119,7 +123,7 @@
|
|||||||
| groq | deepseek-r1-distill-llama-70b | 0.52 | 0.75 | 0.99 |
|
| groq | deepseek-r1-distill-llama-70b | 0.52 | 0.75 | 0.99 |
|
||||||
| perplexity | sonar-pro | — | 3 | 15 |
|
| perplexity | sonar-pro | — | 3 | 15 |
|
||||||
| perplexity | sonar | — | 1 | 1 |
|
| perplexity | sonar | — | 1 | 1 |
|
||||||
| perplexity | deep-research | 0.211 | 2 | 8 |
|
| perplexity | sonar-deep-research | 0.211 | 2 | 8 |
|
||||||
| perplexity | sonar-reasoning-pro | 0.211 | 2 | 8 |
|
| perplexity | sonar-reasoning-pro | 0.211 | 2 | 8 |
|
||||||
| perplexity | sonar-reasoning | 0.211 | 1 | 5 |
|
| perplexity | sonar-reasoning | 0.211 | 1 | 5 |
|
||||||
| bedrock | us.anthropic.claude-3-opus-20240229-v1:0 | 0.725 | 15 | 75 |
|
| bedrock | us.anthropic.claude-3-opus-20240229-v1:0 | 0.725 | 15 | 75 |
|
||||||
@@ -140,6 +144,8 @@
|
|||||||
| anthropic | claude-3-5-sonnet-20241022 | 0.49 | 3 | 15 |
|
| anthropic | claude-3-5-sonnet-20241022 | 0.49 | 3 | 15 |
|
||||||
| claude-code | opus | 0.725 | 0 | 0 |
|
| claude-code | opus | 0.725 | 0 | 0 |
|
||||||
| claude-code | sonnet | 0.727 | 0 | 0 |
|
| claude-code | sonnet | 0.727 | 0 | 0 |
|
||||||
|
| codex-cli | gpt-5 | 0.749 | 0 | 0 |
|
||||||
|
| codex-cli | gpt-5-codex | 0.749 | 0 | 0 |
|
||||||
| mcp | mcp-sampling | — | 0 | 0 |
|
| mcp | mcp-sampling | — | 0 | 0 |
|
||||||
| gemini-cli | gemini-2.5-pro | 0.72 | 0 | 0 |
|
| gemini-cli | gemini-2.5-pro | 0.72 | 0 | 0 |
|
||||||
| gemini-cli | gemini-2.5-flash | 0.71 | 0 | 0 |
|
| gemini-cli | gemini-2.5-flash | 0.71 | 0 | 0 |
|
||||||
|
|||||||
510
docs/providers/codex-cli.md
Normal file
@@ -0,0 +1,510 @@
|
|||||||
|
# Codex CLI Provider
|
||||||
|
|
||||||
|
The `codex-cli` provider integrates Task Master with OpenAI's Codex CLI via the community AI SDK provider [`ai-sdk-provider-codex-cli`](https://github.com/ben-vargas/ai-sdk-provider-codex-cli). It uses your ChatGPT subscription (OAuth) via `codex login`, with optional `OPENAI_CODEX_API_KEY` support.
|
||||||
|
|
||||||
|
## Why Use Codex CLI?
|
||||||
|
|
||||||
|
The primary benefits of using the `codex-cli` provider include:
|
||||||
|
|
||||||
|
- **Use Latest OpenAI Models**: Access to cutting-edge models like GPT-5 and GPT-5-Codex via ChatGPT subscription
|
||||||
|
- **OAuth Authentication**: No API key management needed - authenticate once with `codex login`
|
||||||
|
- **Built-in Tool Execution**: Native support for command execution, file changes, MCP tools, and web search
|
||||||
|
- **Native JSON Schema Support**: Structured output generation without post-processing
|
||||||
|
- **Approval/Sandbox Modes**: Fine-grained control over command execution and filesystem access for safety
|
||||||
|
|
||||||
|
## Quickstart
|
||||||
|
|
||||||
|
Get up and running with Codex CLI in 3 steps:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# 1. Install Codex CLI globally
|
||||||
|
npm install -g @openai/codex
|
||||||
|
|
||||||
|
# 2. Authenticate with your ChatGPT account
|
||||||
|
codex login
|
||||||
|
|
||||||
|
# 3. Configure Task Master to use Codex CLI
|
||||||
|
task-master models --set-main gpt-5-codex --codex-cli
|
||||||
|
```
|
||||||
|
|
||||||
|
## Requirements
|
||||||
|
|
||||||
|
- **Node.js**: >= 18.0.0
|
||||||
|
- **Codex CLI**: >= 0.42.0 (>= 0.44.0 recommended)
|
||||||
|
- **ChatGPT Subscription**: Required for OAuth access (Plus, Pro, Business, Edu, or Enterprise)
|
||||||
|
- **Task Master**: >= 0.27.3 (version with Codex CLI support)
|
||||||
|
|
||||||
|
### Checking Your Versions
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Check Node.js version
|
||||||
|
node --version
|
||||||
|
|
||||||
|
# Check Codex CLI version
|
||||||
|
codex --version
|
||||||
|
|
||||||
|
# Check Task Master version
|
||||||
|
task-master --version
|
||||||
|
```
|
||||||
|
|
||||||
|
## Installation
|
||||||
|
|
||||||
|
### Install Codex CLI
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Install globally via npm
|
||||||
|
npm install -g @openai/codex
|
||||||
|
|
||||||
|
# Verify installation
|
||||||
|
codex --version
|
||||||
|
```
|
||||||
|
|
||||||
|
Expected output: `v0.44.0` or higher
|
||||||
|
|
||||||
|
### Install Task Master (if not already installed)
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Install globally
|
||||||
|
npm install -g task-master-ai
|
||||||
|
|
||||||
|
# Or install in your project
|
||||||
|
npm install --save-dev task-master-ai
|
||||||
|
```
|
||||||
|
|
||||||
|
## Authentication
|
||||||
|
|
||||||
|
### OAuth Authentication (Primary Method - Recommended)
|
||||||
|
|
||||||
|
The Codex CLI provider is designed to use OAuth authentication with your ChatGPT subscription:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Launch Codex CLI and authenticate
|
||||||
|
codex login
|
||||||
|
```
|
||||||
|
|
||||||
|
This will:
|
||||||
|
1. Open a browser window for OAuth authentication
|
||||||
|
2. Prompt you to log in with your ChatGPT account
|
||||||
|
3. Store authentication credentials locally
|
||||||
|
4. Allow Task Master to automatically use these credentials
|
||||||
|
|
||||||
|
To verify your authentication:
|
||||||
|
```bash
|
||||||
|
# Open interactive Codex CLI
|
||||||
|
codex
|
||||||
|
|
||||||
|
# Use /about command to see auth status
|
||||||
|
/about
|
||||||
|
```
|
||||||
|
|
||||||
|
### Optional: API Key Method
|
||||||
|
|
||||||
|
While OAuth is the primary and recommended method, you can optionally use an OpenAI API key:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# In your .env file
|
||||||
|
OPENAI_CODEX_API_KEY=sk-your-openai-api-key-here
|
||||||
|
```
|
||||||
|
|
||||||
|
**Important Notes**:
|
||||||
|
- The API key will **only** be injected when explicitly provided
|
||||||
|
- OAuth authentication is always preferred when available
|
||||||
|
- Using an API key doesn't provide access to subscription-only models like GPT-5-Codex
|
||||||
|
- For full OpenAI API access with non-subscription models, consider using the standard `openai` provider instead
|
||||||
|
- `OPENAI_CODEX_API_KEY` is specific to the codex-cli provider to avoid conflicts with the `openai` provider's `OPENAI_API_KEY`
|
||||||
|
|
||||||
|
## Available Models
|
||||||
|
|
||||||
|
The Codex CLI provider supports only models available through ChatGPT subscription:
|
||||||
|
|
||||||
|
| Model ID | Description | Max Input Tokens | Max Output Tokens |
|
||||||
|
|----------|-------------|------------------|-------------------|
|
||||||
|
| `gpt-5` | Latest GPT-5 model | 272K | 128K |
|
||||||
|
| `gpt-5-codex` | GPT-5 optimized for agentic software engineering | 272K | 128K |
|
||||||
|
|
||||||
|
**Note**: These models are only available via OAuth subscription through Codex CLI (ChatGPT Plus, Pro, Business, Edu, or Enterprise plans). For other OpenAI models, use the standard `openai` provider with an API key.
|
||||||
|
|
||||||
|
**Research Capabilities**: Both GPT-5 models support web search tools, making them suitable for the `research` role in addition to `main` and `fallback` roles.
|
||||||
|
|
||||||
|
## Configuration
|
||||||
|
|
||||||
|
### Basic Configuration
|
||||||
|
|
||||||
|
Add Codex CLI to your `.taskmaster/config.json`:
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"models": {
|
||||||
|
"main": {
|
||||||
|
"provider": "codex-cli",
|
||||||
|
"modelId": "gpt-5-codex",
|
||||||
|
"maxTokens": 128000,
|
||||||
|
"temperature": 0.2
|
||||||
|
},
|
||||||
|
"fallback": {
|
||||||
|
"provider": "codex-cli",
|
||||||
|
"modelId": "gpt-5",
|
||||||
|
"maxTokens": 128000,
|
||||||
|
"temperature": 0.2
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Advanced Configuration with Codex CLI Settings
|
||||||
|
|
||||||
|
The `codexCli` section allows you to customize Codex CLI behavior:
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"models": {
|
||||||
|
"main": {
|
||||||
|
"provider": "codex-cli",
|
||||||
|
"modelId": "gpt-5-codex",
|
||||||
|
"maxTokens": 128000,
|
||||||
|
"temperature": 0.2
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"codexCli": {
|
||||||
|
"allowNpx": true,
|
||||||
|
"skipGitRepoCheck": true,
|
||||||
|
"approvalMode": "on-failure",
|
||||||
|
"sandboxMode": "workspace-write",
|
||||||
|
"verbose": false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Codex CLI Settings Reference
|
||||||
|
|
||||||
|
#### Core Settings
|
||||||
|
|
||||||
|
- **`allowNpx`** (boolean, default: `false`)
|
||||||
|
- Allow fallback to `npx @openai/codex` if the CLI is not found on PATH
|
||||||
|
- Useful for CI environments or systems without global npm installations
|
||||||
|
- Example: `"allowNpx": true`
|
||||||
|
|
||||||
|
- **`skipGitRepoCheck`** (boolean, default: `false`)
|
||||||
|
- Skip git repository safety check before execution
|
||||||
|
- Recommended for CI environments or non-repository usage
|
||||||
|
- Example: `"skipGitRepoCheck": true`
|
||||||
|
|
||||||
|
#### Execution Control
|
||||||
|
|
||||||
|
- **`approvalMode`** (string)
|
||||||
|
- Controls when to require user approval for command execution
|
||||||
|
- Options:
|
||||||
|
- `"untrusted"`: Require approval for all commands
|
||||||
|
- `"on-failure"`: Only require approval after a command fails (default)
|
||||||
|
- `"on-request"`: Approve only when explicitly requested
|
||||||
|
- `"never"`: Never require approval (use with caution)
|
||||||
|
- Example: `"approvalMode": "on-failure"`
|
||||||
|
|
||||||
|
- **`sandboxMode`** (string)
|
||||||
|
- Controls filesystem access permissions
|
||||||
|
- Options:
|
||||||
|
- `"read-only"`: Read-only access to filesystem
|
||||||
|
- `"workspace-write"`: Allow writes to workspace directory (default)
|
||||||
|
- `"danger-full-access"`: Full filesystem access (use with extreme caution)
|
||||||
|
- Example: `"sandboxMode": "workspace-write"`
|
||||||
|
|
||||||
|
#### Path and Environment
|
||||||
|
|
||||||
|
- **`codexPath`** (string, optional)
|
||||||
|
- Custom path to Codex CLI executable
|
||||||
|
- Useful when Codex is installed in a non-standard location
|
||||||
|
- Example: `"codexPath": "/usr/local/bin/codex"`
|
||||||
|
|
||||||
|
- **`cwd`** (string, optional)
|
||||||
|
- Working directory for Codex CLI execution
|
||||||
|
- Defaults to current working directory
|
||||||
|
- Example: `"cwd": "/path/to/project"`
|
||||||
|
|
||||||
|
- **`env`** (object, optional)
|
||||||
|
- Additional environment variables for Codex CLI
|
||||||
|
- Example: `"env": { "DEBUG": "true" }`
|
||||||
|
|
||||||
|
#### Advanced Settings
|
||||||
|
|
||||||
|
- **`fullAuto`** (boolean, optional)
|
||||||
|
- Fully automatic mode (equivalent to `--full-auto` flag)
|
||||||
|
- Bypasses most approvals for fully automated workflows
|
||||||
|
- Example: `"fullAuto": true`
|
||||||
|
|
||||||
|
- **`dangerouslyBypassApprovalsAndSandbox`** (boolean, optional)
|
||||||
|
- Bypass all safety checks including approvals and sandbox
|
||||||
|
- **WARNING**: Use with extreme caution - can execute arbitrary code
|
||||||
|
- Example: `"dangerouslyBypassApprovalsAndSandbox": false`
|
||||||
|
|
||||||
|
- **`color`** (string, optional)
|
||||||
|
- Force color handling in Codex CLI output
|
||||||
|
- Options: `"always"`, `"never"`, `"auto"`
|
||||||
|
- Example: `"color": "auto"`
|
||||||
|
|
||||||
|
- **`outputLastMessageFile`** (string, optional)
|
||||||
|
- Write last agent message to specified file
|
||||||
|
- Useful for debugging or logging
|
||||||
|
- Example: `"outputLastMessageFile": "./last-message.txt"`
|
||||||
|
|
||||||
|
- **`verbose`** (boolean, optional)
|
||||||
|
- Enable verbose provider logging
|
||||||
|
- Helpful for debugging issues
|
||||||
|
- Example: `"verbose": true`
|
||||||
|
|
||||||
|
### Command-Specific Settings
|
||||||
|
|
||||||
|
Override settings for specific Task Master commands:
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"codexCli": {
|
||||||
|
"allowNpx": true,
|
||||||
|
"approvalMode": "on-failure",
|
||||||
|
"commandSpecific": {
|
||||||
|
"parse-prd": {
|
||||||
|
"approvalMode": "never",
|
||||||
|
"verbose": true
|
||||||
|
},
|
||||||
|
"expand": {
|
||||||
|
"sandboxMode": "read-only"
|
||||||
|
},
|
||||||
|
"add-task": {
|
||||||
|
"approvalMode": "untrusted"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Usage
|
||||||
|
|
||||||
|
### Setting Codex CLI Models
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Set Codex CLI for main role
|
||||||
|
task-master models --set-main gpt-5-codex --codex-cli
|
||||||
|
|
||||||
|
# Set Codex CLI for fallback role
|
||||||
|
task-master models --set-fallback gpt-5 --codex-cli
|
||||||
|
|
||||||
|
# Set Codex CLI for research role
|
||||||
|
task-master models --set-research gpt-5 --codex-cli
|
||||||
|
|
||||||
|
# Verify configuration
|
||||||
|
task-master models
|
||||||
|
```
|
||||||
|
|
||||||
|
### Using Codex CLI with Task Master Commands

Once configured, use Task Master commands as normal:

```bash
# Parse a PRD with Codex CLI
task-master parse-prd my-requirements.txt

# Analyze project complexity
task-master analyze-complexity --research

# Expand a task into subtasks
task-master expand --id=1.2

# Add a new task with AI assistance
task-master add-task --prompt="Implement user authentication" --research
```

The provider will automatically use your OAuth credentials when Codex CLI is configured.
## Codebase Features

The Codex CLI provider is **codebase-capable**, meaning it can analyze and interact with your project files. This enables advanced features like:

- **Code Analysis**: Understanding your project structure and dependencies
- **Intelligent Suggestions**: Context-aware task recommendations
- **File Operations**: Reading and analyzing project files for better task generation
- **Pattern Recognition**: Identifying common patterns and best practices in your codebase

### Enabling Codebase Analysis

Codebase analysis is automatically enabled when:

1. Your provider is set to `codex-cli`
2. `enableCodebaseAnalysis` is `true` in your global configuration (default)

To verify or configure:

```json
{
  "global": {
    "enableCodebaseAnalysis": true
  }
}
```
## Troubleshooting

### "codex: command not found" Error

**Symptoms**: Task Master reports that the Codex CLI is not found.

**Solutions**:

1. **Install Codex CLI globally**:

   ```bash
   npm install -g @openai/codex
   ```

2. **Verify installation**:

   ```bash
   codex --version
   ```

3. **Alternative: Enable npx fallback**:

   ```json
   {
     "codexCli": {
       "allowNpx": true
     }
   }
   ```
### "Not logged in" Errors
|
||||||
|
|
||||||
|
**Symptoms**: Authentication errors when trying to use Codex CLI.
|
||||||
|
|
||||||
|
**Solutions**:
|
||||||
|
1. **Authenticate with OAuth**:
|
||||||
|
```bash
|
||||||
|
codex login
|
||||||
|
```
|
||||||
|
|
||||||
|
2. **Verify authentication status**:
|
||||||
|
```bash
|
||||||
|
codex
|
||||||
|
# Then use /about command
|
||||||
|
```
|
||||||
|
|
||||||
|
3. **Re-authenticate if needed**:
|
||||||
|
```bash
|
||||||
|
# Logout first
|
||||||
|
codex
|
||||||
|
# Use /auth command to change auth method
|
||||||
|
|
||||||
|
# Then login again
|
||||||
|
codex login
|
||||||
|
```
|
||||||
|
|
||||||
|
### "Old version" Warnings
|
||||||
|
|
||||||
|
**Symptoms**: Warnings about Codex CLI version being outdated.
|
||||||
|
|
||||||
|
**Solutions**:
|
||||||
|
1. **Check current version**:
|
||||||
|
```bash
|
||||||
|
codex --version
|
||||||
|
```
|
||||||
|
|
||||||
|
2. **Upgrade to latest version**:
|
||||||
|
```bash
|
||||||
|
npm install -g @openai/codex@latest
|
||||||
|
```
|
||||||
|
|
||||||
|
3. **Verify upgrade**:
|
||||||
|
```bash
|
||||||
|
codex --version
|
||||||
|
```
|
||||||
|
Should show >= 0.44.0
|
||||||
|
|
||||||
|
### "Model not available" Errors
|
||||||
|
|
||||||
|
**Symptoms**: Error indicating the requested model is not available.
|
||||||
|
|
||||||
|
**Causes and Solutions**:
|
||||||
|
|
||||||
|
1. **Using unsupported model**:
|
||||||
|
- Only `gpt-5` and `gpt-5-codex` are available via Codex CLI
|
||||||
|
- For other OpenAI models, use the standard `openai` provider
|
||||||
|
|
||||||
|
2. **Subscription not active**:
|
||||||
|
- Verify your ChatGPT subscription is active
|
||||||
|
- Check subscription status at <https://platform.openai.com>
|
||||||
|
|
||||||
|
3. **Wrong provider selected**:
|
||||||
|
- Verify you're using `--codex-cli` flag when setting models
|
||||||
|
- Check `.taskmaster/config.json` shows `"provider": "codex-cli"`
|
||||||
|
|
||||||
|
### API Key Not Being Used

**Symptoms**: You've set `OPENAI_CODEX_API_KEY` but it's not being used.

**Expected Behavior**:

- OAuth authentication is always preferred
- The API key is only injected when explicitly provided
- The API key doesn't grant access to subscription-only models

**Solutions**:

1. **Verify OAuth is working**:

   ```bash
   codex
   # Check /about for auth status
   ```

2. **If you want to force API key usage**:
   - This is not recommended with Codex CLI
   - Consider using the standard `openai` provider instead

3. **Verify the .env file is being loaded**:

   ```bash
   # Check if .env exists in the project root
   ls -la .env

   # Verify OPENAI_CODEX_API_KEY is set
   grep OPENAI_CODEX_API_KEY .env
   ```
### Approval/Sandbox Issues

**Symptoms**: Commands are blocked or filesystem access is denied.

**Solutions**:

1. **Adjust the approval mode**:

   ```json
   {
     "codexCli": {
       "approvalMode": "on-request"
     }
   }
   ```

2. **Adjust the sandbox mode**:

   ```json
   {
     "codexCli": {
       "sandboxMode": "workspace-write"
     }
   }
   ```

3. **For fully automated workflows** (use cautiously):

   ```json
   {
     "codexCli": {
       "fullAuto": true
     }
   }
   ```
## Important Notes

- **OAuth subscription required**: No API key is needed for basic operation, but an active ChatGPT subscription is required
- **Limited model selection**: Only `gpt-5` and `gpt-5-codex` are available via OAuth
- **Pricing information**: Not available for OAuth models (shows as "Unknown" in cost calculations)
- **No automatic dependency**: The `@openai/codex` package is not added to Task Master's dependencies - install it globally or enable `allowNpx`
- **Codebase analysis**: Automatically enabled when using the `codex-cli` provider
- **Safety first**: Default settings prioritize safety with `approvalMode: "on-failure"` and `sandboxMode: "workspace-write"`
## See Also

- [Configuration Guide](../configuration.md#codex-cli-provider) - Complete Codex CLI configuration reference
- [Command Reference](../command-reference.md) - Using the `--codex-cli` flag with commands
- [Gemini CLI Provider](./gemini-cli.md) - Similar CLI-based provider for Google Gemini
- [Claude Code Integration](../claude-code-integration.md) - Another CLI-based provider
- [ai-sdk-provider-codex-cli](https://github.com/ben-vargas/ai-sdk-provider-codex-cli) - Source code for the provider package
@@ -69,11 +69,29 @@ export function resolveTasksPath(args, log = silentLogger) {
 	// Use core findTasksPath with explicit path and normalized projectRoot context
 	if (projectRoot) {
-		return coreFindTasksPath(explicitPath, { projectRoot }, log);
+		const foundPath = coreFindTasksPath(explicitPath, { projectRoot }, log);
+		// If the core function returns null and no explicit path was provided,
+		// construct the expected default path as documented
+		if (foundPath === null && !explicitPath) {
+			const defaultPath = path.join(
+				projectRoot,
+				'.taskmaster',
+				'tasks',
+				'tasks.json'
+			);
+			log?.info?.(
+				`Core findTasksPath returned null, using default path: ${defaultPath}`
+			);
+			return defaultPath;
+		}
+		return foundPath;
 	}
 
 	// Fallback to core function without projectRoot context
-	return coreFindTasksPath(explicitPath, null, log);
+	const foundPath = coreFindTasksPath(explicitPath, null, log);
+	// Note: when no projectRoot is available, we can't construct a default path,
+	// so we return null and let the calling code handle the error
+	return foundPath;
 }
 
 /**
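The practical effect of this hunk, as a minimal sketch (call shape and logger assumed from the snippet above, not taken from the diff): when a projectRoot is known but no explicit file argument is given and the core lookup finds nothing, the resolver now falls back to the documented default instead of returning null.

```js
// Hypothetical illustration only - argument names follow the diff above.
const tasksPath = resolveTasksPath({ projectRoot: '/repo' }, logger);
// -> '/repo/.taskmaster/tasks/tasks.json' when coreFindTasksPath returns null
```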
@@ -75,13 +75,50 @@ function generateExampleFromSchema(schema) {
 			return result;
 
 		case 'ZodString':
-			return 'string';
+			// Check for min/max length constraints
+			if (def.checks) {
+				const minCheck = def.checks.find((c) => c.kind === 'min');
+				const maxCheck = def.checks.find((c) => c.kind === 'max');
+				if (minCheck && maxCheck) {
+					return (
+						'<string between ' +
+						minCheck.value +
+						'-' +
+						maxCheck.value +
+						' characters>'
+					);
+				} else if (minCheck) {
+					return '<string with at least ' + minCheck.value + ' characters>';
+				} else if (maxCheck) {
+					return '<string up to ' + maxCheck.value + ' characters>';
+				}
+			}
+			return '<string>';
 
 		case 'ZodNumber':
-			return 0;
+			// Check for int, positive, min/max constraints
+			if (def.checks) {
+				const intCheck = def.checks.find((c) => c.kind === 'int');
+				const minCheck = def.checks.find((c) => c.kind === 'min');
+				const maxCheck = def.checks.find((c) => c.kind === 'max');
+
+				if (intCheck && minCheck && minCheck.value > 0) {
+					return '<positive integer>';
+				} else if (intCheck) {
+					return '<integer>';
+				} else if (minCheck || maxCheck) {
+					return (
+						'<number' +
+						(minCheck ? ' >= ' + minCheck.value : '') +
+						(maxCheck ? ' <= ' + maxCheck.value : '') +
+						'>'
+					);
+				}
+			}
+			return '<number>';
 
 		case 'ZodBoolean':
-			return false;
+			return '<boolean>';
 
 		case 'ZodArray':
 			const elementExample = generateExampleFromSchema(def.type);
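To make the new placeholder behavior concrete, here is an illustrative sketch (not part of the diff) of what the branches above would produce for a small Zod schema, assuming `generateExampleFromSchema` is applied to each field's definition:

```js
import { z } from 'zod';

// Example schema with constrained fields and the placeholder each branch yields
const schema = z.object({
	title: z.string().min(3).max(80), // -> '<string between 3-80 characters>'
	priority: z.number().int().min(1), // -> '<positive integer>'
	done: z.boolean() // -> '<boolean>'
});
```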
output.txt (new file, 49 lines): diff suppressed because one or more lines are too long

package-lock.json (generated, 11788 lines): diff suppressed because it is too large

package.json (53 lines):
@@ -1,6 +1,6 @@
 {
 	"name": "task-master-ai",
-	"version": "0.27.3",
+	"version": "0.28.0-rc.1",
 	"description": "A task management system for ambitious AI-driven development that doesn't overwhelm and confuse Cursor.",
 	"main": "index.js",
 	"type": "module",
@@ -17,7 +17,7 @@
 		"turbo:build": "turbo build",
 		"turbo:typecheck": "turbo typecheck",
 		"build:build-config": "npm run build -w @tm/build-config",
-		"test": "node --experimental-vm-modules node_modules/.bin/jest",
+		"test": "cross-env NODE_ENV=test node --experimental-vm-modules node_modules/.bin/jest",
 		"test:unit": "node --experimental-vm-modules node_modules/.bin/jest --testPathPattern=unit",
 		"test:integration": "node --experimental-vm-modules node_modules/.bin/jest --testPathPattern=integration",
 		"test:fails": "node --experimental-vm-modules node_modules/.bin/jest --onlyFailures",
@@ -52,23 +52,27 @@
 	"author": "Eyal Toledano",
 	"license": "MIT WITH Commons-Clause",
 	"dependencies": {
-		"@ai-sdk/amazon-bedrock": "^2.2.9",
-		"@ai-sdk/anthropic": "^1.2.10",
-		"@ai-sdk/azure": "^1.3.17",
-		"@ai-sdk/google": "^1.2.13",
-		"@ai-sdk/google-vertex": "^2.2.23",
-		"@ai-sdk/groq": "^1.2.9",
-		"@ai-sdk/mistral": "^1.2.7",
-		"@ai-sdk/openai": "^1.3.20",
-		"@ai-sdk/perplexity": "^1.1.7",
-		"@ai-sdk/xai": "^1.2.15",
-		"@anthropic-ai/sdk": "^0.39.0",
-		"@aws-sdk/credential-providers": "^3.817.0",
+		"@ai-sdk/amazon-bedrock": "^3.0.23",
+		"@ai-sdk/anthropic": "^2.0.18",
+		"@ai-sdk/azure": "^2.0.34",
+		"@ai-sdk/google": "^2.0.16",
+		"@ai-sdk/google-vertex": "^3.0.29",
+		"@ai-sdk/groq": "^2.0.21",
+		"@ai-sdk/mistral": "^2.0.16",
+		"@ai-sdk/openai": "^2.0.34",
+		"@ai-sdk/perplexity": "^2.0.10",
+		"@ai-sdk/provider": "^2.0.0",
+		"@ai-sdk/provider-utils": "^3.0.10",
+		"@ai-sdk/xai": "^2.0.22",
+		"@aws-sdk/credential-providers": "^3.895.0",
 		"@inquirer/search": "^3.0.15",
-		"@openrouter/ai-sdk-provider": "^0.4.5",
+		"@openrouter/ai-sdk-provider": "^1.2.0",
 		"@streamparser/json": "^0.0.22",
 		"@supabase/supabase-js": "^2.57.4",
-		"ai": "^4.3.10",
+		"ai": "^5.0.51",
+		"ai-sdk-provider-claude-code": "^1.1.4",
+		"ai-sdk-provider-codex-cli": "^0.3.0",
+		"ai-sdk-provider-gemini-cli": "^1.1.1",
 		"ajv": "^8.17.1",
 		"ajv-formats": "^3.0.1",
 		"boxen": "^8.0.1",
@@ -78,7 +82,7 @@
 		"cli-table3": "^0.6.5",
 		"commander": "^12.1.0",
 		"cors": "^2.8.5",
-		"dotenv": "^16.3.1",
+		"dotenv": "^16.6.1",
 		"express": "^4.21.2",
 		"fastmcp": "^3.5.0",
 		"figlet": "^1.8.0",
@@ -93,17 +97,14 @@
 		"lru-cache": "^10.2.0",
 		"marked": "^15.0.12",
 		"marked-terminal": "^7.3.0",
-		"ollama-ai-provider": "^1.2.0",
-		"openai": "^4.89.0",
+		"ollama-ai-provider-v2": "^1.3.1",
 		"ora": "^8.2.0",
 		"uuid": "^11.1.0",
-		"zod": "^3.23.8",
-		"zod-to-json-schema": "^3.24.5"
+		"zod": "^4.1.11"
 	},
 	"optionalDependencies": {
 		"@anthropic-ai/claude-code": "^1.0.88",
-		"@biomejs/cli-linux-x64": "^1.9.4",
-		"ai-sdk-provider-gemini-cli": "^0.1.3"
+		"@biomejs/cli-linux-x64": "^1.9.4"
 	},
 	"engines": {
 		"node": ">=18.0.0"
@@ -127,12 +128,12 @@
 		"@changesets/changelog-github": "^0.5.1",
 		"@changesets/cli": "^2.28.1",
 		"@manypkg/cli": "^0.25.1",
+		"@tm/ai-sdk-provider-grok-cli": "*",
 		"@tm/cli": "*",
 		"@types/jest": "^29.5.14",
 		"@types/marked-terminal": "^6.1.1",
 		"concurrently": "^9.2.1",
 		"cross-env": "^10.0.0",
-		"dotenv-mono": "^1.5.1",
 		"execa": "^8.0.1",
 		"jest": "^29.7.0",
 		"jest-environment-node": "^29.7.0",
@@ -142,7 +143,7 @@
 		"ts-jest": "^29.4.2",
 		"tsdown": "^0.15.2",
 		"tsx": "^4.20.4",
-		"turbo": "^2.5.6",
-		"typescript": "^5.7.3"
+		"turbo": "2.5.6",
+		"typescript": "^5.9.2"
 	}
 }
packages/ai-sdk-provider-grok-cli/README.md (new file, 165 lines)
@@ -0,0 +1,165 @@

# AI SDK Provider for Grok CLI

A provider for the [AI SDK](https://sdk.vercel.ai) that integrates with [Grok CLI](https://docs.x.ai/api) for accessing xAI's Grok language models.

## Features

- ✅ **AI SDK v5 Compatible** - Full support for the latest AI SDK interfaces
- ✅ **Streaming & Non-streaming** - Both generation modes supported
- ✅ **Error Handling** - Comprehensive error handling with retry logic
- ✅ **Type Safety** - Full TypeScript support with proper type definitions
- ✅ **JSON Mode** - Automatic JSON extraction from responses
- ✅ **Abort Signals** - Proper cancellation support

## Installation

```bash
npm install @tm/ai-sdk-provider-grok-cli
# or
yarn add @tm/ai-sdk-provider-grok-cli
```

## Prerequisites

1. Install the Grok CLI:

   ```bash
   npm install -g grok-cli
   # or follow xAI's installation instructions
   ```

2. Set up authentication:

   ```bash
   export GROK_CLI_API_KEY="your-api-key"
   # or configure via grok CLI: grok config set api-key your-key
   ```

## Usage

### Basic Usage

```typescript
import { grokCli } from '@tm/ai-sdk-provider-grok-cli';
import { generateText } from 'ai';

const result = await generateText({
	model: grokCli('grok-3-latest'),
	prompt: 'Write a haiku about TypeScript'
});

console.log(result.text);
```

### Streaming

```typescript
import { grokCli } from '@tm/ai-sdk-provider-grok-cli';
import { streamText } from 'ai';

const { textStream } = await streamText({
	model: grokCli('grok-4-latest'),
	prompt: 'Explain quantum computing'
});

for await (const delta of textStream) {
	process.stdout.write(delta);
}
```

### JSON Mode

```typescript
import { grokCli } from '@tm/ai-sdk-provider-grok-cli';
import { generateObject } from 'ai';
import { z } from 'zod';

const result = await generateObject({
	model: grokCli('grok-3-latest'),
	schema: z.object({
		name: z.string(),
		age: z.number(),
		hobbies: z.array(z.string())
	}),
	prompt: 'Generate a person profile'
});

console.log(result.object);
```

## Supported Models

- `grok-3-latest` - Grok 3 (latest version)
- `grok-4-latest` - Grok 4 (latest version)
- `grok-4` - Grok 4 (stable)
- Custom model strings supported

## Configuration

### Provider Settings

```typescript
import { createGrokCli } from '@tm/ai-sdk-provider-grok-cli';

const grok = createGrokCli({
	apiKey: 'your-api-key', // Optional if set via env/CLI
	timeout: 120000, // 2 minutes default
	workingDirectory: '/path/to/project', // Optional
	baseURL: 'https://api.x.ai' // Optional
});
```

### Model Settings

```typescript
const model = grok('grok-4-latest', {
	timeout: 300000, // 5 minutes for grok-4
	// Other CLI-specific settings
});
```

## Error Handling

The provider includes comprehensive error handling:

```typescript
import {
	isAuthenticationError,
	isTimeoutError,
	isInstallationError
} from '@tm/ai-sdk-provider-grok-cli';

try {
	const result = await generateText({
		model: grokCli('grok-4-latest'),
		prompt: 'Hello!'
	});
} catch (error) {
	if (isAuthenticationError(error)) {
		console.error('Authentication failed:', error.message);
	} else if (isTimeoutError(error)) {
		console.error('Request timed out:', error.message);
	} else if (isInstallationError(error)) {
		console.error('Grok CLI not installed or not found in PATH');
	}
}
```

## Development

```bash
# Install dependencies
npm install

# Start development mode (keep running during development)
npm run dev

# Type check
npm run typecheck

# Run tests (requires build first)
NODE_ENV=production npm run build
npm test
```

**Important**: Always run `npm run dev` and keep it running during development. This ensures proper compilation and hot-reloading of TypeScript files.
packages/ai-sdk-provider-grok-cli/package.json (new file, 35 lines)
@@ -0,0 +1,35 @@

{
	"name": "@tm/ai-sdk-provider-grok-cli",
	"private": true,
	"description": "AI SDK provider for Grok CLI integration",
	"type": "module",
	"types": "./src/index.ts",
	"main": "./dist/index.js",
	"exports": {
		".": "./src/index.ts"
	},
	"scripts": {
		"test": "vitest run",
		"test:watch": "vitest",
		"test:ui": "vitest --ui",
		"typecheck": "tsc --noEmit"
	},
	"dependencies": {
		"@ai-sdk/provider": "^2.0.0",
		"@ai-sdk/provider-utils": "^3.0.10",
		"jsonc-parser": "^3.3.1"
	},
	"devDependencies": {
		"@types/node": "^22.18.6",
		"typescript": "^5.9.2",
		"vitest": "^3.2.4"
	},
	"engines": {
		"node": ">=18"
	},
	"keywords": ["ai", "grok", "x.ai", "cli", "language-model", "provider"],
	"files": ["dist/**/*", "README.md"],
	"publishConfig": {
		"access": "public"
	}
}
188
packages/ai-sdk-provider-grok-cli/src/errors.test.ts
Normal file
188
packages/ai-sdk-provider-grok-cli/src/errors.test.ts
Normal file
@@ -0,0 +1,188 @@
|
|||||||
|
/**
|
||||||
|
* Tests for error handling utilities
|
||||||
|
*/
|
||||||
|
|
||||||
|
import { APICallError, LoadAPIKeyError } from '@ai-sdk/provider';
|
||||||
|
import { describe, expect, it } from 'vitest';
|
||||||
|
import {
|
||||||
|
createAPICallError,
|
||||||
|
createAuthenticationError,
|
||||||
|
createInstallationError,
|
||||||
|
createTimeoutError,
|
||||||
|
getErrorMetadata,
|
||||||
|
isAuthenticationError,
|
||||||
|
isInstallationError,
|
||||||
|
isTimeoutError
|
||||||
|
} from './errors.js';
|
||||||
|
|
||||||
|
describe('createAPICallError', () => {
|
||||||
|
it('should create APICallError with metadata', () => {
|
||||||
|
const error = createAPICallError({
|
||||||
|
message: 'Test error',
|
||||||
|
code: 'TEST_ERROR',
|
||||||
|
exitCode: 1,
|
||||||
|
stderr: 'Error output',
|
||||||
|
stdout: 'Success output',
|
||||||
|
promptExcerpt: 'Test prompt',
|
||||||
|
isRetryable: true
|
||||||
|
});
|
||||||
|
|
||||||
|
expect(error).toBeInstanceOf(APICallError);
|
||||||
|
expect(error.message).toBe('Test error');
|
||||||
|
expect(error.isRetryable).toBe(true);
|
||||||
|
expect(error.url).toBe('grok-cli://command');
|
||||||
|
expect(error.data).toEqual({
|
||||||
|
code: 'TEST_ERROR',
|
||||||
|
exitCode: 1,
|
||||||
|
stderr: 'Error output',
|
||||||
|
stdout: 'Success output',
|
||||||
|
promptExcerpt: 'Test prompt'
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should create APICallError with minimal parameters', () => {
|
||||||
|
const error = createAPICallError({
|
||||||
|
message: 'Simple error'
|
||||||
|
});
|
||||||
|
|
||||||
|
expect(error).toBeInstanceOf(APICallError);
|
||||||
|
expect(error.message).toBe('Simple error');
|
||||||
|
expect(error.isRetryable).toBe(false);
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('createAuthenticationError', () => {
|
||||||
|
it('should create LoadAPIKeyError with custom message', () => {
|
||||||
|
const error = createAuthenticationError({
|
||||||
|
message: 'Custom auth error'
|
||||||
|
});
|
||||||
|
|
||||||
|
expect(error).toBeInstanceOf(LoadAPIKeyError);
|
||||||
|
expect(error.message).toBe('Custom auth error');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should create LoadAPIKeyError with default message', () => {
|
||||||
|
const error = createAuthenticationError({});
|
||||||
|
|
||||||
|
expect(error).toBeInstanceOf(LoadAPIKeyError);
|
||||||
|
expect(error.message).toContain('Authentication failed');
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('createTimeoutError', () => {
|
||||||
|
it('should create APICallError for timeout', () => {
|
||||||
|
const error = createTimeoutError({
|
||||||
|
message: 'Operation timed out',
|
||||||
|
timeoutMs: 5000,
|
||||||
|
promptExcerpt: 'Test prompt'
|
||||||
|
});
|
||||||
|
|
||||||
|
expect(error).toBeInstanceOf(APICallError);
|
||||||
|
expect(error.message).toBe('Operation timed out');
|
||||||
|
expect(error.isRetryable).toBe(true);
|
||||||
|
expect(error.data).toEqual({
|
||||||
|
code: 'TIMEOUT',
|
||||||
|
promptExcerpt: 'Test prompt',
|
||||||
|
timeoutMs: 5000
|
||||||
|
});
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('createInstallationError', () => {
|
||||||
|
it('should create APICallError for installation issues', () => {
|
||||||
|
const error = createInstallationError({
|
||||||
|
message: 'CLI not found'
|
||||||
|
});
|
||||||
|
|
||||||
|
expect(error).toBeInstanceOf(APICallError);
|
||||||
|
expect(error.message).toBe('CLI not found');
|
||||||
|
expect(error.isRetryable).toBe(false);
|
||||||
|
expect(error.url).toBe('grok-cli://installation');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should create APICallError with default message', () => {
|
||||||
|
const error = createInstallationError({});
|
||||||
|
|
||||||
|
expect(error).toBeInstanceOf(APICallError);
|
||||||
|
expect(error.message).toContain('Grok CLI is not installed');
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('isAuthenticationError', () => {
|
||||||
|
it('should return true for LoadAPIKeyError', () => {
|
||||||
|
const error = new LoadAPIKeyError({ message: 'Auth failed' });
|
||||||
|
expect(isAuthenticationError(error)).toBe(true);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should return true for APICallError with 401 exit code', () => {
|
||||||
|
const error = new APICallError({
|
||||||
|
message: 'Unauthorized',
|
||||||
|
data: { exitCode: 401 }
|
||||||
|
});
|
||||||
|
expect(isAuthenticationError(error)).toBe(true);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should return false for other errors', () => {
|
||||||
|
const error = new Error('Generic error');
|
||||||
|
expect(isAuthenticationError(error)).toBe(false);
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('isTimeoutError', () => {
|
||||||
|
it('should return true for timeout APICallError', () => {
|
||||||
|
const error = new APICallError({
|
||||||
|
message: 'Timeout',
|
||||||
|
data: { code: 'TIMEOUT' }
|
||||||
|
});
|
||||||
|
expect(isTimeoutError(error)).toBe(true);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should return false for other errors', () => {
|
||||||
|
const error = new APICallError({ message: 'Other error' });
|
||||||
|
expect(isTimeoutError(error)).toBe(false);
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('isInstallationError', () => {
|
||||||
|
it('should return true for installation APICallError', () => {
|
||||||
|
const error = new APICallError({
|
||||||
|
message: 'Not installed',
|
||||||
|
url: 'grok-cli://installation'
|
||||||
|
});
|
||||||
|
expect(isInstallationError(error)).toBe(true);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should return false for other errors', () => {
|
||||||
|
const error = new APICallError({ message: 'Other error' });
|
||||||
|
expect(isInstallationError(error)).toBe(false);
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('getErrorMetadata', () => {
|
||||||
|
it('should return metadata from APICallError', () => {
|
||||||
|
const metadata = {
|
||||||
|
code: 'TEST_ERROR',
|
||||||
|
exitCode: 1,
|
||||||
|
stderr: 'Error output'
|
||||||
|
};
|
||||||
|
const error = new APICallError({
|
||||||
|
message: 'Test error',
|
||||||
|
data: metadata
|
||||||
|
});
|
||||||
|
|
||||||
|
const result = getErrorMetadata(error);
|
||||||
|
expect(result).toEqual(metadata);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should return undefined for errors without metadata', () => {
|
||||||
|
const error = new Error('Generic error');
|
||||||
|
const result = getErrorMetadata(error);
|
||||||
|
expect(result).toBeUndefined();
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should return undefined for APICallError without data', () => {
|
||||||
|
const error = new APICallError({ message: 'Test error' });
|
||||||
|
const result = getErrorMetadata(error);
|
||||||
|
expect(result).toBeUndefined();
|
||||||
|
});
|
||||||
|
});
|
||||||
187
packages/ai-sdk-provider-grok-cli/src/errors.ts
Normal file
187
packages/ai-sdk-provider-grok-cli/src/errors.ts
Normal file
@@ -0,0 +1,187 @@
|
|||||||
|
/**
|
||||||
|
* Error handling utilities for Grok CLI provider
|
||||||
|
*/
|
||||||
|
|
||||||
|
import { APICallError, LoadAPIKeyError } from '@ai-sdk/provider';
|
||||||
|
import type { GrokCliErrorMetadata } from './types.js';
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Parameters for creating API call errors
|
||||||
|
*/
|
||||||
|
interface CreateAPICallErrorParams {
|
||||||
|
/** Error message */
|
||||||
|
message: string;
|
||||||
|
/** Error code */
|
||||||
|
code?: string;
|
||||||
|
/** Process exit code */
|
||||||
|
exitCode?: number;
|
||||||
|
/** Standard error output */
|
||||||
|
stderr?: string;
|
||||||
|
/** Standard output */
|
||||||
|
stdout?: string;
|
||||||
|
/** Excerpt of the prompt */
|
||||||
|
promptExcerpt?: string;
|
||||||
|
/** Whether the error is retryable */
|
||||||
|
isRetryable?: boolean;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Parameters for creating authentication errors
|
||||||
|
*/
|
||||||
|
interface CreateAuthenticationErrorParams {
|
||||||
|
/** Error message */
|
||||||
|
message?: string;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Parameters for creating timeout errors
|
||||||
|
*/
|
||||||
|
interface CreateTimeoutErrorParams {
|
||||||
|
/** Error message */
|
||||||
|
message: string;
|
||||||
|
/** Excerpt of the prompt */
|
||||||
|
promptExcerpt?: string;
|
||||||
|
/** Timeout in milliseconds */
|
||||||
|
timeoutMs: number;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Parameters for creating installation errors
|
||||||
|
*/
|
||||||
|
interface CreateInstallationErrorParams {
|
||||||
|
/** Error message */
|
||||||
|
message?: string;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Create an API call error with Grok CLI specific metadata
|
||||||
|
*/
|
||||||
|
export function createAPICallError({
|
||||||
|
message,
|
||||||
|
code,
|
||||||
|
exitCode,
|
||||||
|
stderr,
|
||||||
|
stdout,
|
||||||
|
promptExcerpt,
|
||||||
|
isRetryable = false
|
||||||
|
}: CreateAPICallErrorParams): APICallError {
|
||||||
|
const metadata: GrokCliErrorMetadata = {
|
||||||
|
code,
|
||||||
|
exitCode,
|
||||||
|
stderr,
|
||||||
|
stdout,
|
||||||
|
promptExcerpt
|
||||||
|
};
|
||||||
|
|
||||||
|
return new APICallError({
|
||||||
|
message,
|
||||||
|
isRetryable,
|
||||||
|
url: 'grok-cli://command',
|
||||||
|
requestBodyValues: promptExcerpt ? { prompt: promptExcerpt } : undefined,
|
||||||
|
data: metadata
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Create an authentication error
|
||||||
|
*/
|
||||||
|
export function createAuthenticationError({
|
||||||
|
message
|
||||||
|
}: CreateAuthenticationErrorParams): LoadAPIKeyError {
|
||||||
|
return new LoadAPIKeyError({
|
||||||
|
message:
|
||||||
|
message ||
|
||||||
|
'Authentication failed. Please ensure Grok CLI is properly configured with API key.'
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Create a timeout error
|
||||||
|
*/
|
||||||
|
export function createTimeoutError({
|
||||||
|
message,
|
||||||
|
promptExcerpt,
|
||||||
|
timeoutMs
|
||||||
|
}: CreateTimeoutErrorParams): APICallError {
|
||||||
|
const metadata: GrokCliErrorMetadata & { timeoutMs: number } = {
|
||||||
|
code: 'TIMEOUT',
|
||||||
|
promptExcerpt,
|
||||||
|
timeoutMs
|
||||||
|
};
|
||||||
|
|
||||||
|
return new APICallError({
|
||||||
|
message,
|
||||||
|
isRetryable: true,
|
||||||
|
url: 'grok-cli://command',
|
||||||
|
requestBodyValues: promptExcerpt ? { prompt: promptExcerpt } : undefined,
|
||||||
|
data: metadata
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Create a CLI installation error
|
||||||
|
*/
|
||||||
|
export function createInstallationError({
|
||||||
|
message
|
||||||
|
}: CreateInstallationErrorParams): APICallError {
|
||||||
|
return new APICallError({
|
||||||
|
message:
|
||||||
|
message ||
|
||||||
|
'Grok CLI is not installed or not found in PATH. Please install with: npm install -g @vibe-kit/grok-cli',
|
||||||
|
isRetryable: false,
|
||||||
|
url: 'grok-cli://installation',
|
||||||
|
requestBodyValues: undefined
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Check if an error is an authentication error
|
||||||
|
*/
|
||||||
|
export function isAuthenticationError(
|
||||||
|
error: unknown
|
||||||
|
): error is LoadAPIKeyError {
|
||||||
|
if (error instanceof LoadAPIKeyError) return true;
|
||||||
|
if (error instanceof APICallError) {
|
||||||
|
const metadata = error.data as GrokCliErrorMetadata | undefined;
|
||||||
|
if (!metadata) return false;
|
||||||
|
return (
|
||||||
|
metadata.exitCode === 401 ||
|
||||||
|
metadata.code === 'AUTHENTICATION_ERROR' ||
|
||||||
|
metadata.code === 'UNAUTHORIZED'
|
||||||
|
);
|
||||||
|
}
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Check if an error is a timeout error
|
||||||
|
*/
|
||||||
|
export function isTimeoutError(error: unknown): error is APICallError {
|
||||||
|
if (
|
||||||
|
error instanceof APICallError &&
|
||||||
|
(error.data as GrokCliErrorMetadata)?.code === 'TIMEOUT'
|
||||||
|
)
|
||||||
|
return true;
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Check if an error is an installation error
|
||||||
|
*/
|
||||||
|
export function isInstallationError(error: unknown): error is APICallError {
|
||||||
|
if (error instanceof APICallError && error.url === 'grok-cli://installation')
|
||||||
|
return true;
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get error metadata from an error
|
||||||
|
*/
|
||||||
|
export function getErrorMetadata(
|
||||||
|
error: unknown
|
||||||
|
): GrokCliErrorMetadata | undefined {
|
||||||
|
if (error instanceof APICallError && error.data) {
|
||||||
|
return error.data as GrokCliErrorMetadata;
|
||||||
|
}
|
||||||
|
return undefined;
|
||||||
|
}
|
||||||
@@ -1,53 +1,51 @@
|
|||||||
/**
|
/**
|
||||||
* @fileoverview Grok CLI Language Model implementation
|
* Grok CLI Language Model implementation for AI SDK v5
|
||||||
*/
|
*/
|
||||||
|
|
||||||
|
import { spawn } from 'child_process';
|
||||||
|
import { promises as fs } from 'fs';
|
||||||
|
import { homedir } from 'os';
|
||||||
|
import { join } from 'path';
|
||||||
|
import type {
|
||||||
|
LanguageModelV2,
|
||||||
|
LanguageModelV2CallOptions,
|
||||||
|
LanguageModelV2CallWarning
|
||||||
|
} from '@ai-sdk/provider';
|
||||||
import { NoSuchModelError } from '@ai-sdk/provider';
|
import { NoSuchModelError } from '@ai-sdk/provider';
|
||||||
import { generateId } from '@ai-sdk/provider-utils';
|
import { generateId } from '@ai-sdk/provider-utils';
|
||||||
import {
|
|
||||||
createPromptFromMessages,
|
|
||||||
convertFromGrokCliResponse,
|
|
||||||
escapeShellArg
|
|
||||||
} from './message-converter.js';
|
|
||||||
import { extractJson } from './json-extractor.js';
|
|
||||||
import {
|
import {
|
||||||
createAPICallError,
|
createAPICallError,
|
||||||
createAuthenticationError,
|
createAuthenticationError,
|
||||||
createInstallationError,
|
createInstallationError,
|
||||||
createTimeoutError
|
createTimeoutError
|
||||||
} from './errors.js';
|
} from './errors.js';
|
||||||
import { spawn } from 'child_process';
|
import { extractJson } from './json-extractor.js';
|
||||||
import { promises as fs } from 'fs';
|
import {
|
||||||
import { join } from 'path';
|
convertFromGrokCliResponse,
|
||||||
import { homedir } from 'os';
|
createPromptFromMessages,
|
||||||
|
escapeShellArg
|
||||||
|
} from './message-converter.js';
|
||||||
|
import type {
|
||||||
|
GrokCliLanguageModelOptions,
|
||||||
|
GrokCliModelId,
|
||||||
|
GrokCliSettings
|
||||||
|
} from './types.js';
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* @typedef {import('./types.js').GrokCliSettings} GrokCliSettings
|
* Grok CLI Language Model implementation for AI SDK v5
|
||||||
* @typedef {import('./types.js').GrokCliModelId} GrokCliModelId
|
|
||||||
*/
|
*/
|
||||||
|
export class GrokCliLanguageModel implements LanguageModelV2 {
|
||||||
|
readonly specificationVersion = 'v2' as const;
|
||||||
|
readonly defaultObjectGenerationMode = 'json' as const;
|
||||||
|
readonly supportsImageUrls = false;
|
||||||
|
readonly supportsStructuredOutputs = false;
|
||||||
|
readonly supportedUrls: Record<string, RegExp[]> = {};
|
||||||
|
|
||||||
/**
|
readonly modelId: GrokCliModelId;
|
||||||
* @typedef {Object} GrokCliLanguageModelOptions
|
readonly settings: GrokCliSettings;
|
||||||
* @property {GrokCliModelId} id - Model ID
|
|
||||||
* @property {GrokCliSettings} [settings] - Model settings
|
|
||||||
*/
|
|
||||||
|
|
||||||
export class GrokCliLanguageModel {
|
constructor(options: GrokCliLanguageModelOptions) {
|
||||||
specificationVersion = 'v1';
|
|
||||||
defaultObjectGenerationMode = 'json';
|
|
||||||
supportsImageUrls = false;
|
|
||||||
supportsStructuredOutputs = false;
|
|
||||||
|
|
||||||
/** @type {GrokCliModelId} */
|
|
||||||
modelId;
|
|
||||||
|
|
||||||
/** @type {GrokCliSettings} */
|
|
||||||
settings;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @param {GrokCliLanguageModelOptions} options
|
|
||||||
*/
|
|
||||||
constructor(options) {
|
|
||||||
this.modelId = options.id;
|
this.modelId = options.id;
|
||||||
this.settings = options.settings ?? {};
|
this.settings = options.settings ?? {};
|
||||||
|
|
||||||
@@ -64,15 +62,14 @@ export class GrokCliLanguageModel {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
get provider() {
|
get provider(): string {
|
||||||
return 'grok-cli';
|
return 'grok-cli';
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Check if Grok CLI is installed and available
|
* Check if Grok CLI is installed and available
|
||||||
* @returns {Promise<boolean>}
|
|
||||||
*/
|
*/
|
||||||
async checkGrokCliInstallation() {
|
private async checkGrokCliInstallation(): Promise<boolean> {
|
||||||
return new Promise((resolve) => {
|
return new Promise((resolve) => {
|
||||||
const child = spawn('grok', ['--version'], {
|
const child = spawn('grok', ['--version'], {
|
||||||
stdio: 'pipe'
|
stdio: 'pipe'
|
||||||
@@ -85,9 +82,8 @@ export class GrokCliLanguageModel {
|
|||||||
|
|
||||||
/**
|
/**
|
||||||
* Get API key from settings or environment
|
* Get API key from settings or environment
|
||||||
* @returns {Promise<string|null>}
|
|
||||||
*/
|
*/
|
||||||
async getApiKey() {
|
private async getApiKey(): Promise<string | null> {
|
||||||
// Check settings first
|
// Check settings first
|
||||||
if (this.settings.apiKey) {
|
if (this.settings.apiKey) {
|
||||||
return this.settings.apiKey;
|
return this.settings.apiKey;
|
||||||
@@ -111,22 +107,32 @@ export class GrokCliLanguageModel {
|
|||||||
|
|
||||||
/**
|
/**
|
||||||
* Execute Grok CLI command
|
* Execute Grok CLI command
|
||||||
* @param {Array<string>} args - Command line arguments
|
|
||||||
* @param {Object} options - Execution options
|
|
||||||
* @returns {Promise<{stdout: string, stderr: string, exitCode: number}>}
|
|
||||||
*/
|
*/
|
||||||
async executeGrokCli(args, options = {}) {
|
private async executeGrokCli(
|
||||||
const timeout = options.timeout || this.settings.timeout || 120000; // 2 minutes default
|
args: string[],
|
||||||
|
options: { timeout?: number; apiKey?: string } = {}
|
||||||
|
): Promise<{ stdout: string; stderr: string; exitCode: number }> {
|
||||||
|
// Default timeout based on model type
|
||||||
|
let defaultTimeout = 120000; // 2 minutes default
|
||||||
|
if (this.modelId.includes('grok-4')) {
|
||||||
|
defaultTimeout = 600000; // 10 minutes for grok-4 models (they seem to hang during setup)
|
||||||
|
}
|
||||||
|
|
||||||
|
const timeout = options.timeout ?? this.settings.timeout ?? defaultTimeout;
|
||||||
|
|
||||||
return new Promise((resolve, reject) => {
|
return new Promise((resolve, reject) => {
|
||||||
const child = spawn('grok', args, {
|
const child = spawn('grok', args, {
|
||||||
stdio: 'pipe',
|
stdio: 'pipe',
|
||||||
cwd: this.settings.workingDirectory || process.cwd()
|
cwd: this.settings.workingDirectory || process.cwd(),
|
||||||
|
env:
|
||||||
|
options.apiKey === undefined
|
||||||
|
? process.env
|
||||||
|
: { ...process.env, GROK_CLI_API_KEY: options.apiKey }
|
||||||
});
|
});
|
||||||
|
|
||||||
let stdout = '';
|
let stdout = '';
|
||||||
let stderr = '';
|
let stderr = '';
|
||||||
let timeoutId;
|
let timeoutId: NodeJS.Timeout | undefined;
|
||||||
|
|
||||||
// Set up timeout
|
// Set up timeout
|
||||||
if (timeout > 0) {
|
if (timeout > 0) {
|
||||||
@@ -142,24 +148,26 @@ export class GrokCliLanguageModel {
|
|||||||
}, timeout);
|
}, timeout);
|
||||||
}
|
}
|
||||||
|
|
||||||
child.stdout.on('data', (data) => {
|
child.stdout?.on('data', (data) => {
|
||||||
stdout += data.toString();
|
const chunk = data.toString();
|
||||||
|
stdout += chunk;
|
||||||
});
|
});
|
||||||
|
|
||||||
child.stderr.on('data', (data) => {
|
child.stderr?.on('data', (data) => {
|
||||||
stderr += data.toString();
|
const chunk = data.toString();
|
||||||
|
stderr += chunk;
|
||||||
});
|
});
|
||||||
|
|
||||||
child.on('error', (error) => {
|
child.on('error', (error) => {
|
||||||
if (timeoutId) clearTimeout(timeoutId);
|
if (timeoutId) clearTimeout(timeoutId);
|
||||||
|
|
||||||
if (error.code === 'ENOENT') {
|
if ((error as any).code === 'ENOENT') {
|
||||||
reject(createInstallationError({}));
|
reject(createInstallationError({}));
|
||||||
} else {
|
} else {
|
||||||
reject(
|
reject(
|
||||||
createAPICallError({
|
createAPICallError({
|
||||||
message: `Failed to execute Grok CLI: ${error.message}`,
|
message: `Failed to execute Grok CLI: ${error.message}`,
|
||||||
code: error.code,
|
code: (error as any).code,
|
||||||
stderr: error.message,
|
stderr: error.message,
|
||||||
isRetryable: false
|
isRetryable: false
|
||||||
})
|
})
|
||||||
@@ -180,15 +188,18 @@ export class GrokCliLanguageModel {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Generate unsupported parameter warnings
|
* Generate comprehensive warnings for unsupported parameters and validation issues
|
||||||
* @param {Object} options - Generation options
|
|
||||||
* @returns {Array} Warnings array
|
|
||||||
*/
|
*/
|
||||||
generateUnsupportedWarnings(options) {
|
private generateAllWarnings(
|
||||||
const warnings = [];
|
options: LanguageModelV2CallOptions,
|
||||||
const unsupportedParams = [];
|
prompt: string
|
||||||
|
): LanguageModelV2CallWarning[] {
|
||||||
|
const warnings: LanguageModelV2CallWarning[] = [];
|
||||||
|
const unsupportedParams: string[] = [];
|
||||||
|
|
||||||
// Grok CLI supports some parameters but not all AI SDK parameters
|
// Check for unsupported parameters
|
||||||
|
if (options.temperature !== undefined)
|
||||||
|
unsupportedParams.push('temperature');
|
||||||
if (options.topP !== undefined) unsupportedParams.push('topP');
|
if (options.topP !== undefined) unsupportedParams.push('topP');
|
||||||
if (options.topK !== undefined) unsupportedParams.push('topK');
|
if (options.topK !== undefined) unsupportedParams.push('topK');
|
||||||
if (options.presencePenalty !== undefined)
|
if (options.presencePenalty !== undefined)
|
||||||
@@ -200,24 +211,51 @@ export class GrokCliLanguageModel {
|
|||||||
if (options.seed !== undefined) unsupportedParams.push('seed');
|
if (options.seed !== undefined) unsupportedParams.push('seed');
|
||||||
|
|
||||||
if (unsupportedParams.length > 0) {
|
if (unsupportedParams.length > 0) {
|
||||||
|
// Add a warning for each unsupported parameter
|
||||||
for (const param of unsupportedParams) {
|
for (const param of unsupportedParams) {
|
||||||
warnings.push({
|
warnings.push({
|
||||||
type: 'unsupported-setting',
|
type: 'unsupported-setting',
|
||||||
setting: param,
|
setting: param as
|
||||||
|
| 'temperature'
|
||||||
|
| 'topP'
|
||||||
|
| 'topK'
|
||||||
|
| 'presencePenalty'
|
||||||
|
| 'frequencyPenalty'
|
||||||
|
| 'stopSequences'
|
||||||
|
| 'seed',
|
||||||
details: `Grok CLI does not support the ${param} parameter. It will be ignored.`
|
details: `Grok CLI does not support the ${param} parameter. It will be ignored.`
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Add model validation warnings if needed
|
||||||
|
if (!this.modelId || this.modelId.trim() === '') {
|
||||||
|
warnings.push({
|
||||||
|
type: 'other',
|
||||||
|
message: 'Model ID is empty or invalid'
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add prompt validation
|
||||||
|
if (!prompt || prompt.trim() === '') {
|
||||||
|
warnings.push({
|
||||||
|
type: 'other',
|
||||||
|
message: 'Prompt is empty'
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
return warnings;
|
return warnings;
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Generate text using Grok CLI
|
* Generate text using Grok CLI
|
||||||
* @param {Object} options - Generation options
|
|
||||||
* @returns {Promise<Object>}
|
|
||||||
*/
|
*/
|
||||||
async doGenerate(options) {
|
async doGenerate(options: LanguageModelV2CallOptions) {
|
||||||
|
// Handle abort signal early
|
||||||
|
if (options.abortSignal?.aborted) {
|
||||||
|
throw options.abortSignal.reason || new Error('Request aborted');
|
||||||
|
}
|
||||||
|
|
||||||
// Check CLI installation
|
// Check CLI installation
|
||||||
const isInstalled = await this.checkGrokCliInstallation();
|
const isInstalled = await this.checkGrokCliInstallation();
|
||||||
if (!isInstalled) {
|
if (!isInstalled) {
|
||||||
@@ -234,7 +272,7 @@ export class GrokCliLanguageModel {
|
|||||||
}
|
}
|
||||||
|
|
||||||
const prompt = createPromptFromMessages(options.prompt);
|
const prompt = createPromptFromMessages(options.prompt);
|
||||||
const warnings = this.generateUnsupportedWarnings(options);
|
const warnings = this.generateAllWarnings(options, prompt);
|
||||||
|
|
||||||
// Build command arguments
|
// Build command arguments
|
||||||
const args = ['--prompt', escapeShellArg(prompt)];
|
const args = ['--prompt', escapeShellArg(prompt)];
|
||||||
@@ -244,10 +282,11 @@ export class GrokCliLanguageModel {
|
|||||||
args.push('--model', this.modelId);
|
args.push('--model', this.modelId);
|
||||||
}
|
}
|
||||||
|
|
||||||
// Add API key if available
|
// Skip API key parameter if it's likely already configured to avoid hanging
|
||||||
if (apiKey) {
|
// The CLI seems to hang when trying to save API keys for grok-4 models
|
||||||
args.push('--api-key', apiKey);
|
// if (apiKey) {
|
||||||
}
|
// args.push('--api-key', apiKey);
|
||||||
|
// }
|
||||||
|
|
||||||
// Add base URL if provided in settings
|
// Add base URL if provided in settings
|
||||||
if (this.settings.baseURL) {
|
if (this.settings.baseURL) {
|
||||||
@@ -260,9 +299,7 @@ export class GrokCliLanguageModel {
|
|||||||
}
|
}
|
||||||
|
|
||||||
try {
|
try {
|
||||||
const result = await this.executeGrokCli(args, {
|
const result = await this.executeGrokCli(args, { apiKey });
|
||||||
timeout: this.settings.timeout
|
|
||||||
});
|
|
||||||
|
|
||||||
if (result.exitCode !== 0) {
|
if (result.exitCode !== 0) {
|
||||||
// Handle authentication errors
|
// Handle authentication errors
|
||||||
@@ -290,19 +327,37 @@ export class GrokCliLanguageModel {
|
|||||||
let text = response.text || '';
|
let text = response.text || '';
|
||||||
|
|
||||||
// Extract JSON if in object-json mode
|
// Extract JSON if in object-json mode
|
||||||
if (options.mode?.type === 'object-json' && text) {
|
const isObjectJson = (
|
||||||
|
o: unknown
|
||||||
|
): o is { mode: { type: 'object-json' } } =>
|
||||||
|
!!o &&
|
||||||
|
typeof o === 'object' &&
|
||||||
|
'mode' in o &&
|
||||||
|
(o as any).mode?.type === 'object-json';
|
||||||
|
if (isObjectJson(options) && text) {
|
||||||
text = extractJson(text);
|
text = extractJson(text);
|
||||||
}
|
}
|
||||||
|
|
||||||
return {
|
return {
|
||||||
text: text || undefined,
|
content: [
|
||||||
usage: response.usage || { promptTokens: 0, completionTokens: 0 },
|
{
|
||||||
finishReason: 'stop',
|
type: 'text' as const,
|
||||||
|
text: text || ''
|
||||||
|
}
|
||||||
|
],
|
||||||
|
usage: response.usage
|
||||||
|
? {
|
||||||
|
inputTokens: response.usage.promptTokens,
|
||||||
|
outputTokens: response.usage.completionTokens,
|
||||||
|
totalTokens: response.usage.totalTokens
|
||||||
|
}
|
||||||
|
: { inputTokens: 0, outputTokens: 0, totalTokens: 0 },
|
||||||
|
finishReason: 'stop' as const,
|
||||||
rawCall: {
|
rawCall: {
|
||||||
rawPrompt: prompt,
|
rawPrompt: prompt,
|
||||||
rawSettings: args
|
rawSettings: args
|
||||||
},
|
},
|
||||||
warnings: warnings.length > 0 ? warnings : undefined,
|
warnings: warnings,
|
||||||
response: {
|
response: {
|
||||||
id: generateId(),
|
id: generateId(),
|
||||||
timestamp: new Date(),
|
timestamp: new Date(),
|
||||||
@@ -314,20 +369,23 @@ export class GrokCliLanguageModel {
|
|||||||
providerMetadata: {
|
providerMetadata: {
|
||||||
'grok-cli': {
|
'grok-cli': {
|
||||||
exitCode: result.exitCode,
|
exitCode: result.exitCode,
|
||||||
stderr: result.stderr || undefined
|
...(result.stderr && { stderr: result.stderr })
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
// Re-throw our custom errors
|
// Re-throw our custom errors
|
||||||
if (error.name === 'APICallError' || error.name === 'LoadAPIKeyError') {
|
if (
|
||||||
|
(error as any).name === 'APICallError' ||
|
||||||
|
(error as any).name === 'LoadAPIKeyError'
|
||||||
|
) {
|
||||||
throw error;
|
throw error;
|
||||||
}
|
}
|
||||||
|
|
||||||
// Wrap other errors
|
// Wrap other errors
|
||||||
throw createAPICallError({
|
throw createAPICallError({
|
||||||
message: `Grok CLI execution failed: ${error.message}`,
|
message: `Grok CLI execution failed: ${(error as Error).message}`,
|
||||||
code: error.code,
|
code: (error as any).code,
|
||||||
promptExcerpt: prompt.substring(0, 200),
|
promptExcerpt: prompt.substring(0, 200),
|
||||||
isRetryable: false
|
isRetryable: false
|
||||||
});
|
});
|
||||||
@@ -338,15 +396,39 @@ export class GrokCliLanguageModel {
|
|||||||
* Stream text using Grok CLI
|
* Stream text using Grok CLI
|
||||||
* Note: Grok CLI doesn't natively support streaming, so this simulates streaming
|
* Note: Grok CLI doesn't natively support streaming, so this simulates streaming
|
||||||
* by generating the full response and then streaming it in chunks
|
* by generating the full response and then streaming it in chunks
|
||||||
* @param {Object} options - Stream options
|
|
||||||
* @returns {Promise<Object>}
|
|
||||||
*/
|
*/
|
||||||
async doStream(options) {
|
async doStream(options: LanguageModelV2CallOptions) {
|
||||||
-      const warnings = this.generateUnsupportedWarnings(options);
+      const prompt = createPromptFromMessages(options.prompt);
+      const warnings = this.generateAllWarnings(options, prompt);

       const stream = new ReadableStream({
         start: async (controller) => {
+          let abortListener: (() => void) | undefined;

           try {
+            // Handle abort signal
+            if (options.abortSignal?.aborted) {
+              throw options.abortSignal.reason || new Error('Request aborted');
+            }
+
+            // Set up abort listener
+            if (options.abortSignal) {
+              abortListener = () => {
+                controller.enqueue({
+                  type: 'error',
+                  error:
+                    options.abortSignal?.reason || new Error('Request aborted')
+                });
+                controller.close();
+              };
+              options.abortSignal.addEventListener('abort', abortListener, {
+                once: true
+              });
+            }
+
+            // Emit stream-start with warnings
+            controller.enqueue({ type: 'stream-start', warnings });

             // Generate the full response first
             const result = await this.doGenerate(options);

@@ -359,20 +441,48 @@ export class GrokCliLanguageModel {
             });

             // Simulate streaming by chunking the text
-            const text = result.text || '';
+            const content = result.content || [];
+            const text =
+              content.length > 0 && content[0].type === 'text'
+                ? content[0].text
+                : '';
             const chunkSize = 50; // Characters per chunk
+            let textPartId: string | undefined;
+
+            // Emit text-start if we have content
+            if (text.length > 0) {
+              textPartId = generateId();
+              controller.enqueue({
+                type: 'text-start',
+                id: textPartId
+              });
+            }

             for (let i = 0; i < text.length; i += chunkSize) {
+              // Check for abort during streaming
+              if (options.abortSignal?.aborted) {
+                throw options.abortSignal.reason || new Error('Request aborted');
+              }

               const chunk = text.slice(i, i + chunkSize);
               controller.enqueue({
                 type: 'text-delta',
-                textDelta: chunk
+                id: textPartId!,
+                delta: chunk
               });

               // Add small delay to simulate streaming
               await new Promise((resolve) => setTimeout(resolve, 20));
             }

+            // Close text part if opened
+            if (textPartId) {
+              controller.enqueue({
+                type: 'text-end',
+                id: textPartId
+              });
+            }

             // Emit finish event
             controller.enqueue({
               type: 'finish',

@@ -388,19 +498,22 @@ export class GrokCliLanguageModel {
               error
             });
             controller.close();
+          } finally {
+            // Clean up abort listener
+            if (options.abortSignal && abortListener) {
+              options.abortSignal.removeEventListener('abort', abortListener);
+            }
           }
+        },
+        cancel: () => {
+          // Clean up if stream is cancelled
         }
       });

       return {
         stream,
-        rawCall: {
-          rawPrompt: createPromptFromMessages(options.prompt),
-          rawSettings: {}
-        },
-        warnings: warnings.length > 0 ? warnings : undefined,
         request: {
-          body: createPromptFromMessages(options.prompt)
+          body: prompt
         }
       };
     }
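For context, a minimal sketch of how this simulated stream could be consumed. The package import name, the `doStream` option shape, and the `as any` cast are assumptions for illustration; only the emitted part types (`stream-start`, `text-start`, `text-delta`, `text-end`, `finish`, `error`) come from the code above.

```ts
// Hypothetical consumer of the simulated stream (not part of the diff).
import { grokCli } from './index.js';

async function collectText(): Promise<string> {
  const model = grokCli('grok-2');
  const { stream } = await model.doStream({
    prompt: [{ role: 'user', content: [{ type: 'text', text: 'Hello' }] }]
  } as any); // option shape simplified for the sketch

  let text = '';
  const reader = stream.getReader();
  while (true) {
    const { done, value } = await reader.read();
    if (done) break;
    // Text arrives in ~50 character chunks, as generated above.
    if (value.type === 'text-delta') text += value.delta;
  }
  return text;
}
```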
121  packages/ai-sdk-provider-grok-cli/src/grok-cli-provider.test.ts  Normal file
@@ -0,0 +1,121 @@
/**
 * Tests for Grok CLI provider
 */

import { NoSuchModelError } from '@ai-sdk/provider';
import { beforeEach, describe, expect, it, vi } from 'vitest';
import { GrokCliLanguageModel } from './grok-cli-language-model.js';
import { createGrokCli, grokCli } from './grok-cli-provider.js';

// Mock the GrokCliLanguageModel
vi.mock('./grok-cli-language-model.js', () => ({
  GrokCliLanguageModel: vi.fn().mockImplementation((options) => ({
    modelId: options.id,
    settings: options.settings,
    provider: 'grok-cli'
  }))
}));

describe('createGrokCli', () => {
  beforeEach(() => {
    vi.clearAllMocks();
  });

  it('should create a provider with default settings', () => {
    const provider = createGrokCli();
    expect(typeof provider).toBe('function');
    expect(typeof provider.languageModel).toBe('function');
    expect(typeof provider.chat).toBe('function');
    expect(typeof provider.textEmbeddingModel).toBe('function');
    expect(typeof provider.imageModel).toBe('function');
  });

  it('should create a provider with custom default settings', () => {
    const defaultSettings = {
      timeout: 5000,
      workingDirectory: '/custom/path'
    };
    const provider = createGrokCli({ defaultSettings });

    const model = provider('grok-2-mini');

    expect(GrokCliLanguageModel).toHaveBeenCalledWith({
      id: 'grok-2-mini',
      settings: defaultSettings
    });
  });

  it('should create language models with merged settings', () => {
    const defaultSettings = { timeout: 5000 };
    const provider = createGrokCli({ defaultSettings });

    const modelSettings = { apiKey: 'test-key' };
    const model = provider('grok-2', modelSettings);

    expect(GrokCliLanguageModel).toHaveBeenCalledWith({
      id: 'grok-2',
      settings: { timeout: 5000, apiKey: 'test-key' }
    });
  });

  it('should create models via languageModel method', () => {
    const provider = createGrokCli();
    const model = provider.languageModel('grok-2-mini', { timeout: 1000 });

    expect(GrokCliLanguageModel).toHaveBeenCalledWith({
      id: 'grok-2-mini',
      settings: { timeout: 1000 }
    });
  });

  it('should create models via chat method (alias)', () => {
    const provider = createGrokCli();
    const model = provider.chat('grok-2');

    expect(GrokCliLanguageModel).toHaveBeenCalledWith({
      id: 'grok-2',
      settings: {}
    });
  });

  it('should throw error when called with new keyword', () => {
    const provider = createGrokCli();
    expect(() => {
      // @ts-expect-error - intentionally testing invalid usage
      new provider('grok-2');
    }).toThrow(
      'The Grok CLI model function cannot be called with the new keyword.'
    );
  });

  it('should throw NoSuchModelError for textEmbeddingModel', () => {
    const provider = createGrokCli();
    expect(() => {
      provider.textEmbeddingModel('test-model');
    }).toThrow(NoSuchModelError);
  });

  it('should throw NoSuchModelError for imageModel', () => {
    const provider = createGrokCli();
    expect(() => {
      provider.imageModel('test-model');
    }).toThrow(NoSuchModelError);
  });
});

describe('default grokCli provider', () => {
  it('should be a pre-configured provider instance', () => {
    expect(typeof grokCli).toBe('function');
    expect(typeof grokCli.languageModel).toBe('function');
    expect(typeof grokCli.chat).toBe('function');
  });

  it('should create models with default configuration', () => {
    const model = grokCli('grok-2-mini');

    expect(GrokCliLanguageModel).toHaveBeenCalledWith({
      id: 'grok-2-mini',
      settings: {}
    });
  });
});
108  packages/ai-sdk-provider-grok-cli/src/grok-cli-provider.ts  Normal file
@@ -0,0 +1,108 @@
/**
 * Grok CLI provider implementation for AI SDK v5
 */

import type { LanguageModelV2, ProviderV2 } from '@ai-sdk/provider';
import { NoSuchModelError } from '@ai-sdk/provider';
import { GrokCliLanguageModel } from './grok-cli-language-model.js';
import type { GrokCliModelId, GrokCliSettings } from './types.js';

/**
 * Grok CLI provider interface that extends the AI SDK's ProviderV2
 */
export interface GrokCliProvider extends ProviderV2 {
  /**
   * Creates a language model instance for the specified model ID.
   * This is a shorthand for calling `languageModel()`.
   */
  (modelId: GrokCliModelId, settings?: GrokCliSettings): LanguageModelV2;

  /**
   * Creates a language model instance for text generation.
   */
  languageModel(
    modelId: GrokCliModelId,
    settings?: GrokCliSettings
  ): LanguageModelV2;

  /**
   * Alias for `languageModel()` to maintain compatibility with AI SDK patterns.
   */
  chat(modelId: GrokCliModelId, settings?: GrokCliSettings): LanguageModelV2;

  textEmbeddingModel(modelId: string): never;
  imageModel(modelId: string): never;
}

/**
 * Configuration options for creating a Grok CLI provider instance
 */
export interface GrokCliProviderSettings {
  /**
   * Default settings to use for all models created by this provider.
   * Individual model settings will override these defaults.
   */
  defaultSettings?: GrokCliSettings;
}

/**
 * Creates a Grok CLI provider instance with the specified configuration.
 * The provider can be used to create language models for interacting with Grok models.
 */
export function createGrokCli(
  options: GrokCliProviderSettings = {}
): GrokCliProvider {
  const createModel = (
    modelId: GrokCliModelId,
    settings: GrokCliSettings = {}
  ): LanguageModelV2 => {
    const mergedSettings = {
      ...options.defaultSettings,
      ...settings
    };

    return new GrokCliLanguageModel({
      id: modelId,
      settings: mergedSettings
    });
  };

  const provider = function (
    modelId: GrokCliModelId,
    settings?: GrokCliSettings
  ) {
    if (new.target) {
      throw new Error(
        'The Grok CLI model function cannot be called with the new keyword.'
      );
    }

    return createModel(modelId, settings);
  };

  provider.languageModel = createModel;
  provider.chat = createModel; // Alias for languageModel

  // Add textEmbeddingModel method that throws NoSuchModelError
  provider.textEmbeddingModel = (modelId: string) => {
    throw new NoSuchModelError({
      modelId,
      modelType: 'textEmbeddingModel'
    });
  };

  provider.imageModel = (modelId: string) => {
    throw new NoSuchModelError({
      modelId,
      modelType: 'imageModel'
    });
  };

  return provider as GrokCliProvider;
}

/**
 * Default Grok CLI provider instance.
 * Pre-configured provider for quick usage without custom settings.
 */
export const grokCli = createGrokCli();
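A minimal usage sketch of the factory above; the model ids mirror the ones used in the tests, and the environment variable name is an assumption rather than something defined in this diff.

```ts
// Illustrative only: settings values and env var name are placeholders.
import { createGrokCli } from './grok-cli-provider.js';

const provider = createGrokCli({
  defaultSettings: { timeout: 120_000, workingDirectory: process.cwd() }
});

// Per-call settings are shallow-merged over defaultSettings.
const model = provider('grok-2', { apiKey: process.env.GROK_CLI_API_KEY });
```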
64  packages/ai-sdk-provider-grok-cli/src/index.ts  Normal file
@@ -0,0 +1,64 @@
/**
 * Provider exports for creating and configuring Grok CLI instances.
 */

/**
 * Creates a new Grok CLI provider instance and the default provider instance.
 */
export { createGrokCli, grokCli } from './grok-cli-provider.js';

/**
 * Type definitions for the Grok CLI provider.
 */
export type {
  GrokCliProvider,
  GrokCliProviderSettings
} from './grok-cli-provider.js';

/**
 * Language model implementation for Grok CLI.
 * This class implements the AI SDK's LanguageModelV2 interface.
 */
export { GrokCliLanguageModel } from './grok-cli-language-model.js';

/**
 * Type definitions for Grok CLI language models.
 */
export type {
  GrokCliModelId,
  GrokCliLanguageModelOptions,
  GrokCliSettings,
  GrokCliMessage,
  GrokCliResponse,
  GrokCliErrorMetadata
} from './types.js';

/**
 * Error handling utilities for Grok CLI.
 * These functions help create and identify specific error types.
 */
export {
  isAuthenticationError,
  isTimeoutError,
  isInstallationError,
  getErrorMetadata,
  createAPICallError,
  createAuthenticationError,
  createTimeoutError,
  createInstallationError
} from './errors.js';

/**
 * Message conversion utilities for Grok CLI communication.
 */
export {
  convertToGrokCliMessages,
  convertFromGrokCliResponse,
  createPromptFromMessages,
  escapeShellArg
} from './message-converter.js';

/**
 * JSON extraction utilities for parsing Grok responses.
 */
export { extractJson } from './json-extractor.js';
81  packages/ai-sdk-provider-grok-cli/src/json-extractor.test.ts  Normal file
@@ -0,0 +1,81 @@
/**
 * Tests for JSON extraction utilities
 */

import { describe, expect, it } from 'vitest';
import { extractJson } from './json-extractor.js';

describe('extractJson', () => {
  it('should extract JSON from markdown code blocks', () => {
    const text = '```json\n{"name": "test", "value": 42}\n```';
    const result = extractJson(text);
    expect(JSON.parse(result)).toEqual({ name: 'test', value: 42 });
  });

  it('should extract JSON from generic code blocks', () => {
    const text = '```\n{"name": "test", "value": 42}\n```';
    const result = extractJson(text);
    expect(JSON.parse(result)).toEqual({ name: 'test', value: 42 });
  });

  it('should remove JavaScript variable declarations', () => {
    const text = 'const result = {"name": "test", "value": 42};';
    const result = extractJson(text);
    expect(JSON.parse(result)).toEqual({ name: 'test', value: 42 });
  });

  it('should handle let variable declarations', () => {
    const text = 'let data = {"name": "test", "value": 42};';
    const result = extractJson(text);
    expect(JSON.parse(result)).toEqual({ name: 'test', value: 42 });
  });

  it('should handle var variable declarations', () => {
    const text = 'var config = {"name": "test", "value": 42};';
    const result = extractJson(text);
    expect(JSON.parse(result)).toEqual({ name: 'test', value: 42 });
  });

  it('should extract JSON arrays', () => {
    const text = '[{"name": "test1"}, {"name": "test2"}]';
    const result = extractJson(text);
    expect(JSON.parse(result)).toEqual([{ name: 'test1' }, { name: 'test2' }]);
  });

  it('should convert JavaScript object literals to JSON', () => {
    const text = "{name: 'test', value: 42}";
    const result = extractJson(text);
    expect(JSON.parse(result)).toEqual({ name: 'test', value: 42 });
  });

  it('should return valid JSON (canonical formatting)', () => {
    const text = '{"name": "test", "value": 42}';
    const result = extractJson(text);
    expect(JSON.parse(result)).toEqual({ name: 'test', value: 42 });
  });

  it('should return original text when JSON parsing fails completely', () => {
    const text = 'This is not JSON at all';
    const result = extractJson(text);
    expect(result).toBe('This is not JSON at all');
  });

  it('should handle complex nested objects', () => {
    const text =
      '```json\n{\n  "user": {\n    "name": "John",\n    "age": 30\n  },\n  "items": [1, 2, 3]\n}\n```';
    const result = extractJson(text);
    expect(JSON.parse(result)).toEqual({
      user: {
        name: 'John',
        age: 30
      },
      items: [1, 2, 3]
    });
  });

  it('should handle mixed quotes in object literals', () => {
    const text = `{name: "test", value: 'mixed quotes'}`;
    const result = extractJson(text);
    expect(JSON.parse(result)).toEqual({ name: 'test', value: 'mixed quotes' });
  });
});
132  packages/ai-sdk-provider-grok-cli/src/json-extractor.ts  Normal file
@@ -0,0 +1,132 @@
/**
 * Extract JSON from AI's response using a tolerant parser.
 *
 * The function removes common wrappers such as markdown fences or variable
 * declarations and then attempts to parse the remaining text with
 * `jsonc-parser`. If valid JSON (or JSONC) can be parsed, it is returned as a
 * string via `JSON.stringify`. Otherwise the original text is returned.
 *
 * @param text - Raw text which may contain JSON
 * @returns A valid JSON string if extraction succeeds, otherwise the original text
 */
import { parse, type ParseError } from 'jsonc-parser';

export function extractJson(text: string): string {
  let content = text.trim();

  // Strip ```json or ``` fences
  const fenceMatch = /```(?:json)?\s*([\s\S]*?)\s*```/i.exec(content);
  if (fenceMatch) {
    content = fenceMatch[1];
  }

  // Strip variable declarations like `const foo =` or `let foo =`
  const varMatch = /^\s*(?:const|let|var)\s+\w+\s*=\s*([\s\S]*)/i.exec(content);
  if (varMatch) {
    content = varMatch[1];
    // Remove trailing semicolon if present
    if (content.trim().endsWith(';')) {
      content = content.trim().slice(0, -1);
    }
  }

  // Find the first opening bracket
  const firstObj = content.indexOf('{');
  const firstArr = content.indexOf('[');
  if (firstObj === -1 && firstArr === -1) {
    return text;
  }
  const start =
    firstArr === -1
      ? firstObj
      : firstObj === -1
        ? firstArr
        : Math.min(firstObj, firstArr);
  content = content.slice(start);

  // Try to parse the entire string with jsonc-parser
  const tryParse = (value: string): string | undefined => {
    const errors: ParseError[] = [];
    try {
      const result = parse(value, errors, { allowTrailingComma: true });
      if (errors.length === 0) {
        return JSON.stringify(result, null, 2);
      }
    } catch {
      // ignore
    }
    return undefined;
  };

  const parsed = tryParse(content);
  if (parsed !== undefined) {
    return parsed;
  }

  // If parsing the full string failed, use a more efficient approach
  // to find valid JSON boundaries
  const openChar = content[0];
  const closeChar = openChar === '{' ? '}' : ']';

  // Find all potential closing positions by tracking nesting depth
  const closingPositions: number[] = [];
  let depth = 0;
  let inString = false;
  let escapeNext = false;

  for (let i = 0; i < content.length; i++) {
    const char = content[i];

    if (escapeNext) {
      escapeNext = false;
      continue;
    }

    if (char === '\\') {
      escapeNext = true;
      continue;
    }

    if (char === '"' && !inString) {
      inString = true;
      continue;
    }

    if (char === '"' && inString) {
      inString = false;
      continue;
    }

    // Skip content inside strings
    if (inString) continue;

    if (char === openChar) {
      depth++;
    } else if (char === closeChar) {
      depth--;
      if (depth === 0) {
        closingPositions.push(i + 1);
      }
    }
  }

  // Try parsing at each valid closing position, starting from the end
  for (let i = closingPositions.length - 1; i >= 0; i--) {
    const attempt = tryParse(content.slice(0, closingPositions[i]));
    if (attempt !== undefined) {
      return attempt;
    }
  }

  // As a final fallback, try the original character-by-character approach
  // but only for the last 1000 characters to limit performance impact
  const searchStart = Math.max(0, content.length - 1000);
  for (let end = content.length - 1; end > searchStart; end--) {
    const attempt = tryParse(content.slice(0, end));
    if (attempt !== undefined) {
      return attempt;
    }
  }

  return text;
}
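A short sketch of the behavior documented above; the inputs are illustrative, and the expected outputs only assume what the docstring and tests already state (successful extraction is re-serialized via `JSON.stringify(..., null, 2)`, non-JSON input passes through unchanged).

```ts
import { extractJson } from './json-extractor.js';

// Fenced JSON is unwrapped and re-serialized in canonical form.
extractJson('```json\n{"ok": true}\n```'); // '{\n  "ok": true\n}'

// Variable declarations are stripped before parsing.
extractJson('const res = {"ok": true};');  // '{\n  "ok": true\n}'

// Input with no recoverable JSON falls through unchanged.
extractJson('no json here');               // 'no json here'
```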
163  packages/ai-sdk-provider-grok-cli/src/message-converter.test.ts  Normal file
@@ -0,0 +1,163 @@
/**
 * Tests for message conversion utilities
 */

import { describe, expect, it } from 'vitest';
import {
  convertFromGrokCliResponse,
  convertToGrokCliMessages,
  createPromptFromMessages,
  escapeShellArg
} from './message-converter.js';

describe('convertToGrokCliMessages', () => {
  it('should convert string content messages', () => {
    const messages = [
      { role: 'user', content: 'Hello, world!' },
      { role: 'assistant', content: 'Hi there!' }
    ];

    const result = convertToGrokCliMessages(messages);

    expect(result).toEqual([
      { role: 'user', content: 'Hello, world!' },
      { role: 'assistant', content: 'Hi there!' }
    ]);
  });

  it('should convert array content messages', () => {
    const messages = [
      {
        role: 'user',
        content: [
          { type: 'text', text: 'Hello' },
          { type: 'text', text: 'World' }
        ]
      }
    ];

    const result = convertToGrokCliMessages(messages);

    expect(result).toEqual([{ role: 'user', content: 'Hello\nWorld' }]);
  });

  it('should convert object content messages', () => {
    const messages = [
      {
        role: 'user',
        content: { text: 'Hello from object' }
      }
    ];

    const result = convertToGrokCliMessages(messages);

    expect(result).toEqual([{ role: 'user', content: 'Hello from object' }]);
  });
});

describe('convertFromGrokCliResponse', () => {
  it('should parse JSONL response format', () => {
    const responseText = `{"role": "assistant", "content": "Hello there!", "usage": {"prompt_tokens": 10, "completion_tokens": 5, "total_tokens": 15}}`;

    const result = convertFromGrokCliResponse(responseText);

    expect(result).toEqual({
      text: 'Hello there!',
      usage: {
        promptTokens: 10,
        completionTokens: 5,
        totalTokens: 15
      }
    });
  });

  it('should handle multiple lines in JSONL format', () => {
    const responseText = `{"role": "user", "content": "Hello"}
{"role": "assistant", "content": "Hi there!", "usage": {"prompt_tokens": 5, "completion_tokens": 3}}`;

    const result = convertFromGrokCliResponse(responseText);

    expect(result).toEqual({
      text: 'Hi there!',
      usage: {
        promptTokens: 5,
        completionTokens: 3,
        totalTokens: 0
      }
    });
  });

  it('should fallback to raw text when parsing fails', () => {
    const responseText = 'Invalid JSON response';

    const result = convertFromGrokCliResponse(responseText);

    expect(result).toEqual({
      text: 'Invalid JSON response',
      usage: undefined
    });
  });
});

describe('createPromptFromMessages', () => {
  it('should create formatted prompt from messages', () => {
    const messages = [
      { role: 'system', content: 'You are a helpful assistant.' },
      { role: 'user', content: 'What is 2+2?' },
      { role: 'assistant', content: '2+2 equals 4.' }
    ];

    const result = createPromptFromMessages(messages);

    expect(result).toBe(
      'System: You are a helpful assistant.\n\nUser: What is 2+2?\n\nAssistant: 2+2 equals 4.'
    );
  });

  it('should handle custom role names', () => {
    const messages = [{ role: 'custom', content: 'Custom message' }];

    const result = createPromptFromMessages(messages);

    expect(result).toBe('custom: Custom message');
  });

  it('should trim whitespace from message content', () => {
    const messages = [
      { role: 'user', content: '  Hello with spaces  ' },
      { role: 'assistant', content: '\n\nResponse with newlines\n\n' }
    ];

    const result = createPromptFromMessages(messages);

    expect(result).toBe(
      'User: Hello with spaces\n\nAssistant: Response with newlines'
    );
  });
});

describe('escapeShellArg', () => {
  it('should escape single quotes', () => {
    const arg = "It's a test";
    const result = escapeShellArg(arg);
    expect(result).toBe("'It'\\''s a test'");
  });

  it('should handle strings without special characters', () => {
    const arg = 'simple string';
    const result = escapeShellArg(arg);
    expect(result).toBe("'simple string'");
  });

  it('should convert non-string values to strings', () => {
    const arg = 123;
    const result = escapeShellArg(arg);
    expect(result).toBe("'123'");
  });

  it('should handle empty strings', () => {
    const arg = '';
    const result = escapeShellArg(arg);
    expect(result).toBe("''");
  });
});
@@ -1,17 +1,28 @@
 /**
- * @fileoverview Message format conversion utilities for Grok CLI provider
+ * Message format conversion utilities for Grok CLI provider
 */

+import type { GrokCliMessage, GrokCliResponse } from './types.js';
+
 /**
- * @typedef {import('./types.js').GrokCliMessage} GrokCliMessage
+ * AI SDK message type (simplified interface)
 */
+interface AISDKMessage {
+  role: string;
+  content:
+    | string
+    | Array<{ type: string; text?: string }>
+    | { text?: string; [key: string]: unknown };
+}

 /**
  * Convert AI SDK messages to Grok CLI compatible format
- * @param {Array<Object>} messages - AI SDK message array
- * @returns {Array<GrokCliMessage>} Grok CLI compatible messages
+ * @param messages - AI SDK message array
+ * @returns Grok CLI compatible messages
 */
-export function convertToGrokCliMessages(messages) {
+export function convertToGrokCliMessages(
+  messages: AISDKMessage[]
+): GrokCliMessage[] {
   return messages.map((message) => {
     // Handle different message content types
     let content = '';

@@ -22,7 +33,7 @@ export function convertToGrokCliMessages(messages) {
       // Handle multi-part content (text and images)
       content = message.content
         .filter((part) => part.type === 'text')
-        .map((part) => part.text)
+        .map((part) => part.text || '')
         .join('\n');
     } else if (message.content && typeof message.content === 'object') {
       // Handle object content

@@ -38,10 +49,17 @@ export function convertToGrokCliMessages(messages) {

 /**
  * Convert Grok CLI response to AI SDK format
- * @param {string} responseText - Raw response text from Grok CLI (JSONL format)
- * @returns {Object} AI SDK compatible response object
+ * @param responseText - Raw response text from Grok CLI (JSONL format)
+ * @returns AI SDK compatible response object
 */
-export function convertFromGrokCliResponse(responseText) {
+export function convertFromGrokCliResponse(responseText: string): {
+  text: string;
+  usage?: {
+    promptTokens: number;
+    completionTokens: number;
+    totalTokens: number;
+  };
+} {
   try {
     // Grok CLI outputs JSONL format - each line is a separate JSON message
     const lines = responseText

@@ -50,10 +68,10 @@ export function convertFromGrokCliResponse(responseText) {
       .filter((line) => line.trim());

     // Parse each line as JSON and find assistant messages
-    const messages = [];
+    const messages: GrokCliResponse[] = [];
     for (const line of lines) {
       try {
-        const message = JSON.parse(line);
+        const message = JSON.parse(line) as GrokCliResponse;
         messages.push(message);
       } catch (parseError) {
         // Skip invalid JSON lines

@@ -95,10 +113,10 @@ export function convertFromGrokCliResponse(responseText) {

 /**
  * Create a prompt string for Grok CLI from messages
- * @param {Array<Object>} messages - AI SDK message array
- * @returns {string} Formatted prompt string
+ * @param messages - AI SDK message array
+ * @returns Formatted prompt string
 */
-export function createPromptFromMessages(messages) {
+export function createPromptFromMessages(messages: AISDKMessage[]): string {
   const grokMessages = convertToGrokCliMessages(messages);

   // Create a conversation-style prompt

@@ -122,14 +140,14 @@ export function createPromptFromMessages(messages) {

 /**
  * Escape shell arguments for safe CLI execution
- * @param {string} arg - Argument to escape
- * @returns {string} Shell-escaped argument
+ * @param arg - Argument to escape
+ * @returns Shell-escaped argument
 */
-export function escapeShellArg(arg) {
+export function escapeShellArg(arg: string | unknown): string {
   if (typeof arg !== 'string') {
     arg = String(arg);
   }

   // Replace single quotes with '\''
-  return "'" + arg.replace(/'/g, "'\\''") + "'";
+  return "'" + (arg as string).replace(/'/g, "'\\''") + "'";
 }
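A quick illustration of the two conversion helpers after this TypeScript migration; the inputs are invented, but the outputs follow directly from the tests above.

```ts
import { createPromptFromMessages, escapeShellArg } from './message-converter.js';

const prompt = createPromptFromMessages([
  { role: 'system', content: 'You are terse.' },
  { role: 'user', content: 'Ping?' }
]);
// => "System: You are terse.\n\nUser: Ping?"

escapeShellArg("it's fine"); // => "'it'\\''s fine'"
```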
81  packages/ai-sdk-provider-grok-cli/src/types.ts  Normal file
@@ -0,0 +1,81 @@
/**
 * Type definitions for Grok CLI provider
 */

/**
 * Settings for configuring Grok CLI behavior
 */
export interface GrokCliSettings {
  /** API key for Grok CLI */
  apiKey?: string;
  /** Base URL for Grok API */
  baseURL?: string;
  /** Default model to use */
  model?: string;
  /** Timeout in milliseconds */
  timeout?: number;
  /** Working directory for CLI commands */
  workingDirectory?: string;
}

/**
 * Model identifiers supported by Grok CLI
 */
export type GrokCliModelId = string;

/**
 * Error metadata for Grok CLI operations
 */
export interface GrokCliErrorMetadata {
  /** Error code */
  code?: string;
  /** Process exit code */
  exitCode?: number;
  /** Standard error output */
  stderr?: string;
  /** Standard output */
  stdout?: string;
  /** Excerpt of the prompt that caused the error */
  promptExcerpt?: string;
  /** Timeout value in milliseconds */
  timeoutMs?: number;
}

/**
 * Message format for Grok CLI communication
 */
export interface GrokCliMessage {
  /** Message role (user, assistant, system) */
  role: string;
  /** Message content */
  content: string;
}

/**
 * Response format from Grok CLI
 */
export interface GrokCliResponse {
  /** Message role */
  role: string;
  /** Response content */
  content: string;
  /** Token usage information */
  usage?: {
    /** Input tokens used */
    prompt_tokens?: number;
    /** Output tokens used */
    completion_tokens?: number;
    /** Total tokens used */
    total_tokens?: number;
  };
}

/**
 * Configuration options for Grok CLI language model
 */
export interface GrokCliLanguageModelOptions {
  /** Model identifier */
  id: GrokCliModelId;
  /** Model settings */
  settings?: GrokCliSettings;
}
36  packages/ai-sdk-provider-grok-cli/tsconfig.json  Normal file
@@ -0,0 +1,36 @@
{
  "compilerOptions": {
    "target": "ES2022",
    "module": "ESNext",
    "lib": ["ES2022"],
    "declaration": true,
    "declarationMap": true,
    "sourceMap": true,
    "outDir": "./dist",
    "baseUrl": ".",
    "rootDir": "./src",
    "strict": true,
    "noImplicitAny": true,
    "strictNullChecks": true,
    "strictFunctionTypes": true,
    "strictBindCallApply": true,
    "strictPropertyInitialization": true,
    "noImplicitThis": true,
    "alwaysStrict": true,
    "noUnusedLocals": true,
    "noUnusedParameters": true,
    "noImplicitReturns": true,
    "noFallthroughCasesInSwitch": true,
    "esModuleInterop": true,
    "skipLibCheck": true,
    "forceConsistentCasingInFileNames": true,
    "moduleResolution": "bundler",
    "moduleDetection": "force",
    "types": ["node"],
    "resolveJsonModule": true,
    "isolatedModules": true,
    "allowImportingTsExtensions": false
  },
  "include": ["src/**/*"],
  "exclude": ["node_modules", "dist", "tests", "**/*.test.ts", "**/*.spec.ts"]
}
@@ -20,8 +20,7 @@
     "typecheck": "tsc --noEmit"
   },
   "devDependencies": {
-    "dotenv-mono": "^1.5.1",
-    "typescript": "^5.7.3"
+    "typescript": "^5.9.2"
   },
   "dependencies": {
     "tsup": "^8.5.0"
@@ -43,9 +43,9 @@ export const baseConfig: Partial<UserConfig> = {
 export function mergeConfig(
   base: Partial<UserConfig>,
   overrides: Partial<UserConfig>
-): Partial<UserConfig> {
+): UserConfig {
   return {
     ...base,
     ...overrides
-  };
+  } as UserConfig;
 }
@@ -31,21 +31,13 @@
   },
   "dependencies": {
     "@supabase/supabase-js": "^2.57.4",
-    "zod": "^3.23.8"
+    "zod": "^4.1.11"
   },
   "devDependencies": {
-    "@biomejs/biome": "^1.9.4",
-    "@tm/build-config": "*",
     "@types/node": "^22.10.5",
-    "@vitest/coverage-v8": "^2.0.5",
-    "dotenv-mono": "^1.5.1",
-    "ts-node": "^10.9.2",
-    "tsup": "^8.5.0",
-    "typescript": "^5.7.3",
-    "vitest": "^2.1.8"
-  },
-  "engines": {
-    "node": ">=18.0.0"
+    "@vitest/coverage-v8": "^3.2.4",
+    "typescript": "^5.9.2",
+    "vitest": "^3.2.4"
   },
   "files": ["src", "README.md", "CHANGELOG.md"],
   "keywords": ["task-management", "typescript", "ai", "prd", "parser"],
@@ -33,6 +33,9 @@ export class TaskEntity implements Task {
   tags?: string[];
   assignee?: string;
   complexity?: Task['complexity'];
+  recommendedSubtasks?: number;
+  expansionPrompt?: string;
+  complexityReasoning?: string;

   constructor(data: Task | (Omit<Task, 'id'> & { id: number | string })) {
     this.validate(data);

@@ -62,6 +65,9 @@ export class TaskEntity implements Task {
     this.tags = data.tags;
     this.assignee = data.assignee;
     this.complexity = data.complexity;
+    this.recommendedSubtasks = data.recommendedSubtasks;
+    this.expansionPrompt = data.expansionPrompt;
+    this.complexityReasoning = data.complexityReasoning;
   }

   /**

@@ -246,7 +252,10 @@ export class TaskEntity implements Task {
       actualEffort: this.actualEffort,
       tags: this.tags,
       assignee: this.assignee,
-      complexity: this.complexity
+      complexity: this.complexity,
+      recommendedSubtasks: this.recommendedSubtasks,
+      expansionPrompt: this.expansionPrompt,
+      complexityReasoning: this.complexityReasoning
     };
   }
@@ -61,3 +61,12 @@ export { getLogger, createLogger, setGlobalLogger } from './logger/index.js';

 // Re-export executors
 export * from './executors/index.js';
+
+// Re-export reports
+export {
+  ComplexityReportManager,
+  type ComplexityReport,
+  type ComplexityReportMetadata,
+  type ComplexityAnalysis,
+  type TaskComplexityData
+} from './reports/index.js';
185  packages/tm-core/src/reports/complexity-report-manager.ts  Normal file
@@ -0,0 +1,185 @@
/**
 * @fileoverview ComplexityReportManager - Handles loading and managing complexity analysis reports
 * Follows the same pattern as ConfigManager and AuthManager
 */

import { promises as fs } from 'fs';
import path from 'path';
import type {
  ComplexityReport,
  ComplexityAnalysis,
  TaskComplexityData
} from './types.js';
import { getLogger } from '../logger/index.js';

const logger = getLogger('ComplexityReportManager');

/**
 * Manages complexity analysis reports
 * Handles loading, caching, and providing complexity data for tasks
 */
export class ComplexityReportManager {
  private projectRoot: string;
  private reportCache: Map<string, ComplexityReport> = new Map();

  constructor(projectRoot: string) {
    this.projectRoot = projectRoot;
  }

  /**
   * Get the path to the complexity report file for a given tag
   */
  private getReportPath(tag?: string): string {
    const reportsDir = path.join(this.projectRoot, '.taskmaster', 'reports');
    const tagSuffix = tag && tag !== 'master' ? `_${tag}` : '';
    return path.join(reportsDir, `task-complexity-report${tagSuffix}.json`);
  }

  /**
   * Load complexity report for a given tag
   * Results are cached to avoid repeated file reads
   */
  async loadReport(tag?: string): Promise<ComplexityReport | null> {
    const resolvedTag = tag || 'master';
    const cacheKey = resolvedTag;

    // Check cache first
    if (this.reportCache.has(cacheKey)) {
      return this.reportCache.get(cacheKey)!;
    }

    const reportPath = this.getReportPath(tag);

    try {
      // Check if file exists
      await fs.access(reportPath);

      // Read and parse the report
      const content = await fs.readFile(reportPath, 'utf-8');
      const report = JSON.parse(content) as ComplexityReport;

      // Validate basic structure
      if (!report.meta || !Array.isArray(report.complexityAnalysis)) {
        logger.warn(
          `Invalid complexity report structure at ${reportPath}, ignoring`
        );
        return null;
      }

      // Cache the report
      this.reportCache.set(cacheKey, report);

      logger.debug(
        `Loaded complexity report for tag '${resolvedTag}' with ${report.complexityAnalysis.length} analyses`
      );

      return report;
    } catch (error: any) {
      if (error.code === 'ENOENT') {
        // File doesn't exist - this is normal, not all projects have complexity reports
        logger.debug(`No complexity report found for tag '${resolvedTag}'`);
        return null;
      }

      // Other errors (parsing, permissions, etc.)
      logger.warn(
        `Failed to load complexity report for tag '${resolvedTag}': ${error.message}`
      );
      return null;
    }
  }

  /**
   * Get complexity data for a specific task ID
   */
  async getComplexityForTask(
    taskId: string | number,
    tag?: string
  ): Promise<TaskComplexityData | null> {
    const report = await this.loadReport(tag);
    if (!report) {
      return null;
    }

    // Find the analysis for this task
    const analysis = report.complexityAnalysis.find(
      (a) => String(a.taskId) === String(taskId)
    );

    if (!analysis) {
      return null;
    }

    // Convert to TaskComplexityData format
    return {
      complexityScore: analysis.complexityScore,
      recommendedSubtasks: analysis.recommendedSubtasks,
      expansionPrompt: analysis.expansionPrompt,
      complexityReasoning: analysis.complexityReasoning
    };
  }

  /**
   * Get complexity data for multiple tasks at once
   * More efficient than calling getComplexityForTask multiple times
   */
  async getComplexityForTasks(
    taskIds: (string | number)[],
    tag?: string
  ): Promise<Map<string, TaskComplexityData>> {
    const result = new Map<string, TaskComplexityData>();
    const report = await this.loadReport(tag);

    if (!report) {
      return result;
    }

    // Create a map for fast lookups
    const analysisMap = new Map<string, ComplexityAnalysis>();
    report.complexityAnalysis.forEach((analysis) => {
      analysisMap.set(String(analysis.taskId), analysis);
    });

    // Map each task ID to its complexity data
    taskIds.forEach((taskId) => {
      const analysis = analysisMap.get(String(taskId));
      if (analysis) {
        result.set(String(taskId), {
          complexityScore: analysis.complexityScore,
          recommendedSubtasks: analysis.recommendedSubtasks,
          expansionPrompt: analysis.expansionPrompt,
          complexityReasoning: analysis.complexityReasoning
        });
      }
    });

    return result;
  }

  /**
   * Clear the report cache
   * @param tag - Specific tag to clear, or undefined to clear all cached reports
   * Useful when reports are regenerated or modified externally
   */
  clearCache(tag?: string): void {
    if (tag) {
      this.reportCache.delete(tag);
    } else {
      // Clear all cached reports
      this.reportCache.clear();
    }
  }

  /**
   * Check if a complexity report exists for a tag
   */
  async hasReport(tag?: string): Promise<boolean> {
    const reportPath = this.getReportPath(tag);
    try {
      await fs.access(reportPath);
      return true;
    } catch {
      return false;
    }
  }
}
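A brief usage sketch of the manager above; the project path is a placeholder, and the report locations follow from `getReportPath` (`.taskmaster/reports/task-complexity-report.json`, or `task-complexity-report_<tag>.json` for non-master tags).

```ts
import { ComplexityReportManager } from './complexity-report-manager.js';

async function example() {
  const manager = new ComplexityReportManager('/path/to/project'); // placeholder path

  // Single lookup (reads and caches the report on first use).
  const one = await manager.getComplexityForTask(42);

  // Bulk lookup for a specific tag; missing tasks are simply omitted from the map.
  const many = await manager.getComplexityForTasks([1, 2, 3], 'feature-x');

  manager.clearCache(); // drop cached reports after regenerating them externally
  return { one, many };
}
```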
11  packages/tm-core/src/reports/index.ts  Normal file
@@ -0,0 +1,11 @@
/**
 * @fileoverview Reports module exports
 */

export { ComplexityReportManager } from './complexity-report-manager.js';
export type {
  ComplexityReport,
  ComplexityReportMetadata,
  ComplexityAnalysis,
  TaskComplexityData
} from './types.js';
65  packages/tm-core/src/reports/types.ts  Normal file
@@ -0,0 +1,65 @@
/**
 * @fileoverview Type definitions for complexity analysis reports
 */

/**
 * Analysis result for a single task
 */
export interface ComplexityAnalysis {
  /** Task ID being analyzed */
  taskId: string | number;
  /** Task title */
  taskTitle: string;
  /** Complexity score (1-10 scale) */
  complexityScore: number;
  /** Recommended number of subtasks */
  recommendedSubtasks: number;
  /** AI-generated prompt for task expansion */
  expansionPrompt: string;
  /** Reasoning behind the complexity assessment */
  complexityReasoning: string;
}

/**
 * Metadata about the complexity report
 */
export interface ComplexityReportMetadata {
  /** When the report was generated */
  generatedAt: string;
  /** Number of tasks analyzed in this run */
  tasksAnalyzed: number;
  /** Total number of tasks in the file */
  totalTasks?: number;
  /** Total analyses in the report (across all runs) */
  analysisCount?: number;
  /** Complexity threshold score used */
  thresholdScore: number;
  /** Project name */
  projectName?: string;
  /** Whether research mode was used */
  usedResearch: boolean;
}

/**
 * Complete complexity analysis report
 */
export interface ComplexityReport {
  /** Report metadata */
  meta: ComplexityReportMetadata;
  /** Array of complexity analyses */
  complexityAnalysis: ComplexityAnalysis[];
}

/**
 * Complexity data to be attached to a Task
 */
export interface TaskComplexityData {
  /** Complexity score (1-10 scale) */
  complexityScore?: number;
  /** Recommended number of subtasks */
  recommendedSubtasks?: number;
  /** AI-generated expansion prompt */
  expansionPrompt?: string;
  /** Reasoning behind the assessment */
  complexityReasoning?: string;
}
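To make the shape concrete, here is a small value that type-checks against these interfaces; all field values are invented for illustration.

```ts
import type { ComplexityReport } from './types.js';

// Illustrative values only.
const sample: ComplexityReport = {
  meta: {
    generatedAt: '2024-01-01T00:00:00.000Z',
    tasksAnalyzed: 1,
    thresholdScore: 5,
    usedResearch: false
  },
  complexityAnalysis: [
    {
      taskId: 42,
      taskTitle: 'Add provider',
      complexityScore: 7,
      recommendedSubtasks: 4,
      expansionPrompt: 'Break the provider work into subtasks covering ...',
      complexityReasoning: 'Touches CLI integration, config and tests.'
    }
  ]
};
```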
@@ -162,7 +162,7 @@ export class SupabaseTaskRepository {
       TaskUpdateSchema.parse(updates);
     } catch (error) {
       if (error instanceof z.ZodError) {
-        const errorMessages = error.errors
+        const errorMessages = error.issues
           .map((err) => `${err.path.join('.')}: ${err.message}`)
           .join(', ');
         throw new Error(`Invalid task update data: ${errorMessages}`);
@@ -397,16 +397,6 @@
       }
     }

-    // Complexity filter
-    if (filter.complexity) {
-      const complexities = Array.isArray(filter.complexity)
-        ? filter.complexity
-        : [filter.complexity];
-      if (!task.complexity || !complexities.includes(task.complexity)) {
-        return false;
-      }
-    }
-
     // Search filter
     if (filter.search) {
       const searchLower = filter.search.toLowerCase();
@@ -11,6 +11,7 @@ import type {
 import { FormatHandler } from './format-handler.js';
 import { FileOperations } from './file-operations.js';
 import { PathResolver } from './path-resolver.js';
+import { ComplexityReportManager } from '../../reports/complexity-report-manager.js';

 /**
  * File-based storage implementation using a single tasks.json file with separated concerns

@@ -19,11 +20,13 @@ export class FileStorage implements IStorage {
   private formatHandler: FormatHandler;
   private fileOps: FileOperations;
   private pathResolver: PathResolver;
+  private complexityManager: ComplexityReportManager;

   constructor(projectPath: string) {
     this.formatHandler = new FormatHandler();
     this.fileOps = new FileOperations();
     this.pathResolver = new PathResolver(projectPath);
+    this.complexityManager = new ComplexityReportManager(projectPath);
   }

   /**

@@ -87,6 +90,7 @@ export class FileStorage implements IStorage {

   /**
    * Load tasks from the single tasks.json file for a specific tag
+   * Enriches tasks with complexity data from the complexity report
    */
   async loadTasks(tag?: string): Promise<Task[]> {
     const filePath = this.pathResolver.getTasksPath();

@@ -94,7 +98,10 @@ export class FileStorage implements IStorage {

     try {
       const rawData = await this.fileOps.readJson(filePath);
-      return this.formatHandler.extractTasks(rawData, resolvedTag);
+      const tasks = this.formatHandler.extractTasks(rawData, resolvedTag);
+
+      // Enrich tasks with complexity data
+      return await this.enrichTasksWithComplexity(tasks, resolvedTag);
     } catch (error: any) {
       if (error.code === 'ENOENT') {
         return []; // File doesn't exist, return empty array

@@ -465,8 +472,11 @@ export class FileStorage implements IStorage {
       const allDone = subs.every(isDoneLike);
       const anyInProgress = subs.some((s) => norm(s) === 'in-progress');
       const anyDone = subs.some(isDoneLike);
+      const allPending = subs.every((s) => norm(s) === 'pending');

       if (allDone) parentNewStatus = 'done';
       else if (anyInProgress || anyDone) parentNewStatus = 'in-progress';
+      else if (allPending) parentNewStatus = 'pending';
     }

     // Always bump updatedAt; update status only if changed

@@ -593,6 +603,46 @@ export class FileStorage implements IStorage {

     await this.saveTasks(tasks, targetTag);
   }
+
+  /**
+   * Enrich tasks with complexity data from the complexity report
+   * Private helper method called by loadTasks()
+   */
+  private async enrichTasksWithComplexity(
+    tasks: Task[],
+    tag: string
+  ): Promise<Task[]> {
+    // Get all task IDs for bulk lookup
+    const taskIds = tasks.map((t) => t.id);
+
+    // Load complexity data for all tasks at once (more efficient)
+    const complexityMap = await this.complexityManager.getComplexityForTasks(
+      taskIds,
+      tag
+    );
+
+    // If no complexity data found, return tasks as-is
+    if (complexityMap.size === 0) {
+      return tasks;
+    }
+
+    // Enrich each task with its complexity data
+    return tasks.map((task) => {
+      const complexityData = complexityMap.get(String(task.id));
+      if (!complexityData) {
+        return task;
+      }
+
+      // Merge complexity data into the task
+      return {
+        ...task,
+        complexity: complexityData.complexityScore,
+        recommendedSubtasks: complexityData.recommendedSubtasks,
+        expansionPrompt: complexityData.expansionPrompt,
+        complexityReasoning: complexityData.complexityReasoning
+      };
+    });
+  }
 }

 // Export as default for convenience
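The parent-status rollup above now has a third branch for the all-pending case (the change described in the changeset). A distilled sketch of the decision rule, with `isDoneLike` approximated since its exact definition is outside this hunk:

```ts
// Sketch of the rollup rule; `isDoneLike` is assumed to accept done/completed-style values.
function deriveParentStatus(subs: string[]): string | undefined {
  const norm = (s: string) => s.toLowerCase();
  const isDoneLike = (s: string) => ['done', 'completed'].includes(norm(s));
  if (subs.length === 0) return undefined;
  if (subs.every(isDoneLike)) return 'done';
  if (subs.some((s) => norm(s) === 'in-progress') || subs.some(isDoneLike))
    return 'in-progress';
  if (subs.every((s) => norm(s) === 'pending')) return 'pending'; // added in this PR
  return undefined; // leave the parent status unchanged
}
```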
@@ -82,7 +82,7 @@ export class StorageFactory {
         apiAccessToken: credentials.token,
         apiEndpoint:
           config.storage?.apiEndpoint ||
-          process.env.HAMSTER_API_URL ||
+          process.env.TM_PUBLIC_BASE_DOMAIN ||
           'https://tryhamster.com/api'
       };
       config.storage = nextStorage;

@@ -112,7 +112,7 @@ export class StorageFactory {
         apiAccessToken: credentials.token,
         apiEndpoint:
           config.storage?.apiEndpoint ||
-          process.env.HAMSTER_API_URL ||
+          process.env.TM_PUBLIC_BASE_DOMAIN ||
           'https://tryhamster.com/api'
       };
       config.storage = nextStorage;
@@ -72,7 +72,13 @@ export interface Task {
  actualEffort?: number;
  tags?: string[];
  assignee?: string;
- complexity?: TaskComplexity;
+
+ // Complexity analysis (from complexity report)
+ // Can be either enum ('simple' | 'moderate' | 'complex' | 'very-complex') or numeric score (1-10)
+ complexity?: TaskComplexity | number;
+ recommendedSubtasks?: number;
+ expansionPrompt?: string;
+ complexityReasoning?: string;
  }

  /**

@@ -145,7 +151,6 @@ export interface TaskFilter {
  hasSubtasks?: boolean;
  search?: string;
  assignee?: string;
- complexity?: TaskComplexity | TaskComplexity[];
  }

  /**
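Since `complexity` can now be either an enum-style label or a 1-10 score, consumers need a runtime check before formatting it. A minimal sketch (the helper is hypothetical; the label values are taken from the comment in the hunk above):

```js
function describeComplexity(complexity) {
	if (complexity === undefined) return 'not analyzed';
	// Numeric scores come from the complexity report (1-10 scale).
	if (typeof complexity === 'number') return `score ${complexity}/10`;
	// Otherwise it is one of 'simple' | 'moderate' | 'complex' | 'very-complex'.
	return complexity;
}

console.log(describeComplexity(7)); // "score 7/10"
console.log(describeComplexity('moderate')); // "moderate"
```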
@@ -41,6 +41,7 @@ import {
  AzureProvider,
  BedrockAIProvider,
  ClaudeCodeProvider,
+ CodexCliProvider,
  GeminiCliProvider,
  GoogleAIProvider,
  GrokCliProvider,

@@ -70,6 +71,7 @@ const PROVIDERS = {
  azure: new AzureProvider(),
  vertex: new VertexAIProvider(),
  'claude-code': new ClaudeCodeProvider(),
+ 'codex-cli': new CodexCliProvider(),
  'gemini-cli': new GeminiCliProvider(),
  'grok-cli': new GrokCliProvider()
  };

@@ -93,31 +95,55 @@ function _getProvider(providerName) {

  // Helper function to get cost for a specific model
  function _getCostForModel(providerName, modelId) {
- const DEFAULT_COST = { inputCost: 0, outputCost: 0, currency: 'USD' };
+ const DEFAULT_COST = {
+   inputCost: 0,
+   outputCost: 0,
+   currency: 'USD',
+   isUnknown: false
+ };
+
  if (!MODEL_MAP || !MODEL_MAP[providerName]) {
    log(
      'warn',
      `Provider "${providerName}" not found in MODEL_MAP. Cannot determine cost for model ${modelId}.`
    );
-   return DEFAULT_COST;
+   return { ...DEFAULT_COST, isUnknown: true };
  }

  const modelData = MODEL_MAP[providerName].find((m) => m.id === modelId);

- if (!modelData?.cost_per_1m_tokens) {
+ if (!modelData) {
    log(
      'debug',
-     `Cost data not found for model "${modelId}" under provider "${providerName}". Assuming zero cost.`
+     `Model "${modelId}" not found under provider "${providerName}". Assuming unknown cost.`
    );
-   return DEFAULT_COST;
+   return { ...DEFAULT_COST, isUnknown: true };
+ }
+
+ // Check if cost_per_1m_tokens is explicitly null (unknown pricing)
+ if (modelData.cost_per_1m_tokens === null) {
+   log(
+     'debug',
+     `Cost data is null for model "${modelId}" under provider "${providerName}". Pricing unknown.`
+   );
+   return { ...DEFAULT_COST, isUnknown: true };
+ }
+
+ // Check if cost_per_1m_tokens is missing/undefined (also unknown)
+ if (modelData.cost_per_1m_tokens === undefined) {
+   log(
+     'debug',
+     `Cost data not found for model "${modelId}" under provider "${providerName}". Pricing unknown.`
+   );
+   return { ...DEFAULT_COST, isUnknown: true };
  }

  const costs = modelData.cost_per_1m_tokens;
  return {
    inputCost: costs.input || 0,
    outputCost: costs.output || 0,
-   currency: costs.currency || 'USD'
+   currency: costs.currency || 'USD',
+   isUnknown: false
  };
  }

@@ -867,8 +893,8 @@ async function logAiUsage({
  const timestamp = new Date().toISOString();
  const totalTokens = (inputTokens || 0) + (outputTokens || 0);

- // Destructure currency along with costs
+ // Destructure currency along with costs and unknown flag
- const { inputCost, outputCost, currency } = _getCostForModel(
+ const { inputCost, outputCost, currency, isUnknown } = _getCostForModel(
    providerName,
    modelId
  );

@@ -890,7 +916,8 @@ async function logAiUsage({
  outputTokens: outputTokens || 0,
  totalTokens,
  totalCost,
- currency // Add currency to the telemetry data
+ currency, // Add currency to the telemetry data
+ isUnknownCost: isUnknown // Flag to indicate if pricing is unknown
  };

  if (getDebugFlag()) {
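A hedged sketch of how the destructured costs and the new `isUnknown` flag might flow into the telemetry object; the per-million-token arithmetic is an assumption about how `totalCost` is computed elsewhere in this file:

```js
// Illustrative numbers: 1,500 input tokens and 500 output tokens on a model
// priced at $3 / $15 per 1M tokens. The division by 1,000,000 is the assumption here.
const { inputCost, outputCost, currency, isUnknown } = {
	inputCost: 3,
	outputCost: 15,
	currency: 'USD',
	isUnknown: false
};
const inputTokens = 1500;
const outputTokens = 500;

const totalCost =
	(inputTokens / 1_000_000) * inputCost + (outputTokens / 1_000_000) * outputCost;
// 0.0045 + 0.0075 = 0.012 USD

const telemetryData = {
	inputTokens,
	outputTokens,
	totalTokens: inputTokens + outputTokens,
	totalCost,
	currency,
	isUnknownCost: isUnknown // consumers can flag or suppress cost display when true
};
console.log(telemetryData);
```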
@@ -1847,7 +1847,7 @@ function registerCommands(programInstance) {
  )
  .option(
    '-r, --research',
-   'Use Perplexity AI for research-backed complexity analysis'
+   'Use configured research model for research-backed complexity analysis'
  )
  .option(
    '-i, --id <ids>',

@@ -3586,6 +3586,10 @@ ${result.result}
  '--gemini-cli',
  'Allow setting a Gemini CLI model ID (use with --set-*)'
  )
+ .option(
+   '--codex-cli',
+   'Allow setting a Codex CLI model ID (use with --set-*)'
+ )
  .addHelpText(
    'after',
    `

@@ -3601,6 +3605,7 @@ Examples:
  $ task-master models --set-main gpt-4o --azure # Set custom Azure OpenAI model for main role
  $ task-master models --set-main claude-3-5-sonnet@20241022 --vertex # Set custom Vertex AI model for main role
  $ task-master models --set-main gemini-2.5-pro --gemini-cli # Set Gemini CLI model for main role
+ $ task-master models --set-main gpt-5-codex --codex-cli # Set Codex CLI model for main role
  $ task-master models --setup # Run interactive setup`
  )
  .action(async (options) => {

@@ -3617,12 +3622,13 @@ Examples:
  options.ollama,
  options.bedrock,
  options.claudeCode,
- options.geminiCli
+ options.geminiCli,
+ options.codexCli
  ].filter(Boolean).length;
  if (providerFlags > 1) {
    console.error(
      chalk.red(
-       'Error: Cannot use multiple provider flags (--openrouter, --ollama, --bedrock, --claude-code, --gemini-cli) simultaneously.'
+       'Error: Cannot use multiple provider flags (--openrouter, --ollama, --bedrock, --claude-code, --gemini-cli, --codex-cli) simultaneously.'
      )
    );
    process.exit(1);

@@ -3668,7 +3674,9 @@ Examples:
  ? 'claude-code'
  : options.geminiCli
    ? 'gemini-cli'
-   : undefined
+   : options.codexCli
+     ? 'codex-cli'
+     : undefined
  });
  if (result.success) {
    console.log(chalk.green(`✅ ${result.data.message}`));

@@ -3694,7 +3702,9 @@ Examples:
  ? 'claude-code'
  : options.geminiCli
    ? 'gemini-cli'
-   : undefined
+   : options.codexCli
+     ? 'codex-cli'
+     : undefined
  });
  if (result.success) {
    console.log(chalk.green(`✅ ${result.data.message}`));

@@ -3722,7 +3732,9 @@ Examples:
  ? 'claude-code'
  : options.geminiCli
    ? 'gemini-cli'
-   : undefined
+   : options.codexCli
+     ? 'codex-cli'
+     : undefined
  });
  if (result.success) {
    console.log(chalk.green(`✅ ${result.data.message}`));
@@ -58,6 +58,7 @@ const DEFAULTS = {
  enableCodebaseAnalysis: true
  },
  claudeCode: {},
+ codexCli: {},
  grokCli: {
    timeout: 120000,
    workingDirectory: null,

@@ -138,6 +139,7 @@ function _loadAndValidateConfig(explicitRoot = null) {
  },
  global: { ...defaults.global, ...parsedConfig?.global },
  claudeCode: { ...defaults.claudeCode, ...parsedConfig?.claudeCode },
+ codexCli: { ...defaults.codexCli, ...parsedConfig?.codexCli },
  grokCli: { ...defaults.grokCli, ...parsedConfig?.grokCli }
  };
  configSource = `file (${configPath})`; // Update source info

@@ -184,6 +186,9 @@ function _loadAndValidateConfig(explicitRoot = null) {
  if (config.claudeCode && !isEmpty(config.claudeCode)) {
    config.claudeCode = validateClaudeCodeSettings(config.claudeCode);
  }
+ if (config.codexCli && !isEmpty(config.codexCli)) {
+   config.codexCli = validateCodexCliSettings(config.codexCli);
+ }
  } catch (error) {
  // Use console.error for actual errors during parsing
  console.error(

@@ -310,6 +315,8 @@ function validateProviderModelCombination(providerName, modelId) {
  function validateClaudeCodeSettings(settings) {
  // Define the base settings schema without commandSpecific first
  const BaseSettingsSchema = z.object({
+   pathToClaudeCodeExecutable: z.string().optional(),
+   // Use number().int() for integer validation in Zod
    maxTurns: z.number().int().positive().optional(),
    customSystemPrompt: z.string().optional(),
    appendSystemPrompt: z.string().optional(),

@@ -325,19 +332,22 @@ function validateClaudeCodeSettings(settings) {
  type: z.enum(['stdio', 'sse']).optional(),
  command: z.string(),
  args: z.array(z.string()).optional(),
- env: z.record(z.string()).optional(),
+ env: z.record(z.string(), z.string()).optional(),
- url: z.string().url().optional(),
+ url: z.url().optional(),
- headers: z.record(z.string()).optional()
+ headers: z.record(z.string(), z.string()).optional()
  })
  )
  .optional()
  });

- // Define CommandSpecificSchema using the base schema
+ // Define CommandSpecificSchema using flexible keys, but restrict to known commands
- const CommandSpecificSchema = z.record(
+ const CommandSpecificSchema = z
-   z.enum(AI_COMMAND_NAMES),
+   .record(z.string(), BaseSettingsSchema)
-   BaseSettingsSchema
+   .refine(
- );
+     (obj) =>
+       Object.keys(obj || {}).every((k) => AI_COMMAND_NAMES.includes(k)),
+     { message: 'Invalid command name in commandSpecific' }
+   );

  // Define the full settings schema with commandSpecific
  const SettingsSchema = BaseSettingsSchema.extend({

@@ -361,6 +371,57 @@ function validateClaudeCodeSettings(settings) {
  return validatedSettings;
  }

+ /**
+  * Validates Codex CLI provider custom settings
+  * Mirrors the ai-sdk-provider-codex-cli options
+  * @param {object} settings The settings to validate
+  * @returns {object} The validated settings
+  */
+ function validateCodexCliSettings(settings) {
+   const BaseSettingsSchema = z.object({
+     codexPath: z.string().optional(),
+     cwd: z.string().optional(),
+     approvalMode: z
+       .enum(['untrusted', 'on-failure', 'on-request', 'never'])
+       .optional(),
+     sandboxMode: z
+       .enum(['read-only', 'workspace-write', 'danger-full-access'])
+       .optional(),
+     fullAuto: z.boolean().optional(),
+     dangerouslyBypassApprovalsAndSandbox: z.boolean().optional(),
+     skipGitRepoCheck: z.boolean().optional(),
+     color: z.enum(['always', 'never', 'auto']).optional(),
+     allowNpx: z.boolean().optional(),
+     outputLastMessageFile: z.string().optional(),
+     env: z.record(z.string(), z.string()).optional(),
+     verbose: z.boolean().optional(),
+     logger: z.union([z.object({}).passthrough(), z.literal(false)]).optional()
+   });
+
+   const CommandSpecificSchema = z
+     .record(z.string(), BaseSettingsSchema)
+     .refine(
+       (obj) =>
+         Object.keys(obj || {}).every((k) => AI_COMMAND_NAMES.includes(k)),
+       { message: 'Invalid command name in commandSpecific' }
+     );
+
+   const SettingsSchema = BaseSettingsSchema.extend({
+     commandSpecific: CommandSpecificSchema.optional()
+   });
+
+   try {
+     return SettingsSchema.parse(settings);
+   } catch (error) {
+     console.warn(
+       chalk.yellow(
+         `Warning: Invalid Codex CLI settings in config: ${error.message}. Falling back to default.`
+       )
+     );
+     return {};
+   }
+ }
+
  // --- Claude Code Settings Getters ---

  function getClaudeCodeSettings(explicitRoot = null, forceReload = false) {
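A hedged example of a `codexCli` block this validator would accept in the Task Master config file (all keys optional per the schema; values illustrative). Keys under `commandSpecific` must be names listed in `AI_COMMAND_NAMES`; `expand-task` is used here because it appears as a `commandName` elsewhere in this diff:

```json
{
  "codexCli": {
    "approvalMode": "on-failure",
    "sandboxMode": "workspace-write",
    "skipGitRepoCheck": true,
    "commandSpecific": {
      "expand-task": {
        "sandboxMode": "read-only",
        "verbose": true
      }
    }
  }
}
```

The `getCodexCliSettingsForCommand` getter added in the next hunk merges the base block with the per-command override via object spread, so the command-specific keys win.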
@@ -369,6 +430,23 @@ function getClaudeCodeSettings(explicitRoot = null, forceReload = false) {
  return { ...DEFAULTS.claudeCode, ...(config?.claudeCode || {}) };
  }

+ // --- Codex CLI Settings Getters ---
+
+ function getCodexCliSettings(explicitRoot = null, forceReload = false) {
+   const config = getConfig(explicitRoot, forceReload);
+   return { ...DEFAULTS.codexCli, ...(config?.codexCli || {}) };
+ }
+
+ function getCodexCliSettingsForCommand(
+   commandName,
+   explicitRoot = null,
+   forceReload = false
+ ) {
+   const settings = getCodexCliSettings(explicitRoot, forceReload);
+   const commandSpecific = settings?.commandSpecific || {};
+   return { ...settings, ...commandSpecific[commandName] };
+ }
+
  function getClaudeCodeSettingsForCommand(
    commandName,
    explicitRoot = null,

@@ -486,7 +564,8 @@ function hasCodebaseAnalysis(
  return (
    currentProvider === CUSTOM_PROVIDERS.CLAUDE_CODE ||
    currentProvider === CUSTOM_PROVIDERS.GEMINI_CLI ||
-   currentProvider === CUSTOM_PROVIDERS.GROK_CLI
+   currentProvider === CUSTOM_PROVIDERS.GROK_CLI ||
+   currentProvider === CUSTOM_PROVIDERS.CODEX_CLI
  );
  }

@@ -716,7 +795,8 @@ function isApiKeySet(providerName, session = null, projectRoot = null) {
  CUSTOM_PROVIDERS.BEDROCK,
  CUSTOM_PROVIDERS.MCP,
  CUSTOM_PROVIDERS.GEMINI_CLI,
- CUSTOM_PROVIDERS.GROK_CLI
+ CUSTOM_PROVIDERS.GROK_CLI,
+ CUSTOM_PROVIDERS.CODEX_CLI
  ];

  if (providersWithoutApiKeys.includes(providerName?.toLowerCase())) {

@@ -728,6 +808,11 @@ function isApiKeySet(providerName, session = null, projectRoot = null) {
  return true; // No API key needed
  }

+ // Codex CLI supports OAuth via codex login; API key optional
+ if (providerName?.toLowerCase() === 'codex-cli') {
+   return true; // Treat as OK even without key
+ }
+
  const keyMap = {
    openai: 'OPENAI_API_KEY',
    anthropic: 'ANTHROPIC_API_KEY',

@@ -831,6 +916,8 @@ function getMcpApiKeyStatus(providerName, projectRoot = null) {
  return true; // No key needed
  case 'claude-code':
    return true; // No key needed
+ case 'codex-cli':
+   return true; // OAuth/subscription via Codex CLI
  case 'mistral':
    apiKeyToCheck = mcpEnv.MISTRAL_API_KEY;
    placeholderValue = 'YOUR_MISTRAL_API_KEY_HERE';

@@ -1023,7 +1110,8 @@ export const providersWithoutApiKeys = [
  CUSTOM_PROVIDERS.BEDROCK,
  CUSTOM_PROVIDERS.GEMINI_CLI,
  CUSTOM_PROVIDERS.GROK_CLI,
- CUSTOM_PROVIDERS.MCP
+ CUSTOM_PROVIDERS.MCP,
+ CUSTOM_PROVIDERS.CODEX_CLI
  ];

  export {

@@ -1035,6 +1123,9 @@ export {
  // Claude Code settings
  getClaudeCodeSettings,
  getClaudeCodeSettingsForCommand,
+ // Codex CLI settings
+ getCodexCliSettings,
+ getCodexCliSettingsForCommand,
  // Grok CLI settings
  getGrokCliSettings,
  getGrokCliSettingsForCommand,

@@ -1042,6 +1133,7 @@ export {
  validateProvider,
  validateProviderModelCombination,
  validateClaudeCodeSettings,
+ validateCodexCliSettings,
  VALIDATED_PROVIDERS,
  CUSTOM_PROVIDERS,
  ALL_PROVIDERS,
@@ -69,6 +69,30 @@
  "supported": true
  }
  ],
+ "codex-cli": [
+   {
+     "id": "gpt-5",
+     "swe_score": 0.749,
+     "cost_per_1m_tokens": {
+       "input": 0,
+       "output": 0
+     },
+     "allowed_roles": ["main", "fallback", "research"],
+     "max_tokens": 128000,
+     "supported": true
+   },
+   {
+     "id": "gpt-5-codex",
+     "swe_score": 0.749,
+     "cost_per_1m_tokens": {
+       "input": 0,
+       "output": 0
+     },
+     "allowed_roles": ["main", "fallback", "research"],
+     "max_tokens": 128000,
+     "supported": true
+   }
+ ],
  "mcp": [
  {
  "id": "mcp-sampling",

@@ -522,7 +546,7 @@
  "supported": true
  },
  {
- "id": "deep-research",
+ "id": "sonar-deep-research",
  "swe_score": 0.211,
  "cost_per_1m_tokens": {
  "input": 2,
@@ -1,8 +1,5 @@
- import path from 'path';
-
  import { log, readJSON, writeJSON, getCurrentTag } from '../utils.js';
  import { isTaskDependentOn } from '../task-manager.js';
- import generateTaskFiles from './generate-task-files.js';

  /**
   * Add a subtask to a parent task

@@ -142,11 +139,7 @@ async function addSubtask(
  // Write the updated tasks back to the file with proper context
  writeJSON(tasksPath, data, projectRoot, tag);

- // Generate task files if requested
+ // Note: Task file generation is no longer supported and has been removed
- if (generateFiles) {
-   log('info', 'Regenerating task files...');
-   await generateTaskFiles(tasksPath, path.dirname(tasksPath), context);
- }

  return newSubtask;
  } catch (error) {
@@ -2,7 +2,6 @@ import path from 'path';
  import chalk from 'chalk';
  import boxen from 'boxen';
  import Table from 'cli-table3';
- import { z } from 'zod';
  import Fuse from 'fuse.js'; // Import Fuse.js for advanced fuzzy search

  import {

@@ -29,6 +28,7 @@ import { getDefaultPriority, hasCodebaseAnalysis } from '../config-manager.js';
  import { getPromptManager } from '../prompt-manager.js';
  import ContextGatherer from '../utils/contextGatherer.js';
  import generateTaskFiles from './generate-task-files.js';
+ import { COMMAND_SCHEMAS } from '../../../src/schemas/registry.js';
  import {
    TASK_PRIORITY_OPTIONS,
    DEFAULT_TASK_PRIORITY,

@@ -36,26 +36,6 @@ import {
  normalizeTaskPriority
  } from '../../../src/constants/task-priority.js';

- // Define Zod schema for the expected AI output object
- const AiTaskDataSchema = z.object({
-   title: z.string().describe('Clear, concise title for the task'),
-   description: z
-     .string()
-     .describe('A one or two sentence description of the task'),
-   details: z
-     .string()
-     .describe('In-depth implementation details, considerations, and guidance'),
-   testStrategy: z
-     .string()
-     .describe('Detailed approach for verifying task completion'),
-   dependencies: z
-     .array(z.number())
-     .nullable()
-     .describe(
-       'Array of task IDs that this task depends on (must be completed before this task can start)'
-     )
- });
-
  /**
   * Get all tasks from all tags
   * @param {Object} rawData - The raw tagged data object

@@ -451,7 +431,7 @@ async function addTask(
  role: serviceRole,
  session: session,
  projectRoot: projectRoot,
- schema: AiTaskDataSchema,
+ schema: COMMAND_SCHEMAS['add-task'],
  objectName: 'newTaskData',
  systemPrompt: systemPrompt,
  prompt: userPrompt,
@@ -11,7 +11,8 @@ import {
  displayAiUsageSummary
  } from '../ui.js';

- import { generateTextService } from '../ai-services-unified.js';
+ import { generateObjectService } from '../ai-services-unified.js';
+ import { COMMAND_SCHEMAS } from '../../../src/schemas/registry.js';

  import {
    getDebugFlag,

@@ -29,46 +30,6 @@ import { ContextGatherer } from '../utils/contextGatherer.js';
  import { FuzzyTaskSearch } from '../utils/fuzzyTaskSearch.js';
  import { flattenTasksWithSubtasks } from '../utils.js';

- /**
-  * Generates the prompt for complexity analysis.
-  * (Moved from ai-services.js and simplified)
-  * @param {Object} tasksData - The tasks data object.
-  * @param {string} [gatheredContext] - The gathered context for the analysis.
-  * @returns {string} The generated prompt.
-  */
- function generateInternalComplexityAnalysisPrompt(
-   tasksData,
-   gatheredContext = ''
- ) {
-   const tasksString = JSON.stringify(tasksData.tasks, null, 2);
-   let prompt = `Analyze the following tasks to determine their complexity (1-10 scale) and recommend the number of subtasks for expansion. Provide a brief reasoning and an initial expansion prompt for each.
-
- Tasks:
- ${tasksString}`;
-
-   if (gatheredContext) {
-     prompt += `\n\n# Project Context\n\n${gatheredContext}`;
-   }
-
-   prompt += `
-
- Respond ONLY with a valid JSON array matching the schema:
- [
-   {
-     "taskId": <number>,
-     "taskTitle": "<string>",
-     "complexityScore": <number 1-10>,
-     "recommendedSubtasks": <number>,
-     "expansionPrompt": "<string>",
-     "reasoning": "<string>"
-   },
-   ...
- ]
-
- Do not include any explanatory text, markdown formatting, or code block markers before or after the JSON array.`;
-   return prompt;
- }
-
  /**
   * Analyzes task complexity and generates expansion recommendations
   * @param {Object} options Command options

@@ -446,12 +407,14 @@ async function analyzeTaskComplexity(options, context = {}) {
  try {
    const role = useResearch ? 'research' : 'main';

-   aiServiceResponse = await generateTextService({
+   aiServiceResponse = await generateObjectService({
      prompt,
      systemPrompt,
      role,
      session,
      projectRoot,
+     schema: COMMAND_SCHEMAS['analyze-complexity'],
+     objectName: 'complexityAnalysis',
      commandName: 'analyze-complexity',
      outputType: mcpLog ? 'mcp' : 'cli'
    });

@@ -463,63 +426,15 @@ async function analyzeTaskComplexity(options, context = {}) {
  if (outputFormat === 'text') {
    readline.clearLine(process.stdout, 0);
    readline.cursorTo(process.stdout, 0);
-   console.log(
-     chalk.green('AI service call complete. Parsing response...')
-   );
+   console.log(chalk.green('AI service call complete.'));
  }

- reportLog('Parsing complexity analysis from text response...', 'info');
- try {
-   let cleanedResponse = aiServiceResponse.mainResult;
-   cleanedResponse = cleanedResponse.trim();
-
-   const codeBlockMatch = cleanedResponse.match(
-     /```(?:json)?\s*([\s\S]*?)\s*```/
-   );
-   if (codeBlockMatch) {
-     cleanedResponse = codeBlockMatch[1].trim();
-   } else {
-     const firstBracket = cleanedResponse.indexOf('[');
-     const lastBracket = cleanedResponse.lastIndexOf(']');
-     if (firstBracket !== -1 && lastBracket > firstBracket) {
-       cleanedResponse = cleanedResponse.substring(
-         firstBracket,
-         lastBracket + 1
-       );
-     } else {
-       reportLog(
-         'Warning: Response does not appear to be a JSON array.',
-         'warn'
-       );
-     }
-   }
-
-   if (outputFormat === 'text' && getDebugFlag(session)) {
-     console.log(chalk.gray('Attempting to parse cleaned JSON...'));
-     console.log(chalk.gray('Cleaned response (first 100 chars):'));
-     console.log(chalk.gray(cleanedResponse.substring(0, 100)));
-     console.log(chalk.gray('Last 100 chars:'));
-     console.log(
-       chalk.gray(cleanedResponse.substring(cleanedResponse.length - 100))
-     );
-   }
-
-   complexityAnalysis = JSON.parse(cleanedResponse);
- } catch (parseError) {
-   if (loadingIndicator) stopLoadingIndicator(loadingIndicator);
-   reportLog(
-     `Error parsing complexity analysis JSON: ${parseError.message}`,
-     'error'
-   );
-   if (outputFormat === 'text') {
-     console.error(
-       chalk.red(
-         `Error parsing complexity analysis JSON: ${parseError.message}`
-       )
-     );
-   }
-   throw parseError;
- }
+ // With generateObject, we get structured data directly
+ complexityAnalysis = aiServiceResponse.mainResult.complexityAnalysis;
+ reportLog(
+   `Received ${complexityAnalysis.length} complexity analyses from AI.`,
+   'info'
+ );

  const taskIds = tasksData.tasks.map((t) => t.id);
  const analysisTaskIds = complexityAnalysis.map((a) => a.taskId);
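A sketch of the structured result shape the new code expects back from `generateObjectService`; the field names mirror the JSON schema the removed prompt asked for, while the authoritative definition lives in `COMMAND_SCHEMAS['analyze-complexity']` (not shown in this diff):

```js
// Illustrative data only — the shape assumed by `aiServiceResponse.mainResult.complexityAnalysis` above.
const aiServiceResponse = {
	mainResult: {
		complexityAnalysis: [
			{
				taskId: 12,
				taskTitle: 'Implement file storage',
				complexityScore: 8,
				recommendedSubtasks: 5,
				expansionPrompt: 'Break the storage layer into read, write, and migration steps',
				reasoning: 'Touches persistence, tagging, and error handling'
			}
		]
	}
};

const complexityAnalysis = aiServiceResponse.mainResult.complexityAnalysis;
if (!Array.isArray(complexityAnalysis)) {
	throw new Error('Expected a complexityAnalysis array from generateObjectService');
}
console.log(`Received ${complexityAnalysis.length} complexity analyses from AI.`);
```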
@@ -1,22 +1,22 @@
  import fs from 'fs';
  import path from 'path';
- import { z } from 'zod';

  import {
+   getTagAwareFilePath,
+   isSilentMode,
    log,
    readJSON,
-   writeJSON,
+   writeJSON
-   isSilentMode,
-   getTagAwareFilePath
  } from '../utils.js';

  import {
+   displayAiUsageSummary,
    startLoadingIndicator,
-   stopLoadingIndicator,
+   stopLoadingIndicator
-   displayAiUsageSummary
  } from '../ui.js';

- import { generateTextService } from '../ai-services-unified.js';
+ import { COMMAND_SCHEMAS } from '../../../src/schemas/registry.js';
+ import { generateObjectService } from '../ai-services-unified.js';

  import {
    getDefaultSubtasks,

@@ -24,265 +24,12 @@ import {
  hasCodebaseAnalysis
  } from '../config-manager.js';
  import { getPromptManager } from '../prompt-manager.js';
- import generateTaskFiles from './generate-task-files.js';
+ import { findProjectRoot, flattenTasksWithSubtasks } from '../utils.js';
- import { COMPLEXITY_REPORT_FILE } from '../../../src/constants/paths.js';
  import { ContextGatherer } from '../utils/contextGatherer.js';
  import { FuzzyTaskSearch } from '../utils/fuzzyTaskSearch.js';
- import { flattenTasksWithSubtasks, findProjectRoot } from '../utils.js';
-
- // --- Zod Schemas (Keep from previous step) ---
- const subtaskSchema = z
-   .object({
-     id: z
-       .number()
-       .int()
-       .positive()
-       .describe('Sequential subtask ID starting from 1'),
-     title: z.string().min(5).describe('Clear, specific title for the subtask'),
-     description: z
-       .string()
-       .min(10)
-       .describe('Detailed description of the subtask'),
-     dependencies: z
-       .array(z.string())
-       .describe(
-         'Array of subtask dependencies within the same parent task. Use format ["parentTaskId.1", "parentTaskId.2"]. Subtasks can only depend on siblings, not external tasks.'
-       ),
-     details: z.string().min(20).describe('Implementation details and guidance'),
-     status: z
-       .string()
-       .describe(
-         'The current status of the subtask (should be pending initially)'
-       ),
-     testStrategy: z
-       .string()
-       .nullable()
-       .describe('Approach for testing this subtask')
-       .default('')
-   })
-   .strict();
- const subtaskArraySchema = z.array(subtaskSchema);
- const subtaskWrapperSchema = z.object({
-   subtasks: subtaskArraySchema.describe('The array of generated subtasks.')
- });
- // --- End Zod Schemas ---
-
  /**
- * Parse subtasks from AI's text response. Includes basic cleanup.
+ * Expand a task into subtasks using the unified AI service (generateObjectService).
- * @param {string} text - Response text from AI.
- * @param {number} startId - Starting subtask ID expected.
- * @param {number} expectedCount - Expected number of subtasks.
- * @param {number} parentTaskId - Parent task ID for context.
- * @param {Object} logger - Logging object (mcpLog or console log).
- * @returns {Array} Parsed and potentially corrected subtasks array.
- * @throws {Error} If parsing fails or JSON is invalid/malformed.
- */
- function parseSubtasksFromText(
-   text,
-   startId,
-   expectedCount,
-   parentTaskId,
-   logger
- ) {
-   if (typeof text !== 'string') {
-     logger.error(
-       `AI response text is not a string. Received type: ${typeof text}, Value: ${text}`
-     );
-     throw new Error('AI response text is not a string.');
-   }
-
-   if (!text || text.trim() === '') {
-     throw new Error('AI response text is empty after trimming.');
-   }
-
-   const originalTrimmedResponse = text.trim(); // Store the original trimmed response
-   let jsonToParse = originalTrimmedResponse; // Initialize jsonToParse with it
-
-   logger.debug(
-     `Original AI Response for parsing (full length: ${jsonToParse.length}): ${jsonToParse.substring(0, 1000)}...`
-   );
-
-   // --- Pre-emptive cleanup for known AI JSON issues ---
-   // Fix for "dependencies": , or "dependencies":,
-   if (jsonToParse.includes('"dependencies":')) {
-     const malformedPattern = /"dependencies":\s*,/g;
-     if (malformedPattern.test(jsonToParse)) {
-       logger.warn('Attempting to fix malformed "dependencies": , issue.');
-       jsonToParse = jsonToParse.replace(
-         malformedPattern,
-         '"dependencies": [],'
-       );
-       logger.debug(
-         `JSON after fixing "dependencies": ${jsonToParse.substring(0, 500)}...`
-       );
-     }
-   }
-   // --- End pre-emptive cleanup ---
-
-   let parsedObject;
-   let primaryParseAttemptFailed = false;
-
-   // --- Attempt 1: Simple Parse (with optional Markdown cleanup) ---
-   logger.debug('Attempting simple parse...');
-   try {
-     // Check for markdown code block
-     const codeBlockMatch = jsonToParse.match(/```(?:json)?\s*([\s\S]*?)\s*```/);
-     let contentToParseDirectly = jsonToParse;
-     if (codeBlockMatch && codeBlockMatch[1]) {
-       contentToParseDirectly = codeBlockMatch[1].trim();
-       logger.debug('Simple parse: Extracted content from markdown code block.');
-     } else {
-       logger.debug(
-         'Simple parse: No markdown code block found, using trimmed original.'
-       );
-     }
-
-     parsedObject = JSON.parse(contentToParseDirectly);
-     logger.debug('Simple parse successful!');
-
-     // Quick check if it looks like our target object
-     if (
-       !parsedObject ||
-       typeof parsedObject !== 'object' ||
-       !Array.isArray(parsedObject.subtasks)
-     ) {
-       logger.warn(
-         'Simple parse succeeded, but result is not the expected {"subtasks": []} structure. Will proceed to advanced extraction.'
-       );
-       primaryParseAttemptFailed = true;
-       parsedObject = null; // Reset parsedObject so we enter the advanced logic
-     }
-     // If it IS the correct structure, we'll skip advanced extraction.
-   } catch (e) {
-     logger.warn(
-       `Simple parse failed: ${e.message}. Proceeding to advanced extraction logic.`
-     );
-     primaryParseAttemptFailed = true;
-     // jsonToParse is already originalTrimmedResponse if simple parse failed before modifying it for markdown
-   }
-
-   // --- Attempt 2: Advanced Extraction (if simple parse failed or produced wrong structure) ---
-   if (primaryParseAttemptFailed || !parsedObject) {
-     // Ensure we try advanced if simple parse gave wrong structure
-     logger.debug('Attempting advanced extraction logic...');
-     // Reset jsonToParse to the original full trimmed response for advanced logic
-     jsonToParse = originalTrimmedResponse;
-
-     // (Insert the more complex extraction logic here - the one we worked on with:
-     // - targetPattern = '{"subtasks":';
-     // - careful brace counting for that targetPattern
-     // - fallbacks to last '{' and '}' if targetPattern logic fails)
-     // This was the logic from my previous message. Let's assume it's here.
-     // This block should ultimately set `jsonToParse` to the best candidate string.
-
-     // Example snippet of that advanced logic's start:
-     const targetPattern = '{"subtasks":';
-     const patternStartIndex = jsonToParse.indexOf(targetPattern);
-
-     if (patternStartIndex !== -1) {
-       const openBraces = 0;
-       const firstBraceFound = false;
-       const extractedJsonBlock = '';
-       // ... (loop for brace counting as before) ...
-       // ... (if successful, jsonToParse = extractedJsonBlock) ...
-       // ... (if that fails, fallbacks as before) ...
-     } else {
-       // ... (fallback to last '{' and '}' if targetPattern not found) ...
-     }
-     // End of advanced logic excerpt
-
-     logger.debug(
-       `Advanced extraction: JSON string that will be parsed: ${jsonToParse.substring(0, 500)}...`
-     );
-     try {
-       parsedObject = JSON.parse(jsonToParse);
-       logger.debug('Advanced extraction parse successful!');
-     } catch (parseError) {
-       logger.error(
-         `Advanced extraction: Failed to parse JSON object: ${parseError.message}`
-       );
-       logger.error(
-         `Advanced extraction: Problematic JSON string for parse (first 500 chars): ${jsonToParse.substring(0, 500)}`
-       );
-       throw new Error(
-         // Re-throw a more specific error if advanced also fails
-         `Failed to parse JSON response object after both simple and advanced attempts: ${parseError.message}`
-       );
-     }
-   }
-
-   // --- Validation (applies to successfully parsedObject from either attempt) ---
-   if (
-     !parsedObject ||
-     typeof parsedObject !== 'object' ||
-     !Array.isArray(parsedObject.subtasks)
-   ) {
-     logger.error(
-       `Final parsed content is not an object or missing 'subtasks' array. Content: ${JSON.stringify(parsedObject).substring(0, 200)}`
-     );
-     throw new Error(
-       'Parsed AI response is not a valid object containing a "subtasks" array after all attempts.'
-     );
-   }
-   const parsedSubtasks = parsedObject.subtasks;
-
-   if (expectedCount && parsedSubtasks.length !== expectedCount) {
-     logger.warn(
-       `Expected ${expectedCount} subtasks, but parsed ${parsedSubtasks.length}.`
-     );
-   }
-
-   let currentId = startId;
-   const validatedSubtasks = [];
-   const validationErrors = [];
-
-   for (const rawSubtask of parsedSubtasks) {
-     const correctedSubtask = {
-       ...rawSubtask,
-       id: currentId,
-       dependencies: Array.isArray(rawSubtask.dependencies)
-         ? rawSubtask.dependencies.filter(
-             (dep) =>
-               typeof dep === 'string' && dep.startsWith(`${parentTaskId}.`)
-           )
-         : [],
-       status: 'pending'
-     };
-
-     const result = subtaskSchema.safeParse(correctedSubtask);
-
-     if (result.success) {
-       validatedSubtasks.push(result.data);
-     } else {
-       logger.warn(
-         `Subtask validation failed for raw data: ${JSON.stringify(rawSubtask).substring(0, 100)}...`
-       );
-       result.error.errors.forEach((err) => {
-         const errorMessage = ` - Field '${err.path.join('.')}': ${err.message}`;
-         logger.warn(errorMessage);
-         validationErrors.push(`Subtask ${currentId}: ${errorMessage}`);
-       });
-     }
-     currentId++;
-   }
-
-   if (validationErrors.length > 0) {
-     logger.error(
-       `Found ${validationErrors.length} validation errors in the generated subtasks.`
-     );
-     logger.warn('Proceeding with only the successfully validated subtasks.');
-   }
-
-   if (validatedSubtasks.length === 0 && parsedSubtasks.length > 0) {
-     throw new Error(
-       'AI response contained potential subtasks, but none passed validation.'
-     );
-   }
-   return validatedSubtasks.slice(0, expectedCount || validatedSubtasks.length);
- }
-
- /**
- * Expand a task into subtasks using the unified AI service (generateTextService).
  * Appends new subtasks by default. Replaces existing subtasks if force=true.
  * Integrates complexity report to determine subtask count and prompt if available,
  * unless numSubtasks is explicitly provided.

@@ -450,6 +197,10 @@ async function expandTask(
  }

  // Determine prompt content AND system prompt
+ // Calculate the next subtask ID to match current behavior:
+ // - Start from the number of existing subtasks + 1
+ // - This creates sequential IDs: 1, 2, 3, 4...
+ // - Display format shows as parentTaskId.subtaskId (e.g., "1.1", "1.2", "2.1")
  const nextSubtaskId = (task.subtasks?.length || 0) + 1;

  // Load prompts using PromptManager

@@ -510,7 +261,6 @@ async function expandTask(
  hasCodebaseAnalysis: hasCodebaseAnalysisCapability,
  projectRoot: projectRoot || ''
  };

  let variantKey = 'default';
  if (expansionPromptText) {
    variantKey = 'complexity-report';

@@ -540,7 +290,7 @@ async function expandTask(
  );
  // --- End Complexity Report / Prompt Logic ---

- // --- AI Subtask Generation using generateTextService ---
+ // --- AI Subtask Generation using generateObjectService ---
  let generatedSubtasks = [];
  let loadingIndicator = null;
  if (outputFormat === 'text') {

@@ -549,48 +299,36 @@ async function expandTask(
  );
  }

- let responseText = '';
  let aiServiceResponse = null;

  try {
    const role = useResearch ? 'research' : 'main';

-   // Call generateTextService with the determined prompts and telemetry params
+   // Call generateObjectService with the determined prompts and telemetry params
-   aiServiceResponse = await generateTextService({
+   aiServiceResponse = await generateObjectService({
      prompt: promptContent,
      systemPrompt: systemPrompt,
      role,
      session,
      projectRoot,
+     schema: COMMAND_SCHEMAS['expand-task'],
+     objectName: 'subtasks',
      commandName: 'expand-task',
      outputType: outputFormat
    });
-   responseText = aiServiceResponse.mainResult;

-   // Parse Subtasks
+   // With generateObject, we expect structured data – verify it before use
-   generatedSubtasks = parseSubtasksFromText(
+   const mainResult = aiServiceResponse?.mainResult;
-     responseText,
+   if (!mainResult || !Array.isArray(mainResult.subtasks)) {
-     nextSubtaskId,
+     throw new Error('AI response did not include a valid subtasks array.');
-     finalSubtaskCount,
+   }
-     task.id,
+   generatedSubtasks = mainResult.subtasks;
-     logger
+   logger.info(`Received ${generatedSubtasks.length} subtasks from AI.`);
-   );
-   logger.info(
-     `Successfully parsed ${generatedSubtasks.length} subtasks from AI response.`
-   );
  } catch (error) {
    if (loadingIndicator) stopLoadingIndicator(loadingIndicator);
    logger.error(
      `Error during AI call or parsing for task ${taskId}: ${error.message}`, // Added task ID context
      'error'
    );
-   // Log raw response in debug mode if parsing failed
-   if (
-     error.message.includes('Failed to parse valid subtasks') &&
-     getDebugFlag(session)
-   ) {
-     logger.error(`Raw AI Response that failed parsing:\n${responseText}`);
-   }
    throw error;
  } finally {
    if (loadingIndicator) stopLoadingIndicator(loadingIndicator);
@@ -539,6 +539,22 @@ async function setModel(role, modelId, options = {}) {
  warningMessage = `Warning: Gemini CLI model '${modelId}' not found in supported models. Setting without validation.`;
  report('warn', warningMessage);
  }
+ } else if (providerHint === CUSTOM_PROVIDERS.CODEX_CLI) {
+   // Codex CLI provider - enforce supported model list
+   determinedProvider = CUSTOM_PROVIDERS.CODEX_CLI;
+   const codexCliModels = availableModels.filter(
+     (m) => m.provider === 'codex-cli'
+   );
+   const codexCliModelData = codexCliModels.find(
+     (m) => m.id === modelId
+   );
+   if (codexCliModelData) {
+     modelData = codexCliModelData;
+     report('info', `Setting Codex CLI model '${modelId}'.`);
+   } else {
+     warningMessage = `Warning: Codex CLI model '${modelId}' not found in supported models. Setting without validation.`;
+     report('warn', warningMessage);
+   }
  } else {
  // Invalid provider hint - should not happen with our constants
  throw new Error(`Invalid provider hint received: ${providerHint}`);

@@ -559,7 +575,7 @@ async function setModel(role, modelId, options = {}) {
  success: false,
  error: {
  code: 'MODEL_NOT_FOUND_NO_HINT',
- message: `Model ID "${modelId}" not found in Taskmaster's supported models. If this is a custom model, please specify the provider using --openrouter, --ollama, --bedrock, --azure, or --vertex.`
+ message: `Model ID "${modelId}" not found in Taskmaster's supported models. If this is a custom model, please specify the provider using --openrouter, --ollama, --bedrock, --azure, --vertex, --gemini-cli, or --codex-cli.`
  }
  };
  }
@@ -6,7 +6,6 @@ import {
  setTasksForTag,
  traverseDependencies
  } from '../utils.js';
- import generateTaskFiles from './generate-task-files.js';
  import {
    findCrossTagDependencies,
    getDependentTaskIds,

@@ -142,13 +141,7 @@ async function moveTask(
  results.push(result);
  }

- // Generate files once at the end if requested
+ // Note: Task file generation is no longer supported and has been removed
- if (generateFiles) {
-   await generateTaskFiles(tasksPath, path.dirname(tasksPath), {
-     tag: tag,
-     projectRoot: projectRoot
-   });
- }

  return {
    message: `Successfully moved ${sourceIds.length} tasks/subtasks`,

@@ -209,12 +202,7 @@ async function moveTask(
  // The writeJSON function will filter out _rawTaggedData automatically
  writeJSON(tasksPath, rawData, options.projectRoot, tag);

- if (generateFiles) {
+ // Note: Task file generation is no longer supported and has been removed
-   await generateTaskFiles(tasksPath, path.dirname(tasksPath), {
-     tag: tag,
-     projectRoot: projectRoot
-   });
- }

  return result;
  }
@@ -1,6 +1,4 @@
- import path from 'path';
  import { log, readJSON, writeJSON } from '../utils.js';
- import generateTaskFiles from './generate-task-files.js';

  /**
   * Remove a subtask from its parent task

@@ -108,11 +106,7 @@ async function removeSubtask(
  // Write the updated tasks back to the file with proper context
  writeJSON(tasksPath, data, projectRoot, tag);

- // Generate task files if requested
+ // Note: Task file generation is no longer supported and has been removed
- if (generateFiles) {
-   log('info', 'Regenerating task files...');
-   await generateTaskFiles(tasksPath, path.dirname(tasksPath), context);
- }

  return convertedTask;
  } catch (error) {
@@ -355,7 +355,7 @@ Ensure the JSON is valid and properly formatted.`;
|
|||||||
const subtaskSchema = z.object({
|
const subtaskSchema = z.object({
|
||||||
subtasks: z.array(
|
subtasks: z.array(
|
||||||
z.object({
|
z.object({
|
||||||
id: z.number().int().positive(),
|
id: z.int().positive(),
|
||||||
title: z.string().min(5),
|
title: z.string().min(5),
|
||||||
description: z.string().min(10),
|
description: z.string().min(10),
|
||||||
dependencies: z.array(z.string()),
|
dependencies: z.array(z.string()),
|
||||||
@@ -386,14 +386,44 @@ Ensure the JSON is valid and properly formatted.`;
|
|||||||
testStrategy: subtask.testStrategy || ''
|
testStrategy: subtask.testStrategy || ''
|
||||||
}));
|
}));
|
||||||
|
|
||||||
|
// Ensure new subtasks have unique sequential IDs after the preserved ones
|
||||||
|
const maxPreservedId = preservedSubtasks.reduce(
|
||||||
|
(max, st) => Math.max(max, st.id || 0),
|
||||||
|
0
|
||||||
|
);
|
||||||
|
let nextId = maxPreservedId + 1;
|
||||||
|
const idMapping = new Map();
|
||||||
|
const normalizedGeneratedSubtasks = processedGeneratedSubtasks
|
||||||
|
.map((st) => {
|
||||||
|
const originalId = st.id;
|
||||||
|
const newId = nextId++;
|
||||||
|
idMapping.set(originalId, newId);
|
||||||
|
return {
|
||||||
|
...st,
|
||||||
|
id: newId
|
||||||
|
};
|
||||||
|
})
|
||||||
|
.map((st) => ({
|
||||||
|
...st,
|
||||||
|
dependencies: (st.dependencies || []).map((dep) => {
|
||||||
|
if (typeof dep !== 'string' || !dep.startsWith(`${task.id}.`)) {
|
||||||
|
return dep;
|
||||||
|
}
|
||||||
|
const [, siblingIdPart] = dep.split('.');
|
||||||
|
const originalSiblingId = Number.parseInt(siblingIdPart, 10);
|
||||||
|
const remappedSiblingId = idMapping.get(originalSiblingId);
|
||||||
|
return remappedSiblingId ? `${task.id}.${remappedSiblingId}` : dep;
|
||||||
|
})
|
||||||
|
}));
|
||||||
|
|
||||||
// Update task with preserved subtasks + newly generated ones
|
// Update task with preserved subtasks + newly generated ones
|
||||||
task.subtasks = [...preservedSubtasks, ...processedGeneratedSubtasks];
|
task.subtasks = [...preservedSubtasks, ...normalizedGeneratedSubtasks];
|
||||||
|
|
||||||
return {
|
return {
|
||||||
updatedTask: task,
|
updatedTask: task,
|
||||||
regenerated: true,
|
regenerated: true,
|
||||||
preserved: preservedSubtasks.length,
|
preserved: preservedSubtasks.length,
|
||||||
generated: processedGeneratedSubtasks.length
|
generated: normalizedGeneratedSubtasks.length
|
||||||
};
|
};
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
log(
|
log(
|
||||||
|
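The hunk above renumbers AI-generated subtasks so their IDs continue after the highest preserved subtask ID, and remaps "parentId.siblingId" dependency strings to the new numbers. A minimal standalone sketch of that renumbering idea (the function name `remapSubtasks`, the `parentId` argument, and the sample data are illustrative, not part of the codebase):

// Sketch: renumber generated subtasks after the preserved ones and remap
// "parentId.siblingId" dependency strings to the new IDs.
function remapSubtasks(preserved, generated, parentId) {
	const maxPreservedId = preserved.reduce((max, st) => Math.max(max, st.id || 0), 0);
	let nextId = maxPreservedId + 1;
	const idMapping = new Map();
	const renumbered = generated.map((st) => {
		const newId = nextId++;
		idMapping.set(st.id, newId);
		return { ...st, id: newId };
	});
	return renumbered.map((st) => ({
		...st,
		dependencies: (st.dependencies || []).map((dep) => {
			if (typeof dep !== 'string' || !dep.startsWith(`${parentId}.`)) return dep;
			const remapped = idMapping.get(Number.parseInt(dep.split('.')[1], 10));
			return remapped ? `${parentId}.${remapped}` : dep;
		})
	}));
}

// Example: preserved IDs 1-2, generated IDs restart at 1 -> they become 3 and 4,
// and the sibling dependency '7.1' is rewritten to '7.3'.
console.log(
	remapSubtasks(
		[{ id: 1 }, { id: 2 }],
		[{ id: 1, dependencies: [] }, { id: 2, dependencies: ['7.1'] }],
		7
	)
);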
@@ -619,9 +619,29 @@ async function tags(
 headers.push(chalk.cyan.bold('Description'));
 }

+// Calculate dynamic column widths based on terminal width
+const terminalWidth = Math.max(process.stdout.columns || 120, 80);
+const usableWidth = Math.floor(terminalWidth * 0.95);
+
+let colWidths;
+if (showMetadata) {
+// With metadata: Tag Name, Tasks, Completed, Created, Description
+const widths = [0.25, 0.1, 0.12, 0.15, 0.38];
+colWidths = widths.map((w, i) =>
+Math.max(Math.floor(usableWidth * w), i === 0 ? 15 : 8)
+);
+} else {
+// Without metadata: Tag Name, Tasks, Completed
+const widths = [0.7, 0.15, 0.15];
+colWidths = widths.map((w, i) =>
+Math.max(Math.floor(usableWidth * w), i === 0 ? 20 : 10)
+);
+}
+
 const table = new Table({
 head: headers,
-colWidths: showMetadata ? [20, 10, 12, 15, 50] : [25, 10, 12]
+colWidths: colWidths,
+wordWrap: true
 });

 // Add rows

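A small self-contained sketch of the dynamic-width idea introduced above, assuming only the cli-table3 dependency the file already uses (headers and row data are sample values):

import Table from 'cli-table3';

// Sketch: size columns as fractions of the terminal width with sane minimums.
const terminalWidth = Math.max(process.stdout.columns || 120, 80);
const usableWidth = Math.floor(terminalWidth * 0.95);
const fractions = [0.7, 0.15, 0.15];
const colWidths = fractions.map((w, i) =>
	Math.max(Math.floor(usableWidth * w), i === 0 ? 20 : 10)
);

const table = new Table({ head: ['Tag Name', 'Tasks', 'Completed'], colWidths, wordWrap: true });
table.push(['master', '12', '5']);
console.log(table.toString());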
@@ -3,7 +3,6 @@ import path from 'path';
 import chalk from 'chalk';
 import boxen from 'boxen';
 import Table from 'cli-table3';
-import { z } from 'zod'; // Keep Zod for post-parse validation

 import {
 log as consoleLog,
@@ -22,7 +21,11 @@ import {
 displayAiUsageSummary
 } from '../ui.js';

-import { generateTextService } from '../ai-services-unified.js';
+import {
+generateTextService,
+generateObjectService
+} from '../ai-services-unified.js';
+import { COMMAND_SCHEMAS } from '../../../src/schemas/registry.js';
 import {
 getDebugFlag,
 isApiKeySet,
@@ -32,229 +35,6 @@ import { getPromptManager } from '../prompt-manager.js';
 import { ContextGatherer } from '../utils/contextGatherer.js';
 import { FuzzyTaskSearch } from '../utils/fuzzyTaskSearch.js';

-// Zod schema for post-parsing validation of the updated task object
-const updatedTaskSchema = z
-.object({
-id: z.number().int(),
-title: z.string(), // Title should be preserved, but check it exists
-description: z.string(),
-status: z.string(),
-dependencies: z.array(z.union([z.number().int(), z.string()])),
-priority: z.string().nullable().default('medium'),
-details: z.string().nullable().default(''),
-testStrategy: z.string().nullable().default(''),
-subtasks: z
-.array(
-z.object({
-id: z
-.number()
-.int()
-.positive()
-.describe('Sequential subtask ID starting from 1'),
-title: z.string(),
-description: z.string(),
-status: z.string(),
-dependencies: z.array(z.number().int()).nullable().default([]),
-details: z.string().nullable().default(''),
-testStrategy: z.string().nullable().default('')
-})
-)
-.nullable()
-.default([])
-})
-.strip(); // Allows parsing even if AI adds extra fields, but validation focuses on schema
-
-/**
-* Parses a single updated task object from AI's text response.
-* @param {string} text - Response text from AI.
-* @param {number} expectedTaskId - The ID of the task expected.
-* @param {Function | Object} logFn - Logging function or MCP logger.
-* @param {boolean} isMCP - Flag indicating MCP context.
-* @returns {Object} Parsed and validated task object.
-* @throws {Error} If parsing or validation fails.
-*/
-function parseUpdatedTaskFromText(text, expectedTaskId, logFn, isMCP) {
-// Report helper consistent with the established pattern
-const report = (level, ...args) => {
-if (isMCP) {
-if (typeof logFn[level] === 'function') logFn[level](...args);
-else logFn.info(...args);
-} else if (!isSilentMode()) {
-logFn(level, ...args);
-}
-};
-
-report(
-'info',
-'Attempting to parse updated task object from text response...'
-);
-if (!text || text.trim() === '')
-throw new Error('AI response text is empty.');
-
-let cleanedResponse = text.trim();
-const originalResponseForDebug = cleanedResponse;
-let parseMethodUsed = 'raw'; // Keep track of which method worked
-
-// --- NEW Step 1: Try extracting between {} first ---
-const firstBraceIndex = cleanedResponse.indexOf('{');
-const lastBraceIndex = cleanedResponse.lastIndexOf('}');
-let potentialJsonFromBraces = null;
-
-if (firstBraceIndex !== -1 && lastBraceIndex > firstBraceIndex) {
-potentialJsonFromBraces = cleanedResponse.substring(
-firstBraceIndex,
-lastBraceIndex + 1
-);
-if (potentialJsonFromBraces.length <= 2) {
-potentialJsonFromBraces = null; // Ignore empty braces {}
-}
-}
-
-// If {} extraction yielded something, try parsing it immediately
-if (potentialJsonFromBraces) {
-try {
-const testParse = JSON.parse(potentialJsonFromBraces);
-// It worked! Use this as the primary cleaned response.
-cleanedResponse = potentialJsonFromBraces;
-parseMethodUsed = 'braces';
-} catch (e) {
-report(
-'info',
-'Content between {} looked promising but failed initial parse. Proceeding to other methods.'
-);
-// Reset cleanedResponse to original if brace parsing failed
-cleanedResponse = originalResponseForDebug;
-}
-}
-
-// --- Step 2: If brace parsing didn't work or wasn't applicable, try code block extraction ---
-if (parseMethodUsed === 'raw') {
-const codeBlockMatch = cleanedResponse.match(
-/```(?:json|javascript)?\s*([\s\S]*?)\s*```/i
-);
-if (codeBlockMatch) {
-cleanedResponse = codeBlockMatch[1].trim();
-parseMethodUsed = 'codeblock';
-report('info', 'Extracted JSON content from Markdown code block.');
-} else {
-// --- Step 3: If code block failed, try stripping prefixes ---
-const commonPrefixes = [
-'json\n',
-'javascript\n'
-// ... other prefixes ...
-];
-let prefixFound = false;
-for (const prefix of commonPrefixes) {
-if (cleanedResponse.toLowerCase().startsWith(prefix)) {
-cleanedResponse = cleanedResponse.substring(prefix.length).trim();
-parseMethodUsed = 'prefix';
-report('info', `Stripped prefix: "${prefix.trim()}"`);
-prefixFound = true;
-break;
-}
-}
-if (!prefixFound) {
-report(
-'warn',
-'Response does not appear to contain {}, code block, or known prefix. Attempting raw parse.'
-);
-}
-}
-}
-
-// --- Step 4: Attempt final parse ---
-let parsedTask;
-try {
-parsedTask = JSON.parse(cleanedResponse);
-} catch (parseError) {
-report('error', `Failed to parse JSON object: ${parseError.message}`);
-report(
-'error',
-`Problematic JSON string (first 500 chars): ${cleanedResponse.substring(0, 500)}`
-);
-report(
-'error',
-`Original Raw Response (first 500 chars): ${originalResponseForDebug.substring(0, 500)}`
-);
-throw new Error(
-`Failed to parse JSON response object: ${parseError.message}`
-);
-}
-
-if (!parsedTask || typeof parsedTask !== 'object') {
-report(
-'error',
-`Parsed content is not an object. Type: ${typeof parsedTask}`
-);
-report(
-'error',
-`Parsed content sample: ${JSON.stringify(parsedTask).substring(0, 200)}`
-);
-throw new Error('Parsed AI response is not a valid JSON object.');
-}
-
-// Preprocess the task to ensure subtasks have proper structure
-const preprocessedTask = {
-...parsedTask,
-status: parsedTask.status || 'pending',
-dependencies: Array.isArray(parsedTask.dependencies)
-? parsedTask.dependencies
-: [],
-details:
-typeof parsedTask.details === 'string'
-? parsedTask.details
-: String(parsedTask.details || ''),
-testStrategy:
-typeof parsedTask.testStrategy === 'string'
-? parsedTask.testStrategy
-: String(parsedTask.testStrategy || ''),
-// Ensure subtasks is an array and each subtask has required fields
-subtasks: Array.isArray(parsedTask.subtasks)
-? parsedTask.subtasks.map((subtask) => ({
-...subtask,
-title: subtask.title || '',
-description: subtask.description || '',
-status: subtask.status || 'pending',
-dependencies: Array.isArray(subtask.dependencies)
-? subtask.dependencies
-: [],
-details:
-typeof subtask.details === 'string'
-? subtask.details
-: String(subtask.details || ''),
-testStrategy:
-typeof subtask.testStrategy === 'string'
-? subtask.testStrategy
-: String(subtask.testStrategy || '')
-}))
-: []
-};
-
-// Validate the parsed task object using Zod
-const validationResult = updatedTaskSchema.safeParse(preprocessedTask);
-if (!validationResult.success) {
-report('error', 'Parsed task object failed Zod validation.');
-validationResult.error.errors.forEach((err) => {
-report('error', ` - Field '${err.path.join('.')}': ${err.message}`);
-});
-throw new Error(
-`AI response failed task structure validation: ${validationResult.error.message}`
-);
-}
-
-// Final check: ensure ID matches expected ID (AI might hallucinate)
-if (validationResult.data.id !== expectedTaskId) {
-report(
-'warn',
-`AI returned task with ID ${validationResult.data.id}, but expected ${expectedTaskId}. Overwriting ID.`
-);
-validationResult.data.id = expectedTaskId; // Enforce correct ID
-}
-
-report('info', 'Successfully validated updated task structure.');
-return validationResult.data; // Return the validated task data
-}
-
 /**
 * Update a task by ID with new information using the unified AI service.
 * @param {string} tasksPath - Path to the tasks.json file
@@ -522,15 +302,32 @@ async function updateTaskById(

 try {
 const serviceRole = useResearch ? 'research' : 'main';
-aiServiceResponse = await generateTextService({
-role: serviceRole,
-session: session,
-projectRoot: projectRoot,
-systemPrompt: systemPrompt,
-prompt: userPrompt,
-commandName: 'update-task',
-outputType: isMCP ? 'mcp' : 'cli'
-});
+if (appendMode) {
+// Append mode still uses generateTextService since it returns plain text
+aiServiceResponse = await generateTextService({
+role: serviceRole,
+session: session,
+projectRoot: projectRoot,
+systemPrompt: systemPrompt,
+prompt: userPrompt,
+commandName: 'update-task',
+outputType: isMCP ? 'mcp' : 'cli'
+});
+} else {
+// Full update mode uses generateObjectService for structured output
+aiServiceResponse = await generateObjectService({
+role: serviceRole,
+session: session,
+projectRoot: projectRoot,
+systemPrompt: systemPrompt,
+prompt: userPrompt,
+schema: COMMAND_SCHEMAS['update-task-by-id'],
+objectName: 'task',
+commandName: 'update-task',
+outputType: isMCP ? 'mcp' : 'cli'
+});
+}

 if (loadingIndicator)
 stopLoadingIndicator(loadingIndicator, 'AI update complete.');
@@ -600,13 +397,8 @@ async function updateTaskById(
 };
 }

-// Full update mode: Use mainResult (text) for parsing
-const updatedTask = parseUpdatedTaskFromText(
-aiServiceResponse.mainResult,
-taskId,
-logFn,
-isMCP
-);
+// Full update mode: Use structured data directly
+const updatedTask = aiServiceResponse.mainResult.task;

 // --- Task Validation/Correction (Keep existing logic) ---
 if (!updatedTask || typeof updatedTask !== 'object')

@@ -2,7 +2,6 @@ import path from 'path';
 import chalk from 'chalk';
 import boxen from 'boxen';
 import Table from 'cli-table3';
-import { z } from 'zod'; // Keep Zod for post-parsing validation

 import {
 log as consoleLog,
@@ -22,258 +21,13 @@ import {
 import { getDebugFlag, hasCodebaseAnalysis } from '../config-manager.js';
 import { getPromptManager } from '../prompt-manager.js';
 import generateTaskFiles from './generate-task-files.js';
-import { generateTextService } from '../ai-services-unified.js';
+import { generateObjectService } from '../ai-services-unified.js';
+import { COMMAND_SCHEMAS } from '../../../src/schemas/registry.js';
 import { getModelConfiguration } from './models.js';
 import { ContextGatherer } from '../utils/contextGatherer.js';
 import { FuzzyTaskSearch } from '../utils/fuzzyTaskSearch.js';
 import { flattenTasksWithSubtasks, findProjectRoot } from '../utils.js';

-// Zod schema for validating the structure of tasks AFTER parsing
-const updatedTaskSchema = z
-.object({
-id: z.number().int(),
-title: z.string(),
-description: z.string(),
-status: z.string(),
-dependencies: z.array(z.union([z.number().int(), z.string()])),
-priority: z.string().nullable(),
-details: z.string().nullable(),
-testStrategy: z.string().nullable(),
-subtasks: z.array(z.any()).nullable() // Keep subtasks flexible for now
-})
-.strip(); // Allow potential extra fields during parsing if needed, then validate structure
-
-// Preprocessing schema that adds defaults before validation
-const preprocessTaskSchema = z.preprocess((task) => {
-// Ensure task is an object
-if (typeof task !== 'object' || task === null) {
-return {};
-}
-
-// Return task with defaults for missing fields
-return {
-...task,
-// Add defaults for required fields if missing
-id: task.id ?? 0,
-title: task.title ?? 'Untitled Task',
-description: task.description ?? '',
-status: task.status ?? 'pending',
-dependencies: Array.isArray(task.dependencies) ? task.dependencies : [],
-// Optional fields - preserve undefined/null distinction
-priority: task.hasOwnProperty('priority') ? task.priority : null,
-details: task.hasOwnProperty('details') ? task.details : null,
-testStrategy: task.hasOwnProperty('testStrategy')
-? task.testStrategy
-: null,
-subtasks: Array.isArray(task.subtasks)
-? task.subtasks
-: task.subtasks === null
-? null
-: []
-};
-}, updatedTaskSchema);
-
-const updatedTaskArraySchema = z.array(updatedTaskSchema);
-const preprocessedTaskArraySchema = z.array(preprocessTaskSchema);
-
-/**
-* Parses an array of task objects from AI's text response.
-* @param {string} text - Response text from AI.
-* @param {number} expectedCount - Expected number of tasks.
-* @param {Function | Object} logFn - The logging function or MCP log object.
-* @param {boolean} isMCP - Flag indicating if logFn is MCP logger.
-* @returns {Array} Parsed and validated tasks array.
-* @throws {Error} If parsing or validation fails.
-*/
-function parseUpdatedTasksFromText(text, expectedCount, logFn, isMCP) {
-const report = (level, ...args) => {
-if (isMCP) {
-if (typeof logFn[level] === 'function') logFn[level](...args);
-else logFn.info(...args);
-} else if (!isSilentMode()) {
-// Check silent mode for consoleLog
-consoleLog(level, ...args);
-}
-};
-
-report(
-'info',
-'Attempting to parse updated tasks array from text response...'
-);
-if (!text || text.trim() === '')
-throw new Error('AI response text is empty.');
-
-let cleanedResponse = text.trim();
-const originalResponseForDebug = cleanedResponse;
-let parseMethodUsed = 'raw'; // Track which method worked
-
-// --- NEW Step 1: Try extracting between [] first ---
-const firstBracketIndex = cleanedResponse.indexOf('[');
-const lastBracketIndex = cleanedResponse.lastIndexOf(']');
-let potentialJsonFromArray = null;
-
-if (firstBracketIndex !== -1 && lastBracketIndex > firstBracketIndex) {
-potentialJsonFromArray = cleanedResponse.substring(
-firstBracketIndex,
-lastBracketIndex + 1
-);
-// Basic check to ensure it's not just "[]" or malformed
-if (potentialJsonFromArray.length <= 2) {
-potentialJsonFromArray = null; // Ignore empty array
-}
-}
-
-// If [] extraction yielded something, try parsing it immediately
-if (potentialJsonFromArray) {
-try {
-const testParse = JSON.parse(potentialJsonFromArray);
-// It worked! Use this as the primary cleaned response.
-cleanedResponse = potentialJsonFromArray;
-parseMethodUsed = 'brackets';
-} catch (e) {
-report(
-'info',
-'Content between [] looked promising but failed initial parse. Proceeding to other methods.'
-);
-// Reset cleanedResponse to original if bracket parsing failed
-cleanedResponse = originalResponseForDebug;
-}
-}
-
-// --- Step 2: If bracket parsing didn't work or wasn't applicable, try code block extraction ---
-if (parseMethodUsed === 'raw') {
-// Only look for ```json blocks now
-const codeBlockMatch = cleanedResponse.match(
-/```json\s*([\s\S]*?)\s*```/i // Only match ```json
-);
-if (codeBlockMatch) {
-cleanedResponse = codeBlockMatch[1].trim();
-parseMethodUsed = 'codeblock';
-report('info', 'Extracted JSON content from JSON Markdown code block.');
-} else {
-report('info', 'No JSON code block found.');
-// --- Step 3: If code block failed, try stripping prefixes ---
-const commonPrefixes = [
-'json\n',
-'javascript\n', // Keep checking common prefixes just in case
-'python\n',
-'here are the updated tasks:',
-'here is the updated json:',
-'updated tasks:',
-'updated json:',
-'response:',
-'output:'
-];
-let prefixFound = false;
-for (const prefix of commonPrefixes) {
-if (cleanedResponse.toLowerCase().startsWith(prefix)) {
-cleanedResponse = cleanedResponse.substring(prefix.length).trim();
-parseMethodUsed = 'prefix';
-report('info', `Stripped prefix: "${prefix.trim()}"`);
-prefixFound = true;
-break;
-}
-}
-if (!prefixFound) {
-report(
-'warn',
-'Response does not appear to contain [], JSON code block, or known prefix. Attempting raw parse.'
-);
-}
-}
-}
-
-// --- Step 4: Attempt final parse ---
-let parsedTasks;
-try {
-parsedTasks = JSON.parse(cleanedResponse);
-} catch (parseError) {
-report('error', `Failed to parse JSON array: ${parseError.message}`);
-report(
-'error',
-`Extraction method used: ${parseMethodUsed}` // Log which method failed
-);
-report(
-'error',
-`Problematic JSON string (first 500 chars): ${cleanedResponse.substring(0, 500)}`
-);
-report(
-'error',
-`Original Raw Response (first 500 chars): ${originalResponseForDebug.substring(0, 500)}`
-);
-throw new Error(
-`Failed to parse JSON response array: ${parseError.message}`
-);
-}
-
-// --- Step 5 & 6: Validate Array structure and Zod schema ---
-if (!Array.isArray(parsedTasks)) {
-report(
-'error',
-`Parsed content is not an array. Type: ${typeof parsedTasks}`
-);
-report(
-'error',
-`Parsed content sample: ${JSON.stringify(parsedTasks).substring(0, 200)}`
-);
-throw new Error('Parsed AI response is not a valid JSON array.');
-}
-
-report('info', `Successfully parsed ${parsedTasks.length} potential tasks.`);
-if (expectedCount && parsedTasks.length !== expectedCount) {
-report(
-'warn',
-`Expected ${expectedCount} tasks, but parsed ${parsedTasks.length}.`
-);
-}
-
-// Log missing fields for debugging before preprocessing
-let hasWarnings = false;
-parsedTasks.forEach((task, index) => {
-const missingFields = [];
-if (!task.hasOwnProperty('id')) missingFields.push('id');
-if (!task.hasOwnProperty('status')) missingFields.push('status');
-if (!task.hasOwnProperty('dependencies'))
-missingFields.push('dependencies');
-
-if (missingFields.length > 0) {
-hasWarnings = true;
-report(
-'warn',
-`Task ${index} is missing fields: ${missingFields.join(', ')} - will use defaults`
-);
-}
-});
-
-if (hasWarnings) {
-report(
-'warn',
-'Some tasks were missing required fields. Applying defaults...'
-);
-}
-
-// Use the preprocessing schema to add defaults and validate
-const preprocessResult = preprocessedTaskArraySchema.safeParse(parsedTasks);
-
-if (!preprocessResult.success) {
-// This should rarely happen now since preprocessing adds defaults
-report('error', 'Failed to validate task array even after preprocessing.');
-preprocessResult.error.errors.forEach((err) => {
-report('error', ` - Path '${err.path.join('.')}': ${err.message}`);
-});
-
-throw new Error(
-`AI response failed validation: ${preprocessResult.error.message}`
-);
-}
-
-report('info', 'Successfully validated and transformed task structure.');
-return preprocessResult.data.slice(
-0,
-expectedCount || preprocessResult.data.length
-);
-}
-
 /**
 * Update tasks based on new context using the unified AI service.
 * @param {string} tasksPath - Path to the tasks.json file
@@ -458,13 +212,15 @@ async function updateTasks(
 // Determine role based on research flag
 const serviceRole = useResearch ? 'research' : 'main';

-// Call the unified AI service
-aiServiceResponse = await generateTextService({
+// Call the unified AI service with generateObject
+aiServiceResponse = await generateObjectService({
 role: serviceRole,
 session: session,
 projectRoot: projectRoot,
 systemPrompt: systemPrompt,
 prompt: userPrompt,
+schema: COMMAND_SCHEMAS['update-tasks'],
+objectName: 'tasks',
 commandName: 'update-tasks',
 outputType: isMCP ? 'mcp' : 'cli'
 });
@@ -472,13 +228,8 @@ async function updateTasks(
 if (loadingIndicator)
 stopLoadingIndicator(loadingIndicator, 'AI update complete.');

-// Use the mainResult (text) for parsing
-const parsedUpdatedTasks = parseUpdatedTasksFromText(
-aiServiceResponse.mainResult,
-tasksToUpdate.length,
-logFn,
-isMCP
-);
+// With generateObject, we get structured data directly
+const parsedUpdatedTasks = aiServiceResponse.mainResult.tasks;

 // --- Update Tasks Data (Updated writeJSON call) ---
 if (!Array.isArray(parsedUpdatedTasks)) {

@@ -2310,7 +2310,8 @@ function displayAiUsageSummary(telemetryData, outputType = 'cli') {
 outputTokens,
 totalTokens,
 totalCost,
-commandName
+commandName,
+isUnknownCost
 } = telemetryData;

 let summary = chalk.bold.blue('AI Usage Summary:') + '\n';
@@ -2320,7 +2321,10 @@ function displayAiUsageSummary(telemetryData, outputType = 'cli') {
 summary += chalk.gray(
 ` Tokens: ${totalTokens} (Input: ${inputTokens}, Output: ${outputTokens})\n`
 );
-summary += chalk.gray(` Est. Cost: $${totalCost.toFixed(6)}`);
+// Show "Unknown" if pricing data is not available, otherwise show the cost
+const costDisplay = isUnknownCost ? 'Unknown' : `$${totalCost.toFixed(6)}`;
+summary += chalk.gray(` Est. Cost: ${costDisplay}`);

 console.log(
 boxen(summary, {

@@ -21,6 +21,20 @@ export class BaseAIProvider {

 // Each provider must set their name
 this.name = this.constructor.name;
+
+/**
+* Whether this provider needs explicit schema in JSON mode
+* Can be overridden by subclasses
+* @type {boolean}
+*/
+this.needsExplicitJsonSchema = false;
+
+/**
+* Whether this provider supports temperature parameter
+* Can be overridden by subclasses
+* @type {boolean}
+*/
+this.supportsTemperature = true;
 }

 /**
@@ -126,16 +140,6 @@ export class BaseAIProvider {
 throw new Error('getRequiredApiKeyName must be implemented by provider');
 }

-/**
-* Determines if a model requires max_completion_tokens instead of maxTokens
-* Can be overridden by providers to specify their model requirements
-* @param {string} modelId - The model ID to check
-* @returns {boolean} True if the model requires max_completion_tokens
-*/
-requiresMaxCompletionTokens(modelId) {
-return false; // Default behavior - most models use maxTokens
-}
-
 /**
 * Prepares token limit parameter based on model requirements
 * @param {string} modelId - The model ID
@@ -150,11 +154,7 @@ export class BaseAIProvider {
 // Ensure maxTokens is an integer
 const tokenValue = Math.floor(Number(maxTokens));

-if (this.requiresMaxCompletionTokens(modelId)) {
-return { max_completion_tokens: tokenValue };
-} else {
-return { maxTokens: tokenValue };
-}
+return { maxOutputTokens: tokenValue };
 }

 /**
@@ -175,7 +175,9 @@ export class BaseAIProvider {
 model: client(params.modelId),
 messages: params.messages,
 ...this.prepareTokenParam(params.modelId, params.maxTokens),
-temperature: params.temperature
+...(this.supportsTemperature && params.temperature !== undefined
+? { temperature: params.temperature }
+: {})
 });

 log(
@@ -183,12 +185,19 @@ export class BaseAIProvider {
 `${this.name} generateText completed successfully for model: ${params.modelId}`
 );

+const inputTokens =
+result.usage?.inputTokens ?? result.usage?.promptTokens ?? 0;
+const outputTokens =
+result.usage?.outputTokens ?? result.usage?.completionTokens ?? 0;
+const totalTokens =
+result.usage?.totalTokens ?? inputTokens + outputTokens;
+
 return {
 text: result.text,
 usage: {
-inputTokens: result.usage?.promptTokens,
-outputTokens: result.usage?.completionTokens,
-totalTokens: result.usage?.totalTokens
+inputTokens,
+outputTokens,
+totalTokens
 }
 };
 } catch (error) {
@@ -211,7 +220,9 @@ export class BaseAIProvider {
 model: client(params.modelId),
 messages: params.messages,
 ...this.prepareTokenParam(params.modelId, params.maxTokens),
-temperature: params.temperature
+...(this.supportsTemperature && params.temperature !== undefined
+? { temperature: params.temperature }
+: {})
 });

 log(
@@ -248,8 +259,10 @@ export class BaseAIProvider {
 messages: params.messages,
 schema: zodSchema(params.schema),
 mode: params.mode || 'auto',
-maxTokens: params.maxTokens,
-temperature: params.temperature
+maxOutputTokens: params.maxTokens,
+...(this.supportsTemperature && params.temperature !== undefined
+? { temperature: params.temperature }
+: {})
 });

 log(
@@ -286,13 +299,18 @@ export class BaseAIProvider {
 );

 const client = await this.getClient(params);

 const result = await generateObject({
 model: client(params.modelId),
 messages: params.messages,
-schema: zodSchema(params.schema),
-mode: params.mode || 'auto',
-...this.prepareTokenParam(params.modelId, params.maxTokens),
-temperature: params.temperature
+schema: params.schema,
+mode: this.needsExplicitJsonSchema ? 'json' : 'auto',
+schemaName: params.objectName,
+schemaDescription: `Generate a valid JSON object for ${params.objectName}`,
+maxTokens: params.maxTokens,
+...(this.supportsTemperature && params.temperature !== undefined
+? { temperature: params.temperature }
+: {})
 });

 log(
@@ -300,19 +318,26 @@ export class BaseAIProvider {
 `${this.name} generateObject completed successfully for model: ${params.modelId}`
 );

+const inputTokens =
+result.usage?.inputTokens ?? result.usage?.promptTokens ?? 0;
+const outputTokens =
+result.usage?.outputTokens ?? result.usage?.completionTokens ?? 0;
+const totalTokens =
+result.usage?.totalTokens ?? inputTokens + outputTokens;
+
 return {
 object: result.object,
 usage: {
-inputTokens: result.usage?.promptTokens,
-outputTokens: result.usage?.completionTokens,
-totalTokens: result.usage?.totalTokens
+inputTokens,
+outputTokens,
+totalTokens
 }
 };
 } catch (error) {
 // Check if this is a JSON parsing error that we can potentially fix
 if (
 NoObjectGeneratedError.isInstance(error) &&
-JSONParseError.isInstance(error.cause) &&
+error.cause instanceof JSONParseError &&
 error.cause.text
 ) {
 log(

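Two small patterns recur in the base-provider hunks above: optional parameters are only spread into the AI SDK call when the provider supports them, and token usage is normalized across the SDK's older (promptTokens/completionTokens) and newer (inputTokens/outputTokens) field names. A minimal sketch of both patterns in isolation; the helper names `buildCallOptions` and `normalizeUsage` are illustrative, not part of the codebase:

// Sketch: conditionally include optional params and normalize usage fields.
function buildCallOptions({ supportsTemperature, temperature, maxTokens }) {
	return {
		maxOutputTokens: Math.floor(Number(maxTokens)),
		// Only pass temperature when the provider supports it and a value was given
		...(supportsTemperature && temperature !== undefined ? { temperature } : {})
	};
}

function normalizeUsage(usage = {}) {
	const inputTokens = usage.inputTokens ?? usage.promptTokens ?? 0;
	const outputTokens = usage.outputTokens ?? usage.completionTokens ?? 0;
	return {
		inputTokens,
		outputTokens,
		totalTokens: usage.totalTokens ?? inputTokens + outputTokens
	};
}

// Examples: no temperature key is emitted for a provider that rejects it,
// and legacy usage fields still produce a complete summary.
console.log(buildCallOptions({ supportsTemperature: false, temperature: 0.2, maxTokens: 1024 }));
console.log(normalizeUsage({ promptTokens: 10, completionTokens: 5 }));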
@@ -1,54 +1,129 @@
 /**
 * src/ai-providers/claude-code.js
 *
-* Implementation for interacting with Claude models via Claude Code CLI
-* using a custom AI SDK implementation.
+* Claude Code provider implementation using the ai-sdk-provider-claude-code package.
+* This provider uses the local Claude Code CLI with OAuth token authentication.
+*
+* Authentication:
+* - Uses CLAUDE_CODE_OAUTH_TOKEN managed by Claude Code CLI
+* - Token is set up via: claude setup-token
+* - No manual API key configuration required
 */

-import { createClaudeCode } from './custom-sdk/claude-code/index.js';
+import { createClaudeCode } from 'ai-sdk-provider-claude-code';
 import { BaseAIProvider } from './base-provider.js';
 import { getClaudeCodeSettingsForCommand } from '../../scripts/modules/config-manager.js';
+import { execSync } from 'child_process';
+import { log } from '../../scripts/modules/utils.js';
+
+let _claudeCliChecked = false;
+let _claudeCliAvailable = null;
+
+/**
+* Provider for Claude Code CLI integration via AI SDK
+*
+* Features:
+* - No API key required (uses local Claude Code CLI)
+* - Supports 'sonnet' and 'opus' models
+* - Command-specific configuration support
+*/
 export class ClaudeCodeProvider extends BaseAIProvider {
 constructor() {
 super();
 this.name = 'Claude Code';
+this.supportedModels = ['sonnet', 'opus'];
+// Claude Code requires explicit JSON schema mode
+this.needsExplicitJsonSchema = true;
+// Claude Code does not support temperature parameter
+this.supportsTemperature = false;
 }

+/**
+* @returns {string} The environment variable name for API key (not used)
+*/
 getRequiredApiKeyName() {
 return 'CLAUDE_CODE_API_KEY';
 }

+/**
+* @returns {boolean} False - Claude Code doesn't require API keys
+*/
 isRequiredApiKey() {
 return false;
 }

 /**
-* Override validateAuth to skip API key validation for Claude Code
-* @param {object} params - Parameters to validate
+* Optional CLI availability check for Claude Code
+* @param {object} params - Parameters (ignored)
 */
 validateAuth(params) {
-// Claude Code doesn't require an API key
-// No validation needed
+// Claude Code uses local CLI - perform lightweight availability check
+// This is optional validation that fails fast with actionable guidance
+if (
+process.env.NODE_ENV !== 'test' &&
+!_claudeCliChecked &&
+!process.env.CLAUDE_CODE_OAUTH_TOKEN
+) {
+try {
+execSync('claude --version', { stdio: 'pipe', timeout: 1000 });
+_claudeCliAvailable = true;
+} catch (error) {
+_claudeCliAvailable = false;
+log(
+'warn',
+'Claude Code CLI not detected. Install it with: npm install -g @anthropic-ai/claude-code'
+);
+} finally {
+_claudeCliChecked = true;
+}
+}
 }

 /**
-* Creates and returns a Claude Code client instance.
-* @param {object} params - Parameters for client initialization
-* @param {string} [params.commandName] - Name of the command invoking the service
-* @param {string} [params.baseURL] - Optional custom API endpoint (not used by Claude Code)
-* @returns {Function} Claude Code client function
-* @throws {Error} If initialization fails
+* Creates a Claude Code client instance
+* @param {object} params - Client parameters
+* @param {string} [params.commandName] - Command name for settings lookup
+* @returns {Function} Claude Code provider function
+* @throws {Error} If Claude Code CLI is not available or client creation fails
 */
-getClient(params) {
+getClient(params = {}) {
 try {
-// Claude Code doesn't use API keys or base URLs
-// Just return the provider factory
+const settings =
+getClaudeCodeSettingsForCommand(params.commandName) || {};
+
 return createClaudeCode({
-defaultSettings: getClaudeCodeSettingsForCommand(params?.commandName)
+defaultSettings: settings
 });
 } catch (error) {
-this.handleError('client initialization', error);
+// Provide more helpful error message
+const msg = String(error?.message || '');
+const code = error?.code;
+if (code === 'ENOENT' || /claude/i.test(msg)) {
+const enhancedError = new Error(
+`Claude Code CLI not available. Please install Claude Code CLI first. Original error: ${error.message}`
+);
+enhancedError.cause = error;
+this.handleError('Claude Code CLI initialization', enhancedError);
+} else {
+this.handleError('client initialization', error);
+}
 }
 }
+
+/**
+* @returns {string[]} List of supported model IDs
+*/
+getSupportedModels() {
+return this.supportedModels;
+}
+
+/**
+* Check if a model is supported
+* @param {string} modelId - Model ID to check
+* @returns {boolean} True if supported
+*/
+isModelSupported(modelId) {
+if (!modelId) return false;
+return this.supportedModels.includes(String(modelId).toLowerCase());
+}
 }

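For orientation, a hedged sketch of how a provider with these flags might be exercised end to end; the import path, the ESM top-level await context, and the prompt text are assumptions for illustration, while the method names and the { text, usage } return shape come from the diffs above:

// Sketch: instantiate the provider and generate text with a supported model.
import { ClaudeCodeProvider } from './src/ai-providers/claude-code.js';

const provider = new ClaudeCodeProvider();

if (!provider.isModelSupported('sonnet')) {
	throw new Error('Model not supported by Claude Code provider');
}

provider.validateAuth({}); // warns if the claude CLI is missing

const { text, usage } = await provider.generateText({
	modelId: 'sonnet',
	maxTokens: 1024,
	messages: [{ role: 'user', content: 'Summarize the open tasks.' }]
});
console.log(text, usage.totalTokens);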
Some files were not shown because too many files have changed in this diff.