Compare commits
63 Commits
extension@ ... tdd-phase-
| SHA1 |
|---|
| ef4e2e425b |
| f0d1d5de89 |
| 519d8bdfcb |
| 4b6ad19bc4 |
| f71cdb4eaa |
| bc0093d506 |
| 042fe6dced |
| 3178c3aeac |
| d75430c4d8 |
| 2dbfaa0d3b |
| 8857417870 |
| ad9355f97a |
| ec3972ff10 |
| 959c6151fa |
| 728787d869 |
| 27b2348a9a |
| 5cb7ed557a |
| b9e644c556 |
| 7265a6cf53 |
| db6f405f23 |
| 7b5a7c4495 |
| caee040907 |
| 4b5473860b |
| b43b7ce201 |
| 86027f1ee4 |
| 4f984f8a69 |
| f7646f41b5 |
| 20004a39ea |
| f1393f47b1 |
| 738ec51c04 |
| c7418c4594 |
| 0747f1c772 |
| ffe24a2e35 |
| 604b94baa9 |
| 2ea4bb6a81 |
| 3e96387715 |
| 100c3dc47d |
| 986ac117ae |
| 18aa416035 |
| 3b3dbabed1 |
| af53525cbc |
| 0079b7defd |
| 0b2c6967c4 |
| c0682ac795 |
| 01a7faea8f |
| b7f32eac5a |
| 044a7bfc98 |
| 814265cd33 |
| 9b7b2ca7b2 |
| 949f091179 |
| 32c2b03c23 |
| 3bfd999d81 |
| 9fa79eb026 |
| 875134247a |
| c2fc61ddb3 |
| aaacc3dae3 |
| 46cd5dc186 |
| 49a31be416 |
| 2b69936ee7 |
| b5fe723f8e |
| d67b81d25d |
| 66c05053c0 |
| d7ab4609aa |
11  .changeset/brave-lions-sing.md  Normal file
@@ -0,0 +1,11 @@
---
"task-master-ai": minor
---

Add Codex CLI provider with OAuth authentication

- Added codex-cli provider for GPT-5 and GPT-5-Codex models (272K input / 128K output)
- OAuth-first authentication via `codex login` - no API key required
- Optional OPENAI_CODEX_API_KEY support
- Codebase analysis capabilities automatically enabled
- Command-specific settings and approval/sandbox modes

5  .changeset/chore-fix-docs.md  Normal file
@@ -0,0 +1,5 @@
---
"task-master-ai": patch
---

Improve `analyze-complexity` CLI docs and `--research` flag documentation

7  .changeset/cursor-slash-commands.md  Normal file
@@ -0,0 +1,7 @@
---
"task-master-ai": minor
---

Add Cursor IDE custom slash command support

Expose Task Master commands as Cursor slash commands by copying assets/claude/commands to .cursor/commands on profile add and cleaning up on remove.

5  .changeset/curvy-weeks-flow.md  Normal file
@@ -0,0 +1,5 @@
---
"task-master-ai": patch
---

Change parent task back to "pending" when all subtasks are in "pending" state

5  .changeset/easy-spiders-wave.md  Normal file
@@ -0,0 +1,5 @@
---
"task-master-ai": patch
---

Do a quick fix on build

5  .changeset/fix-mcp-connection-errors.md  Normal file
@@ -0,0 +1,5 @@
---
"task-master-ai": patch
---

Fix MCP connection errors caused by deprecated generateTaskFiles calls. Resolves "Cannot read properties of null (reading 'toString')" errors when using MCP tools for task management operations.

5  .changeset/fix-mcp-default-tasks-path.md  Normal file
@@ -0,0 +1,5 @@
---
"task-master-ai": patch
---

Fix MCP server error when the file parameter is not provided - now properly constructs the default tasks.json path instead of failing with a 'tasksJsonPath is required' error.

5  .changeset/flat-cities-say.md  Normal file
@@ -0,0 +1,5 @@
---
"task-master-ai": minor
---

Added API keys page on the docs website: docs.task-master.dev/getting-started/api-keys

10  .changeset/forty-tables-invite.md  Normal file
@@ -0,0 +1,10 @@
---
"task-master-ai": minor
---

Move to AI SDK v5:

- Works better with claude-code and gemini-cli as AI providers
- Improved OpenAI model family compatibility
- Migrate ollama provider to v2
- Closes #1223, #1013, #1161, #1174

30  .changeset/gentle-cats-dance.md  Normal file
@@ -0,0 +1,30 @@
---
"task-master-ai": minor
---

Migrate AI services to use generateObject for structured data generation

This update migrates all AI service calls from generateText to generateObject, ensuring more reliable and structured responses across all commands.

### Key Changes:

- **Unified AI Service**: Replaced separate generateText implementations with a single generateObjectService that handles structured data generation
- **JSON Mode Support**: Added proper JSON mode configuration for providers that support it (OpenAI, Anthropic, Google, Groq)
- **Schema Validation**: Integrated Zod schemas for all AI-generated content with automatic validation
- **Provider Compatibility**: Maintained compatibility with all existing providers while leveraging their native structured output capabilities
- **Improved Reliability**: Structured output generation reduces parsing errors and ensures consistent data formats

### Technical Improvements:

- Centralized provider configuration in `ai-providers-unified.js`
- Added `generateObject` support detection for each provider
- Implemented proper error handling for schema validation failures
- Maintained backward compatibility with existing prompt structures

### Bug Fixes:

- Fixed subtask ID numbering issue where the AI was generating inconsistent IDs (101-105, 601-603) instead of sequential numbering (1, 2, 3...)
- Enhanced prompt instructions to enforce proper ID generation patterns
- Ensured subtasks display correctly in the X.1, X.2, X.3 format

This migration improves the reliability and consistency of AI-generated content throughout the Task Master application.

13  .changeset/mcp-timeout-configuration.md  Normal file
@@ -0,0 +1,13 @@
---
"task-master-ai": minor
---

Enhanced Roo Code profile with MCP timeout configuration for improved reliability during long-running AI operations. The Roo profile now automatically configures a 300-second timeout for MCP server operations, preventing timeouts during complex tasks like `parse-prd`, `expand-all`, `analyze-complexity`, and `research` operations. This change also replaces static MCP configuration files with programmatic generation for better maintainability.

**What's New:**
- 300-second timeout for MCP operations (up from the default 60 seconds)
- Programmatic MCP configuration generation (replaces static asset files)
- Enhanced reliability for AI-powered operations
- Consistent with other AI coding assistant profiles

**Migration:** No user action required - existing Roo Code installations will automatically receive the enhanced MCP configuration on next initialization.

5  .changeset/petite-ideas-grab.md  Normal file
@@ -0,0 +1,5 @@
---
"task-master-ai": patch
---

Fix Claude Code settings validation for pathToClaudeCodeExecutable

26  .changeset/pre.json  Normal file
@@ -0,0 +1,26 @@
{
  "mode": "exit",
  "tag": "rc",
  "initialVersions": {
    "task-master-ai": "0.27.3",
    "docs": "0.0.4",
    "extension": "0.25.4"
  },
  "changesets": [
    "brave-lions-sing",
    "chore-fix-docs",
    "cursor-slash-commands",
    "curvy-weeks-flow",
    "easy-spiders-wave",
    "fix-mcp-connection-errors",
    "fix-mcp-default-tasks-path",
    "flat-cities-say",
    "forty-tables-invite",
    "gentle-cats-dance",
    "mcp-timeout-configuration",
    "petite-ideas-grab",
    "silly-pandas-find",
    "sweet-maps-rule",
    "whole-pigs-say"
  ]
}

5  .changeset/silly-pandas-find.md  Normal file
@@ -0,0 +1,5 @@
---
"task-master-ai": patch
---

Fix sonar deep research model failing; it should be called `sonar-deep-research`

5  .changeset/sweet-maps-rule.md  Normal file
@@ -0,0 +1,5 @@
---
"task-master-ai": minor
---

Upgrade grok-cli AI provider to AI SDK v5

8  .changeset/whole-pigs-say.md  Normal file
@@ -0,0 +1,8 @@
---
"task-master-ai": patch
---

Fix complexity score not showing for `task-master show` and `task-master list`

- Added complexity score on "next task" when running `task-master list`
- Added colors to the complexity score to reflect difficulty (easy, medium, hard)
@@ -1,10 +1,3 @@
reviews:
  profile: assertive
  poem: false
  auto_review:
    base_branches:
      - rc
      - beta
      - alpha
      - production
      - next
3  .github/workflows/ci.yml  vendored
@@ -6,9 +6,6 @@ on:
      - main
      - next
  pull_request:
    branches:
      - main
      - next
  workflow_dispatch:

concurrency:
5  .github/workflows/extension-ci.yml  vendored
@@ -41,8 +41,7 @@ jobs:
          restore-keys: |
            ${{ runner.os }}-node-

      - name: Install Extension Dependencies
        working-directory: apps/extension
      - name: Install Monorepo Dependencies
        run: npm ci
        timeout-minutes: 5

@@ -68,7 +67,6 @@ jobs:
            ${{ runner.os }}-node-

      - name: Install if cache miss
        working-directory: apps/extension
        run: npm ci
        timeout-minutes: 3

@@ -100,7 +98,6 @@ jobs:
            ${{ runner.os }}-node-

      - name: Install if cache miss
        working-directory: apps/extension
        run: npm ci
        timeout-minutes: 3
3  .github/workflows/extension-release.yml  vendored
@@ -31,8 +31,7 @@ jobs:
          restore-keys: |
            ${{ runner.os }}-node-

      - name: Install Extension Dependencies
        working-directory: apps/extension
      - name: Install Monorepo Dependencies
        run: npm ci
        timeout-minutes: 5
@@ -1,8 +1,8 @@
{
  "models": {
    "main": {
      "provider": "anthropic",
      "modelId": "claude-sonnet-4-20250514",
      "provider": "claude-code",
      "modelId": "sonnet",
      "maxTokens": 64000,
      "temperature": 0.2
    },
@@ -35,6 +35,7 @@
    "defaultTag": "master"
  },
  "claudeCode": {},
  "codexCli": {},
  "grokCli": {
    "timeout": 120000,
    "workingDirectory": null,
912  .taskmaster/docs/autonomous-tdd-git-workflow.md  Normal file
@@ -0,0 +1,912 @@
## Summary

- Put the existing git and test workflows on rails: a repeatable, automated process that can run autonomously, with guardrails and a compact TUI for visibility.

- Flow: for a selected task, create a branch named with the tag + task id → generate tests for the first subtask (red) using the Surgical Test Generator → implement code (green) → verify tests → commit → repeat per subtask → final verify → push → open PR against the default branch.

- Build on existing rules: .cursor/rules/git_workflow.mdc, .cursor/rules/test_workflow.mdc, .claude/agents/surgical-test-generator.md, and existing CLI/core services.

## Goals

- Deterministic, resumable automation to execute the TDD loop per subtask with minimal human intervention.

- Strong guardrails: never commit to the default branch; only commit when tests pass; enforce status transitions; persist logs/state for debuggability.

- Visibility: a compact terminal UI (like lazygit) to pick a tag, view tasks, and start work; a right-side pane opens an executor terminal (via tmux) for agent coding.

- Extensible: framework-agnostic test generation via the Surgical Test Generator; detect and use the repo's test command for execution with coverage thresholds.

## Non-Goals (initial)

- Full multi-language runner parity beyond detecting and executing the project's test command.

- Complex GUI; start with CLI/TUI + tmux pane. IDE/extension can hook into the same state later.

- Rich executor selection UX (codex/gemini/claude) — we'll prompt per run; defaults can come later.

## Success Criteria

- One command can autonomously complete a task's subtasks via TDD and open a PR when done.

- All commits made on a branch that includes the tag and task id (see Branch Naming); no commits to the default branch directly.

- Every subtask iteration: failing tests added first (red), then code added to pass them (green), commit only after green.

- End-to-end logs + artifacts stored in .taskmaster/reports/runs/<timestamp-or-id>/.

## Success Metrics (Phase 1)

- **Adoption**: 80% of tasks in a pilot repo completed via `tm autopilot`
- **Safety**: 0 commits to the default branch; 100% of commits have green tests
- **Efficiency**: Average time from task start to PR < 30 min for simple subtasks
- **Reliability**: < 5% of runs require manual intervention (timeout/conflicts)

## User Stories

- As a developer, I can run tm autopilot <taskId> and watch a structured, safe workflow execute.

- As a reviewer, I can inspect commits per subtask, and a PR summarizing the work when the task completes.

- As an operator, I can see the current step, active subtask, test status, and logs in a compact CLI view and read a final run report.

## Example Workflow Traces

### Happy Path: Complete a 3-subtask feature

```bash
# Developer starts
$ tm autopilot 42
→ Checks preflight: ✓ clean tree, ✓ npm test detected
→ Creates branch: analytics/task-42-user-metrics
→ Subtask 42.1: "Add metrics schema"
   RED: generates test_metrics_schema.test.js → 3 failures
   GREEN: implements schema.js → all pass
   COMMIT: "feat(metrics): add metrics schema (task 42.1)"
→ Subtask 42.2: "Add collection endpoint"
   RED: generates test_metrics_endpoint.test.js → 5 failures
   GREEN: implements api/metrics.js → all pass
   COMMIT: "feat(metrics): add collection endpoint (task 42.2)"
→ Subtask 42.3: "Add dashboard widget"
   RED: generates test_metrics_widget.test.js → 4 failures
   GREEN: implements components/MetricsWidget.jsx → all pass
   COMMIT: "feat(metrics): add dashboard widget (task 42.3)"
→ Final: all 3 subtasks complete
   ✓ Run full test suite → all pass
   ✓ Coverage check → 85% (meets 80% threshold)
   PUSH: confirms with user → pushed to origin
   PR: opens #123 "Task #42 [analytics]: User metrics tracking"

✓ Task 42 complete. PR: https://github.com/org/repo/pull/123
  Run report: .taskmaster/reports/runs/2025-01-15-142033/
```

### Error Recovery: Failing tests timeout

```bash
$ tm autopilot 42
→ Subtask 42.2 GREEN phase: attempt 1 fails (2 tests still red)
→ Subtask 42.2 GREEN phase: attempt 2 fails (1 test still red)
→ Subtask 42.2 GREEN phase: attempt 3 fails (1 test still red)

⚠️ Paused: Could not achieve green state after 3 attempts
📋 State saved to: .taskmaster/reports/runs/2025-01-15-142033/
   Last error: "POST /api/metrics returns 500 instead of 201"

Next steps:
  - Review diff: git diff HEAD
  - Inspect logs: cat .taskmaster/reports/runs/2025-01-15-142033/log.jsonl
  - Check test output: cat .taskmaster/reports/runs/2025-01-15-142033/test-results/subtask-42.2-green-attempt3.json
  - Resume after manual fix: tm autopilot --resume

# Developer manually fixes the issue, then:
$ tm autopilot --resume
→ Resuming subtask 42.2 GREEN phase
   GREEN: all tests pass
   COMMIT: "feat(metrics): add collection endpoint (task 42.2)"
→ Continuing to subtask 42.3...
```

### Dry Run: Preview before execution

```bash
$ tm autopilot 42 --dry-run
Autopilot Plan for Task #42 [analytics]: User metrics tracking
─────────────────────────────────────────────────────────────
Preflight:
  ✓ Working tree is clean
  ✓ Test command detected: npm test
  ✓ Tools available: git, gh, node, npm
  ✓ Current branch: main (will create new branch)

Branch & Tag:
  → Create branch: analytics/task-42-user-metrics
  → Set active tag: analytics

Subtasks (3 pending):
  1. 42.1: Add metrics schema
     - RED: generate tests in src/__tests__/schema.test.js
     - GREEN: implement src/schema.js
     - COMMIT: "feat(metrics): add metrics schema (task 42.1)"

  2. 42.2: Add collection endpoint [depends on 42.1]
     - RED: generate tests in src/api/__tests__/metrics.test.js
     - GREEN: implement src/api/metrics.js
     - COMMIT: "feat(metrics): add collection endpoint (task 42.2)"

  3. 42.3: Add dashboard widget [depends on 42.2]
     - RED: generate tests in src/components/__tests__/MetricsWidget.test.jsx
     - GREEN: implement src/components/MetricsWidget.jsx
     - COMMIT: "feat(metrics): add dashboard widget (task 42.3)"

Finalization:
  → Run full test suite with coverage
  → Push branch to origin (will confirm)
  → Create PR targeting main

Run without --dry-run to execute.
```

## High-Level Workflow

1) Pre-flight

- Verify a clean working tree or confirm the staging/commit policy (configurable).

- Detect the repo type and the project's test command (e.g., npm test, pnpm test, pytest, go test).

- Validate tools: git, gh (optional, for PR), node/npm, and (if used) the claude CLI.

- Load TaskMaster state and the selected task; if no subtasks exist, automatically run "expand" before working.

2) Branch & Tag Setup

- Checkout the default branch and update (optional), then create a branch using Branch Naming (below).

- Map branch ↔ tag via existing tag management; explicitly set the active tag to the branch's tag.

3) Subtask Loop (for each pending/in-progress subtask in dependency order)

- Select the next eligible subtask using the tm-core TaskService getNextTask() and subtask eligibility logic.

- Red: generate or update failing tests for the subtask

  - Use the Surgical Test Generator system prompt (.claude/agents/surgical-test-generator.md) to produce high-signal tests following project conventions.

  - Run tests to confirm red; record results. If not red (already passing), skip to the next subtask or escalate.

- Green: implement code to pass tests

  - Use the executor to implement changes (initial: claude CLI prompt with focused context).

  - Re-run tests until green or until the timeout/backoff policy triggers.

- Commit: when green

  - Commit tests + code with a conventional commit message. Optionally update subtask status to done.

  - Persist run step metadata/logs.

4) Finalization

- Run the full test suite and coverage (if configured); optionally lint/format.

- Commit any final adjustments.

- Push the branch (ask the user to confirm); create a PR (via gh pr create) targeting the default branch. Title format: Task #<id> [<tag>]: <title>.

5) Post-Run

- Update task status if desired (e.g., review).

- Persist a run report (JSON + markdown summary) to .taskmaster/reports/runs/<run-id>/.

## Guardrails

- Never commit to the default branch.

- Commit only if all tests (targeted and suite) pass; allow override flags.

- Enforce 80% coverage thresholds (lines/branches/functions/statements) by default; configurable (see the sketch after this list).

- Timebox model operations and retries; if not green within N attempts, pause with actionable state for resume.

- Always log actions, commands, and outcomes; include a dry-run mode.

- Ask before branch creation, pushing, and opening a PR unless --no-confirm is set.
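
To make the coverage guardrail concrete, here is a minimal sketch of the gate that could run before each commit, assuming a coverage summary shaped like the `coverageThresholds` block in the configuration schema below; the helper name and shapes are illustrative, not an existing tm-core API.

```typescript
// Hypothetical coverage gate; thresholds mirror the config defaults (all 80%).
interface CoverageSummary {
  lines: number;
  branches: number;
  functions: number;
  statements: number;
}

const DEFAULT_THRESHOLDS: CoverageSummary = {
  lines: 80,
  branches: 80,
  functions: 80,
  statements: 80
};

// Returns the metrics that fall below their threshold; an empty array means
// the commit gate is open.
function coverageGate(
  actual: CoverageSummary,
  thresholds: CoverageSummary = DEFAULT_THRESHOLDS
): string[] {
  return (Object.keys(thresholds) as (keyof CoverageSummary)[])
    .filter((metric) => actual[metric] < thresholds[metric])
    .map((metric) => `${metric}: ${actual[metric]}% < ${thresholds[metric]}%`);
}

// Example: 78.5% line coverage and 79% statement coverage would block the commit.
const violations = coverageGate({ lines: 78.5, branches: 82, functions: 85, statements: 79 });
// => ['lines: 78.5% < 80%', 'statements: 79% < 80%']
```

In the autopilot loop this check would run after the full-suite test run and block the COMMIT phase, with the violations written to the run log.
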
## Integration Points (Current Repo)

- CLI: apps/cli provides the command structure and UI components.

  - New command: tm autopilot (alias: task-master autopilot).

  - Reuse UI components under apps/cli/src/ui/components/ for headers/task details/next-task.

- Core services: packages/tm-core

  - TaskService for selection, status, tags.

  - TaskExecutionService for prompt formatting and executor prep.

  - Executors: claude executor and ExecutorFactory to run external tools.

  - Proposed new: WorkflowOrchestrator to drive the autonomous loop and emit progress events.

- Tag/Git utilities: scripts/modules/utils/git-utils.js and scripts/modules/task-manager/tag-management.js for branch→tag mapping and explicit tag switching.

- Rules: .cursor/rules/git_workflow.mdc and .cursor/rules/test_workflow.mdc to steer behavior and ensure consistency.

- Test generation prompt: .claude/agents/surgical-test-generator.md.

## Proposed Components

- Orchestrator (tm-core): WorkflowOrchestrator (new)

  - State machine driving phases: Preflight → Branch/Tag → SubtaskIter (Red/Green/Commit) → Finalize → PR.

  - Exposes an evented API (progress events) that the CLI can render.

  - Stores run state artifacts.

- Test Runner Adapter

  - Detects and runs tests via the project's test command (e.g., npm test), with targeted runs where feasible.

  - API: runTargeted(files/pattern), runAll(), report summary (failures, duration, coverage), enforce the 80% threshold by default (see the interface sketch after this list).

- Git/PR Adapter

  - Encapsulates git ops: branch create/checkout, add/commit, push.

  - Optional gh integration to open the PR; fall back to printed instructions if gh is unavailable.

  - Confirmation gates for branch creation and pushes.

- Prompt/Exec Adapter

  - Uses the existing executor service to call the selected coding assistant (initially claude) with tight prompts: task/subtask context, surgical tests first, then minimal code to green.

- Run State + Reporting

  - JSONL log of steps, timestamps, commands, test results.

  - Markdown summary for the PR description and post-run artifact.
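
As referenced in the Test Runner Adapter and Git/PR Adapter items above, a minimal sketch of what those adapter surfaces could look like; all names and signatures here are assumptions for illustration, not the final tm-core interfaces.

```typescript
// Illustrative adapter shapes (assumed, not the final tm-core API).
interface TestRunResult {
  passed: number;
  failed: number;
  skipped: number;
  durationMs: number;
  coverage?: { lines: number; branches: number; functions: number; statements: number };
}

interface TestRunnerAdapter {
  /** Detects the project's test command (e.g., package.json scripts.test). */
  detect(): Promise<string>;
  /** Runs only the tests matching the given files or glob pattern. */
  runTargeted(filesOrPattern: string[]): Promise<TestRunResult>;
  /** Runs the full suite, optionally collecting coverage. */
  runAll(options?: { coverage?: boolean }): Promise<TestRunResult>;
}

interface GitAdapter {
  createBranch(name: string, from?: string): Promise<void>;
  commit(message: string, files: string[]): Promise<string>; // returns the commit SHA
  push(branch: string): Promise<void>;
  /** Opens a PR via `gh` when available; otherwise returns manual instructions. */
  openPullRequest(opts: { title: string; body: string; base: string }): Promise<string>;
}
```
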
## CLI UX (MVP)

- Command: tm autopilot [taskId]

- Flags: --dry-run, --no-push, --no-pr, --no-confirm, --force, --max-attempts <n>, --runner <auto|custom>, --commit-scope <scope>

- Output: compact header (project, tag, branch), current phase, subtask line, last test summary, next actions.

- Resume: if interrupted, tm autopilot --resume picks up from the last checkpoint in run state.

### TUI with tmux (Linear Execution)

- Left pane: tag selector, task list (status/priority), start/expand shortcuts; "Start" triggers the next task or a selected task.

- Right pane: executor terminal (tmux split) that runs the coding agent (claude-code/codex). Autopilot can hand over to the right pane during green.

- MCP integration: use MCP tools for task queries/updates and for shell/test invocations where available.

## TUI Layout (tmux-based)

### Pane Structure

```
┌─────────────────────────────────────┬──────────────────────────────────┐
│ Task Navigator (left)               │ Executor Terminal (right)        │
│                                     │                                  │
│ Project: my-app                     │ $ tm autopilot --executor-mode   │
│ Branch: analytics/task-42           │ > Running subtask 42.2 GREEN...  │
│ Tag: analytics                      │ > Implementing endpoint...       │
│                                     │ > Tests: 3 passed, 0 failed      │
│ Tasks:                              │ > Ready to commit                │
│ → 42 [in-progress] User metrics     │                                  │
│   → 42.1 [done] Schema              │ [Live output from Claude Code]   │
│   → 42.2 [active] Endpoint ◀        │                                  │
│   → 42.3 [pending] Dashboard        │                                  │
│                                     │                                  │
│ [s] start [p] pause [q] quit        │                                  │
└─────────────────────────────────────┴──────────────────────────────────┘
```

### Implementation Notes

- **Left pane**: `apps/cli/src/ui/tui/navigator.ts` (new, uses `blessed` or `ink`)
- **Right pane**: spawned via `tmux split-window -h` running `tm autopilot --executor-mode`
- **Communication**: shared state file `.taskmaster/state/current-run.json` + file watching or an event stream (see the watcher sketch after this list)
- **Keybindings**:
  - `s` - Start selected task
  - `p` - Pause/resume current run
  - `q` - Quit (with confirmation if a run is active)
  - `↑/↓` - Navigate task list
  - `Enter` - Expand/collapse subtasks
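
A minimal sketch of the pane communication mentioned above: the navigator watches the shared state file and re-renders whenever the executor pane writes to it. The file path comes from the notes above; the `RunSnapshot` shape and helper are assumptions.

```typescript
import { watch, readFileSync } from 'node:fs';

// Assumed shape of the shared run state written by the executor pane.
interface RunSnapshot {
  taskId: string;
  currentSubtask: string;
  phase: 'RED' | 'GREEN' | 'COMMIT';
  status: 'running' | 'paused' | 'completed' | 'failed';
}

const STATE_FILE = '.taskmaster/state/current-run.json';

// Re-render the navigator whenever the executor pane updates the state file.
function watchRunState(onChange: (snapshot: RunSnapshot) => void): void {
  watch(STATE_FILE, () => {
    try {
      onChange(JSON.parse(readFileSync(STATE_FILE, 'utf8')) as RunSnapshot);
    } catch {
      // Ignore partial writes; the next change event will re-read the file.
    }
  });
}

watchRunState((run) => {
  console.log(`Subtask ${run.currentSubtask} — ${run.phase} (${run.status})`);
});
```
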
## Prompt Composition (Detailed)

### System Prompt Assembly

Prompts are composed in three layers:

1. **Base rules** (loaded in order from `.cursor/rules/` and `.claude/agents/`):
   - `git_workflow.mdc` → git commit conventions, branch policy, PR guidelines
   - `test_workflow.mdc` → TDD loop requirements, coverage thresholds, test structure
   - `surgical-test-generator.md` → test generation methodology, project-specific test patterns

2. **Task context injection**:
   ```
   You are implementing:
   Task #42 [analytics]: User metrics tracking
   Subtask 42.2: Add collection endpoint

   Description:
   Implement POST /api/metrics endpoint to collect user metrics events

   Acceptance criteria:
   - POST /api/metrics accepts { userId, eventType, timestamp }
   - Validates input schema (reject missing/invalid fields)
   - Persists to database
   - Returns 201 on success with created record
   - Returns 400 on validation errors

   Dependencies:
   - Subtask 42.1 (metrics schema) is complete

   Current phase: RED (generate failing tests)
   Test command: npm test
   Test file convention: src/**/*.test.js (vitest framework detected)
   Branch: analytics/task-42-user-metrics
   Project language: JavaScript (Node.js)
   ```

3. **Phase-specific instructions**:
   - **RED phase**: "Generate minimal failing tests for this subtask. Do NOT implement any production code. Only create test files. Confirm tests fail with clear error messages indicating missing implementation."
   - **GREEN phase**: "Implement minimal code to pass the failing tests. Follow existing project patterns in `src/`. Only modify files necessary for this subtask. Keep changes focused and reviewable."

### Example Full Prompt (RED Phase)

```markdown
<SYSTEM PROMPT>
[Contents of .cursor/rules/git_workflow.mdc]
[Contents of .cursor/rules/test_workflow.mdc]
[Contents of .claude/agents/surgical-test-generator.md]

<TASK CONTEXT>
You are implementing:
Task #42.2: Add collection endpoint

Description:
Implement POST /api/metrics endpoint to collect user metrics events

Acceptance criteria:
- POST /api/metrics accepts { userId, eventType, timestamp }
- Validates input schema (reject missing/invalid fields)
- Persists to database using MetricsSchema from subtask 42.1
- Returns 201 on success with created record
- Returns 400 on validation errors with details

Dependencies: Subtask 42.1 (metrics schema) is complete

<INSTRUCTION>
Generate failing tests for this subtask. Follow project conventions:
- Test file: src/api/__tests__/metrics.test.js
- Framework: vitest (detected from package.json)
- Test cases to cover:
  * POST /api/metrics with valid payload → should return 201 (will fail: endpoint not implemented)
  * POST /api/metrics with missing userId → should return 400 (will fail: validation not implemented)
  * POST /api/metrics with invalid timestamp → should return 400 (will fail: validation not implemented)
  * POST /api/metrics should persist to database → should save record (will fail: persistence not implemented)

Do NOT implement the endpoint code yet. Only create test file(s).
Confirm tests fail with messages like "Cannot POST /api/metrics" or "endpoint not defined".

Output format:
1. File path to create: src/api/__tests__/metrics.test.js
2. Complete test code
3. Command to run: npm test src/api/__tests__/metrics.test.js
```

### Example Full Prompt (GREEN Phase)

```markdown
<SYSTEM PROMPT>
[Contents of .cursor/rules/git_workflow.mdc]
[Contents of .cursor/rules/test_workflow.mdc]

<TASK CONTEXT>
Task #42.2: Add collection endpoint
[same context as RED phase]

<CURRENT STATE>
Tests created in RED phase:
- src/api/__tests__/metrics.test.js
- 5 tests written, all failing as expected

Test output:
FAIL src/api/__tests__/metrics.test.js
POST /api/metrics
  ✗ should return 201 with valid payload (endpoint not found)
  ✗ should return 400 with missing userId (endpoint not found)
  ✗ should return 400 with invalid timestamp (endpoint not found)
  ✗ should persist to database (endpoint not found)

<INSTRUCTION>
Implement minimal code to make all tests pass.

Guidelines:
- Create/modify file: src/api/metrics.js
- Use existing patterns from src/api/ (e.g., src/api/users.js for reference)
- Import MetricsSchema from subtask 42.1 (src/models/schema.js)
- Implement validation, persistence, and response handling
- Follow project error handling conventions
- Keep implementation focused on this subtask only

After implementation:
1. Run tests: npm test src/api/__tests__/metrics.test.js
2. Confirm all 5 tests pass
3. Report results

Output format:
1. File(s) created/modified
2. Implementation code
3. Test command and results
```

### Prompt Loading Configuration

See `.taskmaster/config.json` → `prompts` section for paths and load order; a sketch of the assembly step follows.
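
A minimal sketch of that assembly step, reading the rule files in the configured load order and appending the test-generator prompt for RED phases; the paths mirror the `prompts` section of the configuration schema below, and the helper itself is hypothetical.

```typescript
import { readFileSync } from 'node:fs';
import { join } from 'node:path';

interface PromptsConfig {
  rulesPath: string;          // e.g., ".cursor/rules"
  testGeneratorPath: string;  // e.g., ".claude/agents/surgical-test-generator.md"
  loadOrder: string[];        // e.g., ["git_workflow.mdc", "test_workflow.mdc"]
}

// Layer 1 of the prompt: base rules in load order, plus the test generator
// prompt for RED phases. Task context and phase instructions are appended
// later when the orchestrator builds a work unit.
function assembleSystemPrompt(config: PromptsConfig, phase: 'RED' | 'GREEN'): string {
  const rules = config.loadOrder.map((file) =>
    readFileSync(join(config.rulesPath, file), 'utf8')
  );
  if (phase === 'RED') {
    rules.push(readFileSync(config.testGeneratorPath, 'utf8'));
  }
  return rules.join('\n\n');
}
```
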
## Configuration Schema

### .taskmaster/config.json

```json
{
  "autopilot": {
    "enabled": true,
    "requireCleanWorkingTree": true,
    "commitTemplate": "{type}({scope}): {msg}",
    "defaultCommitType": "feat",
    "maxGreenAttempts": 3,
    "testTimeout": 300000
  },
  "test": {
    "runner": "auto",
    "coverageThresholds": {
      "lines": 80,
      "branches": 80,
      "functions": 80,
      "statements": 80
    },
    "targetedRunPattern": "**/*.test.js"
  },
  "git": {
    "branchPattern": "{tag}/task-{id}-{slug}",
    "pr": {
      "enabled": true,
      "base": "default",
      "bodyTemplate": ".taskmaster/templates/pr-body.md"
    }
  },
  "prompts": {
    "rulesPath": ".cursor/rules",
    "testGeneratorPath": ".claude/agents/surgical-test-generator.md",
    "loadOrder": ["git_workflow.mdc", "test_workflow.mdc"]
  }
}
```

### Configuration Fields

#### autopilot
- `enabled` (boolean): Enable/disable autopilot functionality
- `requireCleanWorkingTree` (boolean): Require a clean git state before starting
- `commitTemplate` (string): Template for commit messages (tokens: `{type}`, `{scope}`, `{msg}`)
- `defaultCommitType` (string): Default commit type (feat, fix, chore, etc.)
- `maxGreenAttempts` (number): Maximum retry attempts to achieve green tests (default: 3)
- `testTimeout` (number): Timeout in milliseconds per test run (default: 300000 = 5 min)

#### test
- `runner` (string): Test runner detection mode (`"auto"` or an explicit command like `"npm test"`)
- `coverageThresholds` (object): Minimum coverage percentages required
  - `lines`, `branches`, `functions`, `statements` (number): Threshold percentages (0-100)
- `targetedRunPattern` (string): Glob pattern for targeted subtask test runs

#### git
- `branchPattern` (string): Branch naming pattern (tokens: `{tag}`, `{id}`, `{slug}`)
- `pr.enabled` (boolean): Enable automatic PR creation
- `pr.base` (string): Target branch for PRs (`"default"` uses the repo default, or specify one like `"main"`)
- `pr.bodyTemplate` (string): Path to a PR body template file (optional)

#### prompts
- `rulesPath` (string): Directory containing rule files (e.g., `.cursor/rules`)
- `testGeneratorPath` (string): Path to the test generator prompt file
- `loadOrder` (array): Order in which to load rule files from `rulesPath`

### Environment Variables

```bash
# Required for executor
ANTHROPIC_API_KEY=sk-ant-...   # Claude API key

# Optional: for PR creation
GITHUB_TOKEN=ghp_...           # GitHub personal access token

# Optional: for other executors (future)
OPENAI_API_KEY=sk-...
GOOGLE_API_KEY=...
```
## Run Artifacts & Observability

### Per-Run Artifact Structure

Each autopilot run creates a timestamped directory with complete traceability:

```
.taskmaster/reports/runs/2025-01-15-142033/
├── manifest.json            # run metadata (task id, start/end time, status)
├── log.jsonl                # timestamped event stream
├── commits.txt              # list of commit SHAs made during the run
├── test-results/
│   ├── subtask-42.1-red.json
│   ├── subtask-42.1-green.json
│   ├── subtask-42.2-red.json
│   ├── subtask-42.2-green-attempt1.json
│   ├── subtask-42.2-green-attempt2.json
│   ├── subtask-42.2-green-attempt3.json
│   └── final-suite.json
└── pr.md                    # generated PR body
```

### manifest.json Format

```json
{
  "runId": "2025-01-15-142033",
  "taskId": "42",
  "tag": "analytics",
  "branch": "analytics/task-42-user-metrics",
  "startTime": "2025-01-15T14:20:33Z",
  "endTime": "2025-01-15T14:45:12Z",
  "status": "completed",
  "subtasksCompleted": ["42.1", "42.2", "42.3"],
  "subtasksFailed": [],
  "totalCommits": 3,
  "prUrl": "https://github.com/org/repo/pull/123",
  "finalCoverage": {
    "lines": 85.3,
    "branches": 82.1,
    "functions": 88.9,
    "statements": 85.0
  }
}
```

### log.jsonl Format

Event stream in JSON Lines format for easy parsing and debugging:

```jsonl
{"ts":"2025-01-15T14:20:33Z","phase":"preflight","status":"ok","details":{"testCmd":"npm test","gitClean":true}}
{"ts":"2025-01-15T14:20:45Z","phase":"branch","status":"ok","branch":"analytics/task-42-user-metrics"}
{"ts":"2025-01-15T14:21:00Z","phase":"red","subtask":"42.1","status":"ok","tests":{"failed":3,"passed":0}}
{"ts":"2025-01-15T14:22:15Z","phase":"green","subtask":"42.1","status":"ok","tests":{"passed":3,"failed":0},"attempts":2}
{"ts":"2025-01-15T14:22:20Z","phase":"commit","subtask":"42.1","status":"ok","sha":"a1b2c3d","message":"feat(metrics): add metrics schema (task 42.1)"}
{"ts":"2025-01-15T14:23:00Z","phase":"red","subtask":"42.2","status":"ok","tests":{"failed":5,"passed":0}}
{"ts":"2025-01-15T14:25:30Z","phase":"green","subtask":"42.2","status":"error","tests":{"passed":3,"failed":2},"attempts":3,"error":"Max attempts reached"}
{"ts":"2025-01-15T14:25:35Z","phase":"pause","reason":"max_attempts","nextAction":"manual_review"}
```

### Test Results Format

Each test run stores detailed results:

```json
{
  "subtask": "42.2",
  "phase": "green",
  "attempt": 3,
  "timestamp": "2025-01-15T14:25:30Z",
  "command": "npm test src/api/__tests__/metrics.test.js",
  "exitCode": 1,
  "duration": 2340,
  "summary": {
    "total": 5,
    "passed": 3,
    "failed": 2,
    "skipped": 0
  },
  "failures": [
    {
      "test": "POST /api/metrics should return 201 with valid payload",
      "error": "Expected status 201, got 500",
      "stack": "..."
    }
  ],
  "coverage": {
    "lines": 78.5,
    "branches": 75.0,
    "functions": 80.0,
    "statements": 78.5
  }
}
```
## Execution Model

### Orchestration vs Direct Execution

The autopilot system uses an **orchestration model** rather than direct code execution:

**Orchestrator Role** (tm-core WorkflowOrchestrator):
- Maintains a state machine tracking the current phase (RED/GREEN/COMMIT) per subtask
- Validates preconditions (tests pass, git state clean, etc.)
- Returns "work units" describing what needs to be done next
- Records completion and advances to the next phase
- Persists state for resumability

**Executor Role** (Claude Code/AI session via MCP):
- Queries the orchestrator for the next work unit
- Executes the work (generates tests, writes code, runs tests, makes commits)
- Reports results back to the orchestrator
- Handles file operations and tool invocations

**Why This Approach?**
- Leverages existing AI capabilities (Claude Code) rather than duplicating them
- The MCP protocol provides a clean separation between state management and execution
- Allows human oversight and intervention at each phase
- Simpler to implement: the orchestrator is pure state logic, no code generation needed
- Enables multiple executor types (Claude Code, other AI tools, human developers)

**Example Flow**:
```typescript
// Claude Code (via MCP) queries the orchestrator
const workUnit = await orchestrator.getNextWorkUnit('42');
// => {
//   phase: 'RED',
//   subtask: '42.1',
//   action: 'Generate failing tests for metrics schema',
//   context: { title, description, dependencies, testFile: 'src/__tests__/schema.test.js' }
// }

// Claude Code executes the work (writes the test file, runs tests)
// Then reports back
await orchestrator.completeWorkUnit('42', '42.1', 'RED', {
  success: true,
  testsCreated: ['src/__tests__/schema.test.js'],
  testsFailed: 3
});

// Query again for the next phase
const nextWorkUnit = await orchestrator.getNextWorkUnit('42');
// => { phase: 'GREEN', subtask: '42.1', action: 'Implement code to pass tests', ... }
```
## Design Decisions

### Why commit per subtask instead of per task?

**Decision**: Commit after each subtask's green state, not after the entire task.

**Rationale**:
- Atomic commits make code review easier (reviewers can see the logical progression)
- Easier to revert a single subtask if it causes issues downstream
- Matches the TDD loop's natural checkpoint and cognitive boundary
- Provides resumability points if the run is interrupted

**Trade-off**: More commits per task (can use squash-merge in PRs if desired)

### Why not support parallel subtask execution?

**Decision**: Sequential subtask execution in Phase 1; parallel execution deferred to Phase 3.

**Rationale**:
- Subtasks often have implicit dependencies (e.g., schema before endpoint, endpoint before UI)
- Simpler orchestrator state machine (less complexity = faster to ship)
- Parallel execution requires an explicit dependency DAG and conflict resolution
- Can be added in Phase 3 once the core workflow is proven stable

**Trade-off**: Slower for truly independent subtasks (mitigated by keeping subtasks small and focused)

### Why require 80% coverage by default?

**Decision**: Enforce an 80% coverage threshold (lines/branches/functions/statements) before allowing commits.

**Rationale**:
- Industry-standard baseline for production code quality
- Forces test generation to be comprehensive, not superficial
- Configurable per project via `.taskmaster/config.json` if too strict
- Prevents "green tests" that only cover happy paths

**Trade-off**: May require more test generation iterations; can be lowered per project

### Why use tmux instead of a rich GUI?

**Decision**: The MVP uses tmux split panes for the TUI, not an Electron/web-based GUI.

**Rationale**:
- Tmux is widely available on dev machines; no installation burden
- Terminal-first workflows match the developer mental model (no context switching)
- Simpler to implement and maintain; a GUI can be added later via extensions
- State stored in files allows IDE/extension integration without coupling

**Trade-off**: Less visual polish than a GUI; requires tmux familiarity

### Why not support multiple executors (codex/gemini/claude) in Phase 1?

**Decision**: Start with the Claude executor only; add others in Phase 2+.

**Rationale**:
- Reduces scope and complexity for the initial delivery
- Claude Code is already integrated with the existing executor service
- The executor abstraction already exists; adding more is straightforward later
- Different executors may need different prompt strategies (requires experimentation)

**Trade-off**: Users are locked to Claude initially; can work around with manual executor selection
## Risks and Mitigations

- Model hallucination/large diffs: restrict prompt scope; enforce minimal changes; show diff previews (optional) before commit.

- Flaky tests: allow retries, isolate targeted runs for speed, then run the full suite before commit.

- Environment variability: detect runners/tools; provide fallbacks and actionable errors.

- PR creation fails: still push and print manual commands; persist the PR body for reuse.

## Open Questions

1) Slugging rules for branch names; any length limits or normalization beyond {slug} token sanitization?

2) PR body standard sections beyond the run report (e.g., checklist, coverage table)?

3) Default executor prompt fine-tuning once codex/gemini integration is available.

4) Where to store persistent TUI state (pane layout, last selection) — in .taskmaster/state.json?

## Branch Naming

- Include both the tag and the task id in the branch name to make lineage explicit.

- Default pattern: <tag>/task-<id>[-slug] (e.g., master/task-12, tag-analytics/task-4-user-auth).

- Configurable via .taskmaster/config.json: git.branchPattern supports tokens {tag}, {id}, {slug} (see the rendering sketch below).
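
A sketch of how the pattern could be rendered, including a naive slug sanitizer; the exact slug normalization rules are still listed as an open question above, so treat this as illustrative only.

```typescript
// Hypothetical helper rendering git.branchPattern; slug normalization rules
// are an open question (see Open Questions above).
function slugify(title: string): string {
  return title
    .toLowerCase()
    .replace(/[^a-z0-9]+/g, '-')
    .replace(/^-+|-+$/g, '')
    .slice(0, 40);
}

function renderBranchName(
  pattern: string, // e.g., "{tag}/task-{id}-{slug}"
  ctx: { tag: string; id: string; title: string }
): string {
  return pattern
    .replace('{tag}', ctx.tag)
    .replace('{id}', ctx.id)
    .replace('{slug}', slugify(ctx.title));
}

// renderBranchName('{tag}/task-{id}-{slug}', { tag: 'analytics', id: '42', title: 'User metrics tracking' })
// => 'analytics/task-42-user-metrics-tracking'
```
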
## PR Base Branch

- Use the repository's default branch (detected via git) unless overridden.

- Title format: Task #<id> [<tag>]: <title>.

## RPG Mapping (Repository Planning Graph)

Functional nodes (capabilities):

- Autopilot Orchestration → drives the TDD loop and lifecycle

- Test Generation (Surgical) → produces failing tests from subtask context

- Test Execution + Coverage → runs the suite, enforces thresholds

- Git/Branch/PR Management → safe operations and PR creation

- TUI/Terminal Integration → interactive control and visibility via tmux

- MCP Integration → structured task/status/context operations

Structural nodes (code organization):

- packages/tm-core:

  - services/workflow-orchestrator.ts (new)

  - services/test-runner-adapter.ts (new)

  - services/git-adapter.ts (new)

  - existing: task-service.ts, task-execution-service.ts, executors/*

- apps/cli:

  - src/commands/autopilot.command.ts (new)

  - src/ui/tui/ (new tmux/TUI helpers)

- scripts/modules:

  - reuse utils/git-utils.js, task-manager/tag-management.js

- .claude/agents/:

  - surgical-test-generator.md

Edges (data/control flow):

- Autopilot → Test Generation → Test Execution → Git Commit → loop

- Autopilot → Git Adapter (branch, tag, PR)

- Autopilot → TUI (event stream) → tmux pane control

- Autopilot → MCP tools for task/status updates

- Test Execution → Coverage gate → Autopilot decision

Topological traversal (implementation order):

1) Git/Test adapters (foundations)

2) Orchestrator skeleton + events

3) CLI autopilot command and dry-run

4) Surgical test-gen integration and execution gate

5) PR creation, run reports, resumability

## Phased Roadmap

- Phase 0: Spike

  - Implement the CLI skeleton tm autopilot with a dry-run showing planned steps from a real task + subtasks.

  - Detect the test runner (package.json) and git state; render a preflight report.

- Phase 1: Core Rails (State Machine & Orchestration)

  - Implement WorkflowOrchestrator in tm-core as a **state machine** that tracks TDD phases per subtask.

  - The orchestrator **guides** the current AI session (Claude Code/MCP client) rather than executing code itself.

  - Add Git/Test adapters for status checks and validation (not direct execution).

  - WorkflowOrchestrator API:
    - `getNextWorkUnit(taskId)` → returns the next phase to execute (RED/GREEN/COMMIT) with context
    - `completeWorkUnit(taskId, subtaskId, phase, result)` → records completion and advances state
    - `getRunState(taskId)` → returns current progress and resumability data

  - MCP integration: expose work unit endpoints so Claude Code can query "what to do next" and report back.

  - Branch/tag mapping via the existing tag-management APIs.

  - Run report persisted under .taskmaster/reports/runs/ with state checkpoints for resumability.

- Phase 2: PR + Resumability

  - Add gh PR creation with a well-formed body using the run report.

  - Introduce resumable checkpoints and a --resume flag.

  - Add coverage enforcement and an optional lint/format step.

- Phase 3: Extensibility + Guardrails

  - Add support for basic pytest/go test adapters.

  - Add safeguards: diff preview mode, manual confirm gates, aggressive minimal-change prompts.

  - Optional: small TUI panel and extension panel leveraging the same run state file.

## References (Repo)

- Test Workflow: .cursor/rules/test_workflow.mdc

- Git Workflow: .cursor/rules/git_workflow.mdc

- CLI: apps/cli/src/commands/start.command.ts, apps/cli/src/ui/components/*.ts

- Core Services: packages/tm-core/src/services/task-service.ts, task-execution-service.ts

- Executors: packages/tm-core/src/executors/*

- Git Utilities: scripts/modules/utils/git-utils.js

- Tag Management: scripts/modules/task-manager/tag-management.js

- Surgical Test Generator: .claude/agents/surgical-test-generator.md
130  .taskmaster/docs/tdd-workflow-phase-0-spike.md  Normal file
@@ -0,0 +1,130 @@
# Phase 0: Spike - Autonomous TDD Workflow ✅ COMPLETE

## Objective
Validate feasibility and build foundational understanding before full implementation.

## Status
**COMPLETED** - All deliverables implemented and validated.

See `apps/cli/src/commands/autopilot.command.ts` for the implementation.

## Scope
- Implement CLI skeleton `tm autopilot` with dry-run mode
- Show planned steps from a real task with subtasks
- Detect test runner from package.json
- Detect git state and render a preflight report

## Deliverables

### 1. CLI Command Skeleton
- Create `apps/cli/src/commands/autopilot.command.ts`
- Support the `tm autopilot <taskId>` command
- Implement the `--dry-run` flag
- Basic help text and usage information

### 2. Preflight Detection System
- Detect test runner from package.json (npm test, pnpm test, etc.)
- Check git working tree state (clean/dirty)
- Validate required tools are available (git, gh, node/npm)
- Detect the default branch

### 3. Dry-Run Execution Plan Display
Display the planned execution for a task, including:
- Preflight checks status
- Branch name that would be created
- Tag that would be set
- List of subtasks in execution order
- For each subtask:
  - RED phase: test file that would be created
  - GREEN phase: implementation files that would be modified
  - COMMIT: commit message that would be used
- Finalization steps: test suite run, coverage check, push, PR creation

### 4. Task Loading & Validation
- Load the task from TaskMaster state
- Validate the task exists and has subtasks
- If no subtasks, show a message about needing to expand first
- Show dependency order for subtasks

## Example Output

```bash
$ tm autopilot 42 --dry-run

Autopilot Plan for Task #42 [analytics]: User metrics tracking
─────────────────────────────────────────────────────────────

Preflight Checks:
  ✓ Working tree is clean
  ✓ Test command detected: npm test
  ✓ Tools available: git, gh, node, npm
  ✓ Current branch: main (will create new branch)
  ✓ Task has 3 subtasks ready to execute

Branch & Tag:
  → Will create branch: analytics/task-42-user-metrics
  → Will set active tag: analytics

Execution Plan (3 subtasks):

  1. Subtask 42.1: Add metrics schema
     RED: Generate tests → src/__tests__/schema.test.js
     GREEN: Implement code → src/schema.js
     COMMIT: "feat(metrics): add metrics schema (task 42.1)"

  2. Subtask 42.2: Add collection endpoint [depends on 42.1]
     RED: Generate tests → src/api/__tests__/metrics.test.js
     GREEN: Implement code → src/api/metrics.js
     COMMIT: "feat(metrics): add collection endpoint (task 42.2)"

  3. Subtask 42.3: Add dashboard widget [depends on 42.2]
     RED: Generate tests → src/components/__tests__/MetricsWidget.test.jsx
     GREEN: Implement code → src/components/MetricsWidget.jsx
     COMMIT: "feat(metrics): add dashboard widget (task 42.3)"

Finalization:
  → Run full test suite with coverage (threshold: 80%)
  → Push branch to origin (will confirm)
  → Create PR targeting main

Estimated commits: 3
Estimated duration: ~20-30 minutes (depends on implementation complexity)

Run without --dry-run to execute.
```

## Success Criteria
- Dry-run output is clear and matches the expected workflow
- Preflight detection works correctly on the project repo
- Task loading integrates with existing TaskMaster state
- No actual git operations or file modifications occur in dry-run mode

## Out of Scope
- Actual test generation
- Actual code implementation
- Git operations (branch creation, commits, push)
- PR creation
- Test execution

## Implementation Notes
- Reuse the existing `TaskService` from `packages/tm-core`
- Use existing git utilities from `scripts/modules/utils/git-utils.js`
- Load task/subtask data from `.taskmaster/tasks/tasks.json`
- Detect the test command via package.json → scripts.test field (see the sketch after this list)
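
A minimal sketch of the detection mentioned in the last note, reading `scripts.test` from package.json and checking for a clean working tree with `git status --porcelain`; the helper names are assumptions for the spike.

```typescript
import { execSync } from 'node:child_process';
import { readFileSync } from 'node:fs';

// Hypothetical preflight helpers for the spike (names are illustrative).
function detectTestCommand(packageJsonPath = 'package.json'): string | null {
  const pkg = JSON.parse(readFileSync(packageJsonPath, 'utf8'));
  return pkg.scripts?.test ? 'npm test' : null;
}

function isWorkingTreeClean(): boolean {
  // `git status --porcelain` prints nothing when the tree is clean.
  return execSync('git status --porcelain', { encoding: 'utf8' }).trim() === '';
}
```
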
## Dependencies
- Existing TaskMaster CLI structure
- Existing task storage format
- Git utilities

## Estimated Effort
2-3 days

## Validation
Test dry-run mode with:
- Task with 1 subtask
- Task with multiple subtasks
- Task with dependencies between subtasks
- Task without subtasks (should show warning)
- Dirty git working tree (should warn)
- Missing tools (should error with helpful message)
1144  .taskmaster/docs/tdd-workflow-phase-1-core-rails.md  Normal file
File diff suppressed because it is too large
369  .taskmaster/docs/tdd-workflow-phase-1-orchestrator.md  Normal file
@@ -0,0 +1,369 @@
# Phase 1: Core Rails - State Machine & Orchestration

## Objective
Build the WorkflowOrchestrator as a state machine that guides AI sessions through the TDD workflow, rather than directly executing code.

## Architecture Overview

### Execution Model
The orchestrator acts as a **state manager and guide**, not a code executor:

```
┌──────────────────────────────────────────────────────┐
│ Claude Code (MCP Client)                             │
│  - Queries "what to do next"                          │
│  - Executes work (writes tests, code, runs commands)  │
│  - Reports completion                                 │
└───────────────────────────┬──────────────────────────┘
                            │ MCP Protocol
                            ▼
┌──────────────────────────────────────────────────────┐
│ WorkflowOrchestrator (tm-core)                        │
│  - Maintains state machine (RED → GREEN → COMMIT)     │
│  - Returns work units with context                    │
│  - Validates preconditions                            │
│  - Records progress                                   │
│  - Persists state for resumability                    │
└──────────────────────────────────────────────────────┘
```

### Why This Approach?
1. **Separation of Concerns**: State management is separate from code execution
2. **Leverage Existing Tools**: Uses Claude Code's capabilities instead of reimplementing them
3. **Human-in-the-Loop**: Easy to inspect state and intervene at any phase
4. **Simpler Implementation**: The orchestrator is pure logic; no AI model integration needed
5. **Flexible Executors**: Any tool (Claude Code, human, other AI) can execute work units
## Core Components

### 1. WorkflowOrchestrator Service
**Location**: `packages/tm-core/src/services/workflow-orchestrator.service.ts`

**Responsibilities**:
- Track current phase (RED/GREEN/COMMIT) per subtask
- Generate work units with context for each phase
- Validate phase completion criteria
- Advance the state machine on successful completion
- Handle errors and retry logic
- Persist run state for resumability

**API**:
```typescript
interface WorkflowOrchestrator {
  // Start a new autopilot run
  startRun(taskId: string, options?: RunOptions): Promise<RunContext>;

  // Get next work unit to execute
  getNextWorkUnit(runId: string): Promise<WorkUnit | null>;

  // Report work unit completion
  completeWorkUnit(
    runId: string,
    workUnitId: string,
    result: WorkUnitResult
  ): Promise<void>;

  // Get current run state
  getRunState(runId: string): Promise<RunState>;

  // Pause/resume
  pauseRun(runId: string): Promise<void>;
  resumeRun(runId: string): Promise<void>;
}

interface WorkUnit {
  id: string;                    // Unique work unit ID
  phase: 'RED' | 'GREEN' | 'COMMIT';
  subtaskId: string;             // e.g., "42.1"
  action: string;                // Human-readable description
  context: WorkUnitContext;      // All info needed to execute
  preconditions: Precondition[]; // Checks before execution
}

interface WorkUnitContext {
  taskId: string;
  taskTitle: string;
  subtaskTitle: string;
  subtaskDescription: string;
  dependencies: string[];        // Completed subtask IDs
  testCommand: string;           // e.g., "npm test"

  // Phase-specific context
  redPhase?: {
    testFile: string;            // Where to create test
    testFramework: string;       // e.g., "vitest"
    acceptanceCriteria: string[];
  };

  greenPhase?: {
    testFile: string;            // Test to make pass
    implementationHints: string[];
    expectedFiles: string[];     // Files likely to modify
  };

  commitPhase?: {
    commitMessage: string;       // Pre-generated message
    filesToCommit: string[];     // Files modified in RED+GREEN
  };
}

interface WorkUnitResult {
  success: boolean;
  phase: 'RED' | 'GREEN' | 'COMMIT';

  // RED phase results
  testsCreated?: string[];
  testsFailed?: number;

  // GREEN phase results
  testsPassed?: number;
  filesModified?: string[];
  attempts?: number;

  // COMMIT phase results
  commitSha?: string;

  // Common
  error?: string;
  logs?: string;
}

interface RunState {
  runId: string;
  taskId: string;
  status: 'running' | 'paused' | 'completed' | 'failed';
  currentPhase: 'RED' | 'GREEN' | 'COMMIT';
  currentSubtask: string;
  completedSubtasks: string[];
  failedSubtasks: string[];
  startTime: Date;
  lastUpdateTime: Date;

  // Resumability
  checkpoint: {
    subtaskId: string;
    phase: 'RED' | 'GREEN' | 'COMMIT';
    attemptNumber: number;
  };
}
```

### 2. State Machine Logic

**Phase Transitions**:
```
START → RED(subtask 1) → GREEN(subtask 1) → COMMIT(subtask 1)
                                                    ↓
        RED(subtask 2)  ← ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─┘
              ↓
        GREEN(subtask 2)
              ↓
        COMMIT(subtask 2)
              ↓
        (repeat for remaining subtasks)
              ↓
        FINALIZE → END
```

**Phase Rules**:
- **RED**: Can only transition to GREEN if tests were created and are failing
|
||||
- **GREEN**: Can only transition to COMMIT if all tests pass (GREEN may retry while attempt < maxAttempts)
|
||||
- **COMMIT**: Can only transition to next RED if commit successful
|
||||
- **FINALIZE**: Can only start if all subtasks completed
|
||||
|
||||
**Preconditions**:
|
||||
- RED: No uncommitted changes (or only changes staged by a previous failed GREEN attempt)
|
||||
- GREEN: RED phase complete, tests exist and are failing
|
||||
- COMMIT: GREEN phase complete, all tests passing, coverage meets threshold
|
||||
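A minimal sketch of how these rules and preconditions could be encoded as a pure transition function (types are simplified from the interfaces above; the default of 3 GREEN attempts is an assumption):

```typescript
type Phase = 'RED' | 'GREEN' | 'COMMIT';

interface TransitionInput {
  phase: Phase;
  result: { success: boolean; testsFailed?: number };
  attempt: number;          // 1-based attempt counter for the current phase
  maxAttempts?: number;     // assumed default of 3
  isLastSubtask: boolean;
}

type Transition =
  | { next: Phase }                      // advance within the same subtask
  | { next: 'NEXT_SUBTASK' }             // start RED on the next subtask
  | { next: 'FINALIZE' }                 // all subtasks completed
  | { next: 'RETRY' }                    // repeat the current phase
  | { next: 'PAUSED'; reason: string };  // stop and checkpoint for manual review

function transition(input: TransitionInput): Transition {
  const max = input.maxAttempts ?? 3;
  switch (input.phase) {
    case 'RED':
      // RED is complete only when tests were created and are failing
      return input.result.success && (input.result.testsFailed ?? 0) > 0
        ? { next: 'GREEN' }
        : { next: 'RETRY' };
    case 'GREEN':
      if (input.result.success) return { next: 'COMMIT' };
      return input.attempt < max
        ? { next: 'RETRY' }
        : { next: 'PAUSED', reason: 'max_attempts_reached' };
    case 'COMMIT':
      if (!input.result.success) return { next: 'PAUSED', reason: 'commit_failed' };
      return input.isLastSubtask ? { next: 'FINALIZE' } : { next: 'NEXT_SUBTASK' };
    default:
      throw new Error(`Unknown phase: ${input.phase}`);
  }
}
```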
|
||||
### 3. MCP Integration
|
||||
|
||||
**New MCP Tools** (expose WorkflowOrchestrator via MCP):
|
||||
```typescript
|
||||
// Start an autopilot run
|
||||
mcp__task_master_ai__autopilot_start(taskId: string, dryRun?: boolean)
|
||||
|
||||
// Get next work unit
|
||||
mcp__task_master_ai__autopilot_next_work_unit(runId: string)
|
||||
|
||||
// Complete current work unit
|
||||
mcp__task_master_ai__autopilot_complete_work_unit(
|
||||
runId: string,
|
||||
workUnitId: string,
|
||||
result: WorkUnitResult
|
||||
)
|
||||
|
||||
// Get run state
|
||||
mcp__task_master_ai__autopilot_get_state(runId: string)
|
||||
|
||||
// Pause/resume
|
||||
mcp__task_master_ai__autopilot_pause(runId: string)
|
||||
mcp__task_master_ai__autopilot_resume(runId: string)
|
||||
```
|
||||
|
||||
### 4. Git/Test Adapters
|
||||
|
||||
**GitAdapter** (`packages/tm-core/src/services/git-adapter.service.ts`):
|
||||
- Check working tree status
|
||||
- Validate branch state
|
||||
- Read git config (user, remote, default branch)
|
||||
- **Does NOT execute** git commands (that's executor's job)
|
||||
|
||||
**TestAdapter** (`packages/tm-core/src/services/test-adapter.service.ts`):
|
||||
- Detect test framework from package.json
|
||||
- Parse test output (failures, passes, coverage)
|
||||
- Validate coverage thresholds
|
||||
- **Does NOT run** tests (that's executor's job)
|
||||
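A minimal sketch of the read-only checks described above, using `git status --porcelain` and `git rev-parse` via `child_process` (an assumption about the adapter's internals, not the final implementation):

```typescript
import { execFile } from 'node:child_process';
import { promisify } from 'node:util';

const run = promisify(execFile);

// Read-only checks: the adapter inspects repository state but never mutates it.
class GitAdapter {
  constructor(private cwd: string) {}

  async isWorkingTreeClean(): Promise<boolean> {
    const { stdout } = await run('git', ['status', '--porcelain'], { cwd: this.cwd });
    return stdout.trim().length === 0;
  }

  async currentBranch(): Promise<string> {
    const { stdout } = await run('git', ['rev-parse', '--abbrev-ref', 'HEAD'], { cwd: this.cwd });
    return stdout.trim();
  }
}
```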
|
||||
### 5. Run State Persistence
|
||||
|
||||
**Storage Location**: `.taskmaster/reports/runs/<runId>/`
|
||||
|
||||
**Files**:
|
||||
- `state.json` - Current run state (for resumability)
|
||||
- `log.jsonl` - Event stream (timestamped work unit completions)
|
||||
- `manifest.json` - Run metadata
|
||||
- `work-units.json` - All work units generated for this run
|
||||
|
||||
**Example `state.json`**:
|
||||
```json
|
||||
{
|
||||
"runId": "2025-01-15-142033",
|
||||
"taskId": "42",
|
||||
"status": "paused",
|
||||
"currentPhase": "GREEN",
|
||||
"currentSubtask": "42.2",
|
||||
"completedSubtasks": ["42.1"],
|
||||
"failedSubtasks": [],
|
||||
"checkpoint": {
|
||||
"subtaskId": "42.2",
|
||||
"phase": "GREEN",
|
||||
"attemptNumber": 2
|
||||
},
|
||||
"startTime": "2025-01-15T14:20:33Z",
|
||||
"lastUpdateTime": "2025-01-15T14:35:12Z"
|
||||
}
|
||||
```
|
||||
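A minimal sketch of the persistence layer, assuming a single-writer CLI process and Node's `fs/promises` (helper names are illustrative):

```typescript
import { mkdir, writeFile, appendFile } from 'node:fs/promises';
import { join } from 'node:path';

const runDir = (runId: string) => join('.taskmaster', 'reports', 'runs', runId);

// Overwrite state.json with the latest checkpoint.
async function saveState(runId: string, state: object): Promise<void> {
  await mkdir(runDir(runId), { recursive: true });
  await writeFile(join(runDir(runId), 'state.json'), JSON.stringify(state, null, 2));
}

// Append one timestamped event per line to log.jsonl.
async function logEvent(runId: string, event: object): Promise<void> {
  const line = JSON.stringify({ timestamp: new Date().toISOString(), ...event });
  await appendFile(join(runDir(runId), 'log.jsonl'), line + '\n');
}
```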
|
||||
## Implementation Plan
|
||||
|
||||
### Step 1: WorkflowOrchestrator Skeleton
|
||||
- [ ] Create `workflow-orchestrator.service.ts` with interfaces
|
||||
- [ ] Implement state machine logic (phase transitions)
|
||||
- [ ] Add run state persistence (state.json, log.jsonl)
|
||||
- [ ] Write unit tests for state machine
|
||||
|
||||
### Step 2: Work Unit Generation
|
||||
- [ ] Implement `getNextWorkUnit()` with context assembly
|
||||
- [ ] Generate RED phase work units (test file paths, criteria)
|
||||
- [ ] Generate GREEN phase work units (implementation hints)
|
||||
- [ ] Generate COMMIT phase work units (commit messages)
|
||||
|
||||
### Step 3: Git/Test Adapters
|
||||
- [ ] Create GitAdapter for status checks only
|
||||
- [ ] Create TestAdapter for output parsing only
|
||||
- [ ] Add precondition validation using adapters
|
||||
- [ ] Write adapter unit tests
|
||||
|
||||
### Step 4: MCP Integration
|
||||
- [ ] Add MCP tool definitions in `packages/mcp-server/src/tools/`
|
||||
- [ ] Wire up WorkflowOrchestrator to MCP tools
|
||||
- [ ] Test MCP tools via Claude Code
|
||||
- [ ] Document MCP workflow in CLAUDE.md
|
||||
|
||||
### Step 5: CLI Integration
|
||||
- [ ] Update `autopilot.command.ts` to call WorkflowOrchestrator
|
||||
- [ ] Add `--interactive` mode that shows work units and waits for completion
|
||||
- [ ] Add `--resume` flag to continue paused runs
|
||||
- [ ] Test end-to-end flow
|
||||
|
||||
### Step 6: Integration Testing
|
||||
- [ ] Create test task with 2-3 subtasks
|
||||
- [ ] Run autopilot start → get work unit → complete → repeat
|
||||
- [ ] Verify state persistence and resumability
|
||||
- [ ] Test failure scenarios (test failures, git issues)
|
||||
|
||||
## Success Criteria
|
||||
- [ ] WorkflowOrchestrator can generate work units for all phases
|
||||
- [ ] MCP tools allow Claude Code to query and complete work units
|
||||
- [ ] State persists correctly between work unit completions
|
||||
- [ ] Run can be paused and resumed from checkpoint
|
||||
- [ ] Adapters validate preconditions without executing commands
|
||||
- [ ] End-to-end: Claude Code can complete a simple task via work units
|
||||
|
||||
## Out of Scope (Phase 1)
|
||||
- Actual git operations (branch creation, commits) - executor handles this
|
||||
- Actual test execution - executor handles this
|
||||
- PR creation - deferred to Phase 2
|
||||
- TUI interface - deferred to Phase 3
|
||||
- Coverage enforcement - deferred to Phase 2
|
||||
|
||||
## Example Usage Flow
|
||||
|
||||
```bash
|
||||
# Terminal 1: Claude Code session
|
||||
$ claude
|
||||
|
||||
# In Claude Code (via MCP):
|
||||
> Start autopilot for task 42
|
||||
[Calls mcp__task_master_ai__autopilot_start(42)]
|
||||
→ Run started: run-2025-01-15-142033
|
||||
|
||||
> Get next work unit
|
||||
[Calls mcp__task_master_ai__autopilot_next_work_unit(run-2025-01-15-142033)]
|
||||
→ Work unit: RED phase for subtask 42.1
|
||||
→ Action: Generate failing tests for metrics schema
|
||||
→ Test file: src/__tests__/schema.test.js
|
||||
→ Framework: vitest
|
||||
|
||||
> [Claude Code creates test file, runs tests]
|
||||
|
||||
> Complete work unit
|
||||
[Calls mcp__task_master_ai__autopilot_complete_work_unit(
|
||||
run-2025-01-15-142033,
|
||||
workUnit-42.1-RED,
|
||||
{ success: true, testsCreated: ['src/__tests__/schema.test.js'], testsFailed: 3 }
|
||||
)]
|
||||
→ Work unit completed. State saved.
|
||||
|
||||
> Get next work unit
|
||||
[Calls mcp__task_master_ai__autopilot_next_work_unit(run-2025-01-15-142033)]
|
||||
→ Work unit: GREEN phase for subtask 42.1
|
||||
→ Action: Implement code to pass failing tests
|
||||
→ Test file: src/__tests__/schema.test.js
|
||||
→ Expected implementation: src/schema.js
|
||||
|
||||
> [Claude Code implements schema.js, runs tests, confirms all pass]
|
||||
|
||||
> Complete work unit
|
||||
[...]
|
||||
→ Work unit completed. Ready for COMMIT.
|
||||
|
||||
> Get next work unit
|
||||
[...]
|
||||
→ Work unit: COMMIT phase for subtask 42.1
|
||||
→ Commit message: "feat(metrics): add metrics schema (task 42.1)"
|
||||
→ Files to commit: src/__tests__/schema.test.js, src/schema.js
|
||||
|
||||
> [Claude Code stages files and commits]
|
||||
|
||||
> Complete work unit
|
||||
[...]
|
||||
→ Subtask 42.1 complete! Moving to 42.2...
|
||||
```
|
||||
|
||||
## Dependencies
|
||||
- Existing TaskService (task loading, status updates)
|
||||
- Existing PreflightChecker (environment validation)
|
||||
- Existing TaskLoaderService (dependency ordering)
|
||||
- MCP server infrastructure
|
||||
|
||||
## Estimated Effort
|
||||
7-10 days
|
||||
|
||||
## Next Phase
|
||||
Phase 2 will add:
|
||||
- PR creation via gh CLI
|
||||
- Coverage enforcement
|
||||
- Enhanced error recovery
|
||||
- Full resumability testing
|
||||
433
.taskmaster/docs/tdd-workflow-phase-2-pr-resumability.md
Normal file
@@ -0,0 +1,433 @@
|
||||
# Phase 2: PR + Resumability - Autonomous TDD Workflow
|
||||
|
||||
## Objective
|
||||
Add PR creation with GitHub CLI integration, resumable checkpoints for interrupted runs, and enhanced guardrails with coverage enforcement.
|
||||
|
||||
## Scope
|
||||
- GitHub PR creation via `gh` CLI
|
||||
- Well-formed PR body using run report
|
||||
- Resumable checkpoints and `--resume` flag
|
||||
- Coverage enforcement before finalization
|
||||
- Optional lint/format step
|
||||
- Enhanced error recovery
|
||||
|
||||
## Deliverables
|
||||
|
||||
### 1. PR Creation Integration
|
||||
|
||||
**PRAdapter** (`packages/tm-core/src/services/pr-adapter.ts`):
|
||||
```typescript
|
||||
class PRAdapter {
|
||||
async isGHAvailable(): Promise<boolean>
|
||||
async createPR(options: PROptions): Promise<PRResult>
|
||||
async getPRTemplate(runReport: RunReport): Promise<string>
|
||||
|
||||
// Fallback for missing gh CLI
|
||||
async getManualPRInstructions(options: PROptions): Promise<string>
|
||||
}
|
||||
|
||||
interface PROptions {
|
||||
branch: string
|
||||
base: string
|
||||
title: string
|
||||
body: string
|
||||
draft?: boolean
|
||||
}
|
||||
|
||||
interface PRResult {
|
||||
url: string
|
||||
number: number
|
||||
}
|
||||
```
|
||||
|
||||
**PR Title Format:**
|
||||
```
|
||||
Task #<id> [<tag>]: <title>
|
||||
```
|
||||
|
||||
Example: `Task #42 [analytics]: User metrics tracking`
|
||||
|
||||
**PR Body Template:**
|
||||
|
||||
Located at `.taskmaster/templates/pr-body.md`:
|
||||
|
||||
```markdown
|
||||
## Summary
|
||||
|
||||
Implements Task #42 from TaskMaster autonomous workflow.
|
||||
|
||||
**Branch:** {branch}
|
||||
**Tag:** {tag}
|
||||
**Subtasks completed:** {subtaskCount}
|
||||
|
||||
{taskDescription}
|
||||
|
||||
## Subtasks
|
||||
|
||||
{subtasksList}
|
||||
|
||||
## Test Coverage
|
||||
|
||||
| Metric | Coverage |
|
||||
|--------|----------|
|
||||
| Lines | {lines}% |
|
||||
| Branches | {branches}% |
|
||||
| Functions | {functions}% |
|
||||
| Statements | {statements}% |
|
||||
|
||||
**All subtasks completed with {totalTests} passing tests.**
|
||||
|
||||
## Commits
|
||||
|
||||
{commitsList}
|
||||
|
||||
## Run Report
|
||||
|
||||
Full execution report: `.taskmaster/reports/runs/{runId}/`
|
||||
|
||||
---
|
||||
|
||||
🤖 Generated with [Task Master](https://github.com/cline/task-master) autonomous TDD workflow
|
||||
```
|
||||
|
||||
**Token replacement:**
|
||||
- `{branch}` → branch name
|
||||
- `{tag}` → active tag
|
||||
- `{subtaskCount}` → number of completed subtasks
|
||||
- `{taskDescription}` → task description from TaskMaster
|
||||
- `{subtasksList}` → markdown list of subtask titles
|
||||
- `{lines}`, `{branches}`, `{functions}`, `{statements}` → coverage percentages
|
||||
- `{totalTests}` → total test count
|
||||
- `{commitsList}` → markdown list of commit SHAs and messages
|
||||
- `{runId}` → run ID timestamp
|
||||
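A minimal sketch of the token replacement step (the `renderPRBody` name is illustrative; unknown tokens are intentionally left in place so template mistakes surface during review):

```typescript
// Replace {token} placeholders in the PR body template with run-report values.
function renderPRBody(template: string, values: Record<string, string | number>): string {
  return template.replace(/\{(\w+)\}/g, (match, token) =>
    token in values ? String(values[token]) : match
  );
}

// Example usage with a subset of the tokens listed above:
const body = renderPRBody('**Branch:** {branch}\n**Subtasks completed:** {subtaskCount}', {
  branch: 'analytics/task-42-user-metrics',
  subtaskCount: 3
});
```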
|
||||
### 2. GitHub CLI Integration
|
||||
|
||||
**Detection:**
|
||||
```bash
|
||||
which gh
|
||||
```
|
||||
|
||||
If not found, show fallback instructions:
|
||||
```bash
|
||||
✓ Branch pushed: analytics/task-42-user-metrics
|
||||
✗ gh CLI not found - cannot create PR automatically
|
||||
|
||||
To create PR manually:
|
||||
gh pr create \
|
||||
--base main \
|
||||
--head analytics/task-42-user-metrics \
|
||||
--title "Task #42 [analytics]: User metrics tracking" \
|
||||
--body-file .taskmaster/reports/runs/2025-01-15-142033/pr.md
|
||||
|
||||
Or visit:
|
||||
https://github.com/org/repo/compare/main...analytics/task-42-user-metrics
|
||||
```
|
||||
|
||||
**Confirmation gate:**
|
||||
```bash
|
||||
Ready to create PR:
|
||||
Title: Task #42 [analytics]: User metrics tracking
|
||||
Base: main
|
||||
Head: analytics/task-42-user-metrics
|
||||
|
||||
Create PR? [Y/n]
|
||||
```
|
||||
|
||||
Unless `--no-confirm` flag is set.
|
||||
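A minimal sketch of the availability check, probing `gh --version` rather than relying on `which` so it also works outside POSIX shells (an assumption about the implementation, not the final code):

```typescript
import { execFile } from 'node:child_process';
import { promisify } from 'node:util';

const run = promisify(execFile);

// True when the GitHub CLI is on PATH and responds; false triggers the
// manual-instructions fallback shown above.
async function isGHAvailable(): Promise<boolean> {
  try {
    await run('gh', ['--version']);
    return true;
  } catch {
    return false;
  }
}
```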
|
||||
### 3. Resumable Workflow
|
||||
|
||||
**State Checkpoint** (`state.json`):
|
||||
```json
|
||||
{
|
||||
"runId": "2025-01-15-142033",
|
||||
"taskId": "42",
|
||||
"phase": "subtask-loop",
|
||||
"currentSubtask": "42.2",
|
||||
"currentPhase": "green",
|
||||
"attempts": 2,
|
||||
"completedSubtasks": ["42.1"],
|
||||
"commits": ["a1b2c3d"],
|
||||
"branch": "analytics/task-42-user-metrics",
|
||||
"tag": "analytics",
|
||||
"canResume": true,
|
||||
"pausedAt": "2025-01-15T14:25:35Z",
|
||||
"pausedReason": "max_attempts_reached",
|
||||
"nextAction": "manual_review_required"
|
||||
}
|
||||
```
|
||||
|
||||
**Resume Command:**
|
||||
```bash
|
||||
$ tm autopilot --resume
|
||||
|
||||
Resuming run: 2025-01-15-142033
|
||||
Task: #42 [analytics] User metrics tracking
|
||||
Branch: analytics/task-42-user-metrics
|
||||
Last subtask: 42.2 (GREEN phase, attempt 3/3 failed)
|
||||
Paused: 5 minutes ago
|
||||
|
||||
Reason: Could not achieve green state after 3 attempts
|
||||
Last error: POST /api/metrics returns 500 instead of 201
|
||||
|
||||
Resume from subtask 42.2 GREEN phase? [Y/n]
|
||||
```
|
||||
|
||||
**Resume logic:**
|
||||
1. Load state from `.taskmaster/reports/runs/<runId>/state.json`
|
||||
2. Verify branch still exists and is checked out
|
||||
3. Verify no uncommitted changes (unless `--force`)
|
||||
4. Continue from last checkpoint phase
|
||||
5. Update state file as execution progresses
|
||||
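A minimal sketch of steps 1-3, with branch and working-tree information passed in rather than re-derived (in practice these would come from the GitAdapter; all helper names are illustrative):

```typescript
import { readFile } from 'node:fs/promises';
import { join } from 'node:path';

interface ResumeState {
  runId: string;
  branch: string;
  canResume: boolean;
}

// Validates that a paused run can safely continue before any work resumes.
async function validateResume(
  runId: string,
  currentBranch: string,
  workingTreeClean: boolean,
  force = false
): Promise<ResumeState> {
  const raw = await readFile(
    join('.taskmaster', 'reports', 'runs', runId, 'state.json'),
    'utf8'
  );
  const state = JSON.parse(raw) as ResumeState;

  if (!state.canResume) throw new Error(`Run ${runId} is not resumable`);
  if (state.branch !== currentBranch)
    throw new Error(`Expected branch ${state.branch}, but ${currentBranch} is checked out`);
  if (!workingTreeClean && !force)
    throw new Error('Uncommitted changes present; commit, stash, or pass --force');

  return state;
}
```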
|
||||
**Multiple interrupted runs:**
|
||||
```bash
|
||||
$ tm autopilot --resume
|
||||
|
||||
Found 2 resumable runs:
|
||||
1. 2025-01-15-142033 - Task #42 (paused 5 min ago at subtask 42.2 GREEN)
|
||||
2. 2025-01-14-103022 - Task #38 (paused 2 hours ago at subtask 38.3 RED)
|
||||
|
||||
Select run to resume [1-2]:
|
||||
```
|
||||
|
||||
### 4. Coverage Enforcement
|
||||
|
||||
**Coverage Check Phase** (before finalization):
|
||||
```typescript
|
||||
async function enforceCoverage(runId: string): Promise<void> {
|
||||
const testResults = await testRunner.runAll()
|
||||
const coverage = await testRunner.getCoverage()
|
||||
|
||||
const thresholds = config.test.coverageThresholds
|
||||
const failures = []
|
||||
|
||||
if (coverage.lines < thresholds.lines) {
|
||||
failures.push(`Lines: ${coverage.lines}% < ${thresholds.lines}%`)
|
||||
}
|
||||
// ... check branches, functions, statements
|
||||
|
||||
if (failures.length > 0) {
|
||||
throw new CoverageError(
|
||||
`Coverage thresholds not met:\n${failures.join('\n')}`
|
||||
)
|
||||
}
|
||||
|
||||
// Store coverage in run report
|
||||
await storeRunArtifact(runId, 'coverage.json', coverage)
|
||||
}
|
||||
```
|
||||
|
||||
**Handling coverage failures:**
|
||||
```bash
|
||||
⚠️ Coverage check failed:
|
||||
Lines: 78.5% < 80%
|
||||
Branches: 75.0% < 80%
|
||||
|
||||
Options:
|
||||
1. Add more tests and resume
|
||||
2. Lower thresholds in .taskmaster/config.json
|
||||
3. Skip coverage check: tm autopilot --resume --skip-coverage
|
||||
|
||||
Run paused. Fix coverage and resume with:
|
||||
tm autopilot --resume
|
||||
```
|
||||
|
||||
### 5. Optional Lint/Format Step
|
||||
|
||||
**Configuration:**
|
||||
```json
|
||||
{
|
||||
"autopilot": {
|
||||
"finalization": {
|
||||
"lint": {
|
||||
"enabled": true,
|
||||
"command": "npm run lint",
|
||||
"fix": true,
|
||||
"failOnError": false
|
||||
},
|
||||
"format": {
|
||||
"enabled": true,
|
||||
"command": "npm run format",
|
||||
"commitChanges": true
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Execution:**
|
||||
```bash
|
||||
Finalization Steps:
|
||||
|
||||
✓ All tests passing (12 tests, 0 failures)
|
||||
✓ Coverage thresholds met (85% lines, 82% branches)
|
||||
|
||||
LINT Running linter... ⏳
|
||||
LINT ✓ No lint errors
|
||||
|
||||
FORMAT Running formatter... ⏳
|
||||
FORMAT ✓ Formatted 3 files
|
||||
FORMAT ✓ Committed formatting changes: "chore: auto-format code"
|
||||
|
||||
PUSH Pushing to origin... ⏳
|
||||
PUSH ✓ Pushed analytics/task-42-user-metrics
|
||||
|
||||
PR Creating pull request... ⏳
|
||||
PR ✓ Created PR #123
|
||||
https://github.com/org/repo/pull/123
|
||||
```
|
||||
|
||||
### 6. Enhanced Error Recovery
|
||||
|
||||
**Pause Points:**
|
||||
- Max GREEN attempts reached (current)
|
||||
- Coverage check failed (new)
|
||||
- Lint errors (if `failOnError: true`)
|
||||
- Git push failed (new)
|
||||
- PR creation failed (new)
|
||||
|
||||
**Each pause saves:**
|
||||
- Full state checkpoint
|
||||
- Last command output
|
||||
- Suggested next actions
|
||||
- Resume instructions
|
||||
|
||||
**Automatic recovery attempts:**
|
||||
- Git push: retry up to 3 times with backoff
|
||||
- PR creation: fall back to manual instructions
|
||||
- Lint: auto-fix if enabled, otherwise pause
|
||||
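A minimal sketch of the retry policy, with defaults matching the `pushRetries`/`pushRetryDelay` settings introduced below (linear backoff is an assumption; the spec only says "with backoff"):

```typescript
// Retry an async operation (e.g., pushing the branch) with linear backoff.
async function withRetries<T>(
  operation: () => Promise<T>,
  retries = 3,
  delayMs = 5000
): Promise<T> {
  let lastError: unknown;
  for (let attempt = 1; attempt <= retries; attempt++) {
    try {
      return await operation();
    } catch (error) {
      lastError = error;
      // Wait longer after each failed attempt before trying again
      if (attempt < retries) await new Promise((r) => setTimeout(r, delayMs * attempt));
    }
  }
  throw lastError;
}
```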
|
||||
### 7. Finalization Phase Enhancement
|
||||
|
||||
**Updated workflow:**
|
||||
1. Run full test suite
|
||||
2. Check coverage thresholds → pause if failed
|
||||
3. Run lint (if enabled) → pause if failed and `failOnError: true`
|
||||
4. Run format (if enabled) → auto-commit changes
|
||||
5. Confirm push (unless `--no-confirm`)
|
||||
6. Push branch → retry on failure
|
||||
7. Generate PR body from template
|
||||
8. Create PR via gh → fall back to manual instructions
|
||||
9. Update task status to 'review' (configurable)
|
||||
10. Save final run report
|
||||
|
||||
**Final output:**
|
||||
```bash
|
||||
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
|
||||
|
||||
✅ Task #42 [analytics]: User metrics tracking - COMPLETE
|
||||
|
||||
Branch: analytics/task-42-user-metrics
|
||||
Subtasks completed: 3/3
|
||||
Commits: 3
|
||||
Total tests: 12 (12 passed, 0 failed)
|
||||
Coverage: 85% lines, 82% branches, 88% functions, 85% statements
|
||||
|
||||
PR #123: https://github.com/org/repo/pull/123
|
||||
|
||||
Run report: .taskmaster/reports/runs/2025-01-15-142033/
|
||||
|
||||
Next steps:
|
||||
- Review PR and request changes if needed
|
||||
- Merge when ready
|
||||
- Task status updated to 'review'
|
||||
|
||||
Completed in 24 minutes
|
||||
```
|
||||
|
||||
## CLI Updates
|
||||
|
||||
**New flags:**
|
||||
- `--resume` → Resume from last checkpoint
|
||||
- `--skip-coverage` → Skip coverage checks
|
||||
- `--skip-lint` → Skip lint step
|
||||
- `--skip-format` → Skip format step
|
||||
- `--skip-pr` → Push branch but don't create PR
|
||||
- `--draft-pr` → Create draft PR instead of ready-for-review
|
||||
|
||||
## Configuration Updates
|
||||
|
||||
**Add to `.taskmaster/config.json`:**
|
||||
```json
|
||||
{
|
||||
"autopilot": {
|
||||
"finalization": {
|
||||
"lint": {
|
||||
"enabled": false,
|
||||
"command": "npm run lint",
|
||||
"fix": true,
|
||||
"failOnError": false
|
||||
},
|
||||
"format": {
|
||||
"enabled": false,
|
||||
"command": "npm run format",
|
||||
"commitChanges": true
|
||||
},
|
||||
"updateTaskStatus": "review"
|
||||
}
|
||||
},
|
||||
"git": {
|
||||
"pr": {
|
||||
"enabled": true,
|
||||
"base": "default",
|
||||
"bodyTemplate": ".taskmaster/templates/pr-body.md",
|
||||
"draft": false
|
||||
},
|
||||
"pushRetries": 3,
|
||||
"pushRetryDelay": 5000
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Success Criteria
|
||||
- Can create PR automatically with well-formed body
|
||||
- Can resume interrupted runs from any checkpoint
|
||||
- Coverage checks prevent low-quality code from being merged
|
||||
- Clear error messages and recovery paths for all failure modes
|
||||
- Run reports include full PR context for review
|
||||
|
||||
## Out of Scope (defer to Phase 3)
|
||||
- Multiple test framework support (pytest, go test)
|
||||
- Diff preview before commits
|
||||
- TUI panel implementation
|
||||
- Extension/IDE integration
|
||||
|
||||
## Testing Strategy
|
||||
- Mock `gh` CLI for PR creation tests
|
||||
- Test resume from each possible pause point
|
||||
- Test coverage failure scenarios
|
||||
- Test lint/format integration with mock commands
|
||||
- End-to-end test with PR creation on test repo
|
||||
|
||||
## Dependencies
|
||||
- Phase 1 completed (core workflow)
|
||||
- GitHub CLI (`gh`) installed (optional, fallback provided)
|
||||
- Test framework supports coverage output
|
||||
|
||||
## Estimated Effort
|
||||
1-2 weeks
|
||||
|
||||
## Risks & Mitigations
|
||||
- **Risk:** GitHub CLI auth issues
|
||||
- **Mitigation:** Clear auth setup docs, fallback to manual instructions
|
||||
|
||||
- **Risk:** PR body template doesn't match all project needs
|
||||
- **Mitigation:** Make template customizable via config path
|
||||
|
||||
- **Risk:** Resume state gets corrupted
|
||||
- **Mitigation:** Validate state on load, provide --force-reset option
|
||||
|
||||
- **Risk:** Coverage calculation differs between runs
|
||||
- **Mitigation:** Store coverage with each test run for comparison
|
||||
|
||||
## Validation
|
||||
Test with:
|
||||
- Successful PR creation end-to-end
|
||||
- Resume from GREEN attempt failure
|
||||
- Resume from coverage failure
|
||||
- Resume from lint failure
|
||||
- Missing `gh` CLI (fallback to manual)
|
||||
- Lint/format integration enabled
|
||||
- Multiple interrupted runs (selection UI)
|
||||
@@ -0,0 +1,534 @@
|
||||
# Phase 3: Extensibility + Guardrails - Autonomous TDD Workflow
|
||||
|
||||
## Objective
|
||||
Add multi-language/framework support, enhanced safety guardrails, TUI interface, and extensibility for IDE/editor integration.
|
||||
|
||||
## Scope
|
||||
- Multi-language test runner support (pytest, go test, etc.)
|
||||
- Enhanced safety: diff preview, confirmation gates, minimal-change prompts
|
||||
- Optional TUI panel with tmux integration
|
||||
- State-based extension API for IDE integration
|
||||
- Parallel subtask execution (experimental)
|
||||
|
||||
## Deliverables
|
||||
|
||||
### 1. Multi-Language Test Runner Support
|
||||
|
||||
**Extend TestRunnerAdapter:**
|
||||
```typescript
|
||||
class TestRunnerAdapter {
|
||||
// Existing methods...
|
||||
|
||||
async detectLanguage(): Promise<Language>
|
||||
async detectFramework(language: Language): Promise<Framework>
|
||||
async getFrameworkAdapter(framework: Framework): Promise<FrameworkAdapter>
|
||||
}
|
||||
|
||||
enum Language {
|
||||
JavaScript = 'javascript',
|
||||
TypeScript = 'typescript',
|
||||
Python = 'python',
|
||||
Go = 'go',
|
||||
Rust = 'rust'
|
||||
}
|
||||
|
||||
enum Framework {
|
||||
Vitest = 'vitest',
|
||||
Jest = 'jest',
|
||||
Pytest = 'pytest',
|
||||
GoTest = 'gotest',
|
||||
CargoTest = 'cargotest'
|
||||
}
|
||||
|
||||
interface FrameworkAdapter {
|
||||
runTargeted(pattern: string): Promise<TestResults>
|
||||
runAll(): Promise<TestResults>
|
||||
parseCoverage(output: string): Promise<CoverageReport>
|
||||
getTestFilePattern(): string
|
||||
getTestFileExtension(): string
|
||||
}
|
||||
```
|
||||
|
||||
**Framework-specific adapters:**
|
||||
|
||||
**PytestAdapter** (`packages/tm-core/src/services/test-adapters/pytest-adapter.ts`):
|
||||
```typescript
|
||||
class PytestAdapter implements FrameworkAdapter {
|
||||
async runTargeted(pattern: string): Promise<TestResults> {
|
||||
const output = await exec(`pytest ${pattern} --json-report`)
|
||||
return this.parseResults(output)
|
||||
}
|
||||
|
||||
async runAll(): Promise<TestResults> {
|
||||
const output = await exec('pytest --cov --json-report')
|
||||
return this.parseResults(output)
|
||||
}
|
||||
|
||||
parseCoverage(output: string): Promise<CoverageReport> {
|
||||
// Parse pytest-cov XML output
|
||||
}
|
||||
|
||||
getTestFilePattern(): string {
|
||||
return '**/test_*.py'
|
||||
}
|
||||
|
||||
getTestFileExtension(): string {
|
||||
return '.py'
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**GoTestAdapter** (`packages/tm-core/src/services/test-adapters/gotest-adapter.ts`):
|
||||
```typescript
|
||||
class GoTestAdapter implements FrameworkAdapter {
|
||||
async runTargeted(pattern: string): Promise<TestResults> {
|
||||
const output = await exec(`go test ${pattern} -json`)
|
||||
return this.parseResults(output)
|
||||
}
|
||||
|
||||
async runAll(): Promise<TestResults> {
|
||||
const output = await exec('go test ./... -coverprofile=coverage.out -json')
|
||||
return this.parseResults(output)
|
||||
}
|
||||
|
||||
parseCoverage(output: string): Promise<CoverageReport> {
|
||||
// Parse go test coverage output
|
||||
}
|
||||
|
||||
getTestFilePattern(): string {
|
||||
return '**/*_test.go'
|
||||
}
|
||||
|
||||
getTestFileExtension(): string {
|
||||
return '_test.go'
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Detection Logic:**
|
||||
```typescript
|
||||
// `exists` and `readJSON` are assumed to be small fs helper wrappers
async function detectFramework(): Promise<Framework> {
|
||||
// Check for package.json
|
||||
if (await exists('package.json')) {
|
||||
const pkg = await readJSON('package.json')
|
||||
if (pkg.devDependencies?.vitest) return Framework.Vitest
|
||||
if (pkg.devDependencies?.jest) return Framework.Jest
|
||||
}
|
||||
|
||||
// Check for Python files
|
||||
if (await exists('pytest.ini') || await exists('setup.py')) {
|
||||
return Framework.Pytest
|
||||
}
|
||||
|
||||
// Check for Go files
|
||||
if (await exists('go.mod')) {
|
||||
return Framework.GoTest
|
||||
}
|
||||
|
||||
// Check for Rust files
|
||||
if (await exists('Cargo.toml')) {
|
||||
return Framework.CargoTest
|
||||
}
|
||||
|
||||
throw new Error('Could not detect test framework')
|
||||
}
|
||||
```
|
||||
|
||||
### 2. Enhanced Safety Guardrails
|
||||
|
||||
**Diff Preview Mode:**
|
||||
```bash
|
||||
$ tm autopilot 42 --preview-diffs
|
||||
|
||||
[2/3] Subtask 42.2: Add collection endpoint
|
||||
|
||||
RED ✓ Tests created: src/api/__tests__/metrics.test.js
|
||||
|
||||
GREEN Implementing code...
|
||||
|
||||
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
|
||||
Proposed changes (src/api/metrics.js):
|
||||
|
||||
+ import { MetricsSchema } from '../models/schema.js'
|
||||
+
|
||||
+ export async function createMetric(data) {
|
||||
+ const validated = MetricsSchema.parse(data)
|
||||
+ const result = await db.metrics.create(validated)
|
||||
+ return result
|
||||
+ }
|
||||
|
||||
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
|
||||
|
||||
Apply these changes? [Y/n/e(dit)/s(kip)]
|
||||
Y - Apply and continue
|
||||
n - Reject and retry GREEN phase
|
||||
e - Open in editor for manual changes
|
||||
s - Skip this subtask
|
||||
```
|
||||
|
||||
**Minimal Change Enforcement:**
|
||||
|
||||
Add to system prompt:
|
||||
```markdown
|
||||
CRITICAL: Make MINIMAL changes to pass the failing tests.
|
||||
- Only modify files directly related to the subtask
|
||||
- Do not refactor existing code unless absolutely necessary
|
||||
- Do not add features beyond the acceptance criteria
|
||||
- Keep changes under 50 lines per file when possible
|
||||
- Prefer composition over modification
|
||||
```
|
||||
|
||||
**Change Size Warnings:**
|
||||
```bash
|
||||
⚠️ Large change detected:
|
||||
Files modified: 5
|
||||
Lines changed: +234, -12
|
||||
|
||||
This subtask was expected to be small (~50 lines).
|
||||
Consider:
|
||||
- Breaking into smaller subtasks
|
||||
- Reviewing acceptance criteria
|
||||
- Checking for unintended changes
|
||||
|
||||
Continue anyway? [y/N]
|
||||
```
|
||||
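A minimal sketch of how the change size could be measured from `git diff --numstat` (the threshold heuristic and `maxLines` default mirror the `maxChangeLinesPerFile` setting introduced later and are assumptions):

```typescript
import { execFile } from 'node:child_process';
import { promisify } from 'node:util';

const run = promisify(execFile);

interface ChangeSize {
  files: number;
  added: number;
  removed: number;
}

// Sum per-file additions/deletions from `git diff --numstat` for unstaged work.
// Binary files report "-" counts and are skipped from the line totals.
async function measureChange(cwd: string): Promise<ChangeSize> {
  const { stdout } = await run('git', ['diff', '--numstat'], { cwd });
  const lines = stdout.trim().split('\n').filter(Boolean);
  let added = 0;
  let removed = 0;
  for (const line of lines) {
    const [a, r] = line.split('\t');
    if (a !== '-') added += Number(a);
    if (r !== '-') removed += Number(r);
  }
  return { files: lines.length, added, removed };
}

// Warn when the change exceeds the configured per-file budget on average.
function isLargeChange(size: ChangeSize, maxLines = 100): boolean {
  return size.added + size.removed > maxLines * Math.max(size.files, 1);
}
```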
|
||||
### 3. TUI Interface with tmux
|
||||
|
||||
**Layout:**
|
||||
```
|
||||
┌──────────────────────────────────┬─────────────────────────────────┐
|
||||
│ Task Navigator (left) │ Executor Terminal (right) │
|
||||
│ │ │
|
||||
│ Project: my-app │ $ tm autopilot --executor-mode │
|
||||
│ Branch: analytics/task-42 │ > Running subtask 42.2 GREEN... │
|
||||
│ Tag: analytics │ > Implementing endpoint... │
|
||||
│ │ > Tests: 3 passed, 0 failed │
|
||||
│ Tasks: │ > Ready to commit │
|
||||
│ → 42 [in-progress] User metrics │ │
|
||||
│ → 42.1 [done] Schema │ [Live output from executor] │
|
||||
│ → 42.2 [active] Endpoint ◀ │ │
|
||||
│ → 42.3 [pending] Dashboard │ │
|
||||
│ │ │
|
||||
│ [s] start [p] pause [q] quit │ │
|
||||
└──────────────────────────────────┴─────────────────────────────────┘
|
||||
```
|
||||
|
||||
**Implementation:**
|
||||
|
||||
**TUI Navigator** (`apps/cli/src/ui/tui/navigator.ts`):
|
||||
```typescript
|
||||
import blessed from 'blessed'
import { watch } from 'node:fs'
// `exec` is assumed to be a promisified shell helper, as in the other sketches in this doc
|
||||
|
||||
class AutopilotTUI {
|
||||
private screen: blessed.Widgets.Screen
|
||||
private taskList: blessed.Widgets.ListElement
|
||||
private statusBox: blessed.Widgets.BoxElement
|
||||
private executorPane: string // tmux pane ID
|
||||
|
||||
async start(taskId?: string) {
|
||||
// Create blessed screen
|
||||
this.screen = blessed.screen()
|
||||
|
||||
// Create task list widget
|
||||
this.taskList = blessed.list({
|
||||
label: 'Tasks',
|
||||
keys: true,
|
||||
vi: true,
|
||||
style: { selected: { bg: 'blue' } }
|
||||
})
|
||||
|
||||
// Spawn tmux pane for executor
|
||||
this.executorPane = await this.spawnExecutorPane()
|
||||
|
||||
// Watch state file for updates
|
||||
this.watchStateFile()
|
||||
|
||||
// Handle keybindings
|
||||
this.setupKeybindings()
|
||||
}
|
||||
|
||||
private async spawnExecutorPane(): Promise<string> {
|
||||
const paneId = await exec('tmux split-window -h -P -F "#{pane_id}"')
|
||||
await exec(`tmux send-keys -t ${paneId} "tm autopilot --executor-mode" Enter`)
|
||||
return paneId.trim()
|
||||
}
|
||||
|
||||
private watchStateFile() {
|
||||
watch('.taskmaster/state/current-run.json', (event, filename) => {
|
||||
this.updateDisplay()
|
||||
})
|
||||
}
|
||||
|
||||
private setupKeybindings() {
|
||||
this.screen.key(['s'], () => this.startTask())
|
||||
this.screen.key(['p'], () => this.pauseTask())
|
||||
this.screen.key(['q'], () => this.quit())
|
||||
this.screen.key(['up', 'down'], () => this.navigateTasks())
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Executor Mode:**
|
||||
```bash
|
||||
$ tm autopilot 42 --executor-mode
|
||||
|
||||
# Runs in executor pane, writes state to shared file
|
||||
# Left pane reads state file and updates display
|
||||
```
|
||||
|
||||
**State File** (`.taskmaster/state/current-run.json`):
|
||||
```json
|
||||
{
|
||||
"runId": "2025-01-15-142033",
|
||||
"taskId": "42",
|
||||
"status": "running",
|
||||
"currentPhase": "green",
|
||||
"currentSubtask": "42.2",
|
||||
"lastOutput": "Implementing endpoint...",
|
||||
"testsStatus": {
|
||||
"passed": 3,
|
||||
"failed": 0
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### 4. Extension API for IDE Integration
|
||||
|
||||
**State-based API:**
|
||||
|
||||
Expose run state via JSON files that IDEs can read:
|
||||
- `.taskmaster/state/current-run.json` - live run state
|
||||
- `.taskmaster/reports/runs/<runId>/manifest.json` - run metadata
|
||||
- `.taskmaster/reports/runs/<runId>/log.jsonl` - event stream
|
||||
|
||||
**WebSocket API (optional):**
|
||||
```typescript
|
||||
// packages/tm-core/src/services/autopilot-server.ts
|
||||
class AutopilotServer {
|
||||
private wss: WebSocketServer
|
||||
|
||||
start(port: number = 7890) {
|
||||
this.wss = new WebSocketServer({ port })
|
||||
|
||||
this.wss.on('connection', (ws) => {
|
||||
// Send current state
|
||||
ws.send(JSON.stringify(this.getCurrentState()))
|
||||
|
||||
// Stream events
|
||||
this.orchestrator.on('*', (event) => {
|
||||
ws.send(JSON.stringify(event))
|
||||
})
|
||||
})
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Usage from IDE extension:**
|
||||
```typescript
|
||||
// VS Code extension example
|
||||
const ws = new WebSocket('ws://localhost:7890')
|
||||
|
||||
ws.on('message', (data) => {
|
||||
const event = JSON.parse(data)
|
||||
|
||||
if (event.type === 'subtask:complete') {
|
||||
vscode.window.showInformationMessage(
|
||||
`Subtask ${event.subtaskId} completed`
|
||||
)
|
||||
}
|
||||
})
|
||||
```
|
||||
|
||||
### 5. Parallel Subtask Execution (Experimental)
|
||||
|
||||
**Dependency Analysis:**
|
||||
```typescript
|
||||
class SubtaskScheduler {
|
||||
async buildDependencyGraph(subtasks: Subtask[]): Promise<DAG> {
|
||||
const graph = new DAG()
|
||||
|
||||
for (const subtask of subtasks) {
|
||||
graph.addNode(subtask.id)
|
||||
|
||||
for (const depId of subtask.dependencies) {
|
||||
graph.addEdge(depId, subtask.id)
|
||||
}
|
||||
}
|
||||
|
||||
return graph
|
||||
}
|
||||
|
||||
async getParallelBatches(graph: DAG): Promise<Subtask[][]> {
|
||||
const batches: Subtask[][] = []
|
||||
const completed = new Set<string>()
|
||||
|
||||
while (completed.size < graph.size()) {
|
||||
const ready = graph.nodes.filter(node =>
|
||||
!completed.has(node.id) &&
|
||||
node.dependencies.every(dep => completed.has(dep))
|
||||
)
|
||||
|
||||
batches.push(ready)
|
||||
ready.forEach(node => completed.add(node.id))
|
||||
}
|
||||
|
||||
return batches
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Parallel Execution:**
|
||||
```bash
|
||||
$ tm autopilot 42 --parallel
|
||||
|
||||
[Batch 1] Running 2 subtasks in parallel:
|
||||
→ 42.1: Add metrics schema
|
||||
→ 42.4: Add API documentation
|
||||
|
||||
42.1 RED ✓ Tests created
|
||||
42.4 RED ✓ Tests created
|
||||
|
||||
42.1 GREEN ✓ Implementation complete
|
||||
42.4 GREEN ✓ Implementation complete
|
||||
|
||||
42.1 COMMIT ✓ Committed: a1b2c3d
|
||||
42.4 COMMIT ✓ Committed: e5f6g7h
|
||||
|
||||
[Batch 2] Running 2 subtasks in parallel (depend on 42.1):
|
||||
→ 42.2: Add collection endpoint
|
||||
→ 42.3: Add dashboard widget
|
||||
...
|
||||
```
|
||||
|
||||
**Conflict Detection:**
|
||||
```typescript
|
||||
async function detectConflicts(subtasks: Subtask[]): Promise<Conflict[]> {
|
||||
const conflicts: Conflict[] = []
|
||||
|
||||
for (let i = 0; i < subtasks.length; i++) {
|
||||
for (let j = i + 1; j < subtasks.length; j++) {
|
||||
const filesA = await predictAffectedFiles(subtasks[i])
|
||||
const filesB = await predictAffectedFiles(subtasks[j])
|
||||
|
||||
const overlap = filesA.filter(f => filesB.includes(f))
|
||||
|
||||
if (overlap.length > 0) {
|
||||
conflicts.push({
|
||||
subtasks: [subtasks[i].id, subtasks[j].id],
|
||||
files: overlap
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return conflicts
|
||||
}
|
||||
```
|
||||
|
||||
### 6. Advanced Configuration
|
||||
|
||||
**Add to `.taskmaster/config.json`:**
|
||||
```json
|
||||
{
|
||||
"autopilot": {
|
||||
"safety": {
|
||||
"previewDiffs": false,
|
||||
"maxChangeLinesPerFile": 100,
|
||||
"warnOnLargeChanges": true,
|
||||
"requireConfirmOnLargeChanges": true
|
||||
},
|
||||
"parallel": {
|
||||
"enabled": false,
|
||||
"maxConcurrent": 3,
|
||||
"detectConflicts": true
|
||||
},
|
||||
"tui": {
|
||||
"enabled": false,
|
||||
"tmuxSession": "taskmaster-autopilot"
|
||||
},
|
||||
"api": {
|
||||
"enabled": false,
|
||||
"port": 7890,
|
||||
"allowRemote": false
|
||||
}
|
||||
},
|
||||
"test": {
|
||||
"frameworks": {
|
||||
"python": {
|
||||
"runner": "pytest",
|
||||
"coverageCommand": "pytest --cov",
|
||||
"testPattern": "**/test_*.py"
|
||||
},
|
||||
"go": {
|
||||
"runner": "go test",
|
||||
"coverageCommand": "go test ./... -coverprofile=coverage.out",
|
||||
"testPattern": "**/*_test.go"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## CLI Updates
|
||||
|
||||
**New commands:**
|
||||
```bash
|
||||
tm autopilot <taskId> --tui # Launch TUI interface
|
||||
tm autopilot <taskId> --parallel # Enable parallel execution
|
||||
tm autopilot <taskId> --preview-diffs # Show diffs before applying
|
||||
tm autopilot <taskId> --executor-mode # Run as executor pane
|
||||
tm autopilot-server start # Start WebSocket API
|
||||
```
|
||||
|
||||
## Success Criteria
|
||||
- Supports Python projects with pytest
|
||||
- Supports Go projects with go test
|
||||
- Diff preview prevents unwanted changes
|
||||
- TUI provides better visibility for long-running tasks
|
||||
- IDE extensions can integrate via state files or WebSocket
|
||||
- Parallel execution reduces total time for independent subtasks
|
||||
|
||||
## Out of Scope
|
||||
- Full Electron/web GUI
|
||||
- AI executor selection UI (defer to Phase 4)
|
||||
- Multi-repository support
|
||||
- Remote execution on cloud runners
|
||||
|
||||
## Testing Strategy
|
||||
- Test with Python project (pytest)
|
||||
- Test with Go project (go test)
|
||||
- Test diff preview UI with mock changes
|
||||
- Test parallel execution with independent subtasks
|
||||
- Test conflict detection with overlapping file changes
|
||||
- Test TUI with mock tmux environment
|
||||
|
||||
## Dependencies
|
||||
- Phase 2 completed (PR + resumability)
|
||||
- tmux installed (for TUI)
|
||||
- blessed or ink library (for TUI rendering)
|
||||
|
||||
## Estimated Effort
|
||||
3-4 weeks
|
||||
|
||||
## Risks & Mitigations
|
||||
- **Risk:** Parallel execution causes git conflicts
|
||||
- **Mitigation:** Conservative conflict detection, sequential fallback
|
||||
|
||||
- **Risk:** TUI adds complexity and maintenance burden
|
||||
- **Mitigation:** Keep TUI optional, state-based design allows alternatives
|
||||
|
||||
- **Risk:** Framework adapters hard to maintain across versions
|
||||
- **Mitigation:** Abstract common parsing logic, document adapter interface
|
||||
|
||||
- **Risk:** Diff preview slows down workflow
|
||||
- **Mitigation:** Make optional, use --preview-diffs flag only when needed
|
||||
|
||||
## Validation
|
||||
Test with:
|
||||
- Python project with pytest and pytest-cov
|
||||
- Go project with go test
|
||||
- Large changes requiring confirmation
|
||||
- Parallel execution with 3+ independent subtasks
|
||||
- TUI with task selection and live status updates
|
||||
- VS Code extension reading state files
|
||||
@@ -0,0 +1,197 @@
|
||||
{
|
||||
"meta": {
|
||||
"generatedAt": "2025-10-07T09:46:06.248Z",
|
||||
"tasksAnalyzed": 23,
|
||||
"totalTasks": 23,
|
||||
"analysisCount": 23,
|
||||
"thresholdScore": 5,
|
||||
"projectName": "Taskmaster",
|
||||
"usedResearch": false
|
||||
},
|
||||
"complexityAnalysis": [
|
||||
{
|
||||
"taskId": 31,
|
||||
"taskTitle": "Create WorkflowOrchestrator service foundation",
|
||||
"complexityScore": 7,
|
||||
"recommendedSubtasks": 5,
|
||||
"expansionPrompt": "Break down the WorkflowOrchestrator foundation into its core architectural components: phase management system, event emitter infrastructure, state management interfaces, service integration, and lifecycle control methods. Each subtask should focus on a specific architectural concern with clear interfaces and testable units.",
|
||||
"reasoning": "This is a foundational service requiring state machine implementation, event-driven architecture, and integration with existing services. The complexity is high due to the need for robust phase management, error handling, and service orchestration patterns."
|
||||
},
|
||||
{
|
||||
"taskId": 32,
|
||||
"taskTitle": "Implement GitAdapter for repository operations",
|
||||
"complexityScore": 6,
|
||||
"recommendedSubtasks": 4,
|
||||
"expansionPrompt": "Decompose the GitAdapter implementation into: TypeScript wrapper creation around existing git-utils.js, core git operation methods with comprehensive error handling, branch naming pattern system with token replacement, and confirmation gates for destructive operations. Focus on type safety and existing code integration.",
|
||||
"reasoning": "Moderate-high complexity due to TypeScript integration over existing JavaScript utilities, branch pattern implementation, and safety mechanisms. The existing git-utils.js provides a solid foundation, reducing complexity."
|
||||
},
|
||||
{
|
||||
"taskId": 33,
|
||||
"taskTitle": "Create TestRunnerAdapter for framework detection and execution",
|
||||
"complexityScore": 8,
|
||||
"recommendedSubtasks": 6,
|
||||
"expansionPrompt": "Break down TestRunnerAdapter into framework detection logic, test execution engine with process management, Jest-specific result parsing, Vitest-specific result parsing, unified result interfaces, and final integration. Each framework parser should be separate to handle their unique output formats.",
|
||||
"reasoning": "High complexity due to multiple framework support (Jest, Vitest), child process management, result parsing from different formats, coverage reporting, and timeout handling. Each framework has unique output formats requiring specialized parsers."
|
||||
},
|
||||
{
|
||||
"taskId": 34,
|
||||
"taskTitle": "Implement autopilot CLI command structure",
|
||||
"complexityScore": 5,
|
||||
"recommendedSubtasks": 4,
|
||||
"expansionPrompt": "Structure the autopilot command into: basic command setup with Commander.js integration, comprehensive flag handling and validation system, preflight check validation with environment validation, and WorkflowOrchestrator integration with dry-run execution planning. Follow existing CLI patterns from the codebase.",
|
||||
"reasoning": "Moderate complexity involving CLI structure, flag handling, and integration with WorkflowOrchestrator. The existing CLI patterns and Commander.js usage in the codebase provide good guidance, reducing implementation complexity."
|
||||
},
|
||||
{
|
||||
"taskId": 35,
|
||||
"taskTitle": "Integrate surgical test generator with WorkflowOrchestrator",
|
||||
"complexityScore": 6,
|
||||
"recommendedSubtasks": 4,
|
||||
"expansionPrompt": "Decompose the test generation integration into: TaskExecutionService enhancement for test generation mode, TestGenerationService creation using executor framework, prompt composition system for rule integration, and framework-specific test pattern support. Leverage existing executor patterns from the codebase.",
|
||||
"reasoning": "Moderate-high complexity due to integration with existing services, prompt composition system, and framework-specific test generation. The existing executor framework and TaskExecutionService provide good integration points."
|
||||
},
|
||||
{
|
||||
"taskId": 36,
|
||||
"taskTitle": "Implement subtask TDD loop execution",
|
||||
"complexityScore": 9,
|
||||
"recommendedSubtasks": 7,
|
||||
"expansionPrompt": "Break down the TDD loop into: SubtaskExecutor class architecture, RED phase test generation, GREEN phase code generation, COMMIT phase with conventional commits, retry mechanism for GREEN phase, timeout and backoff policies, and TaskService integration. Each phase should be independently testable.",
|
||||
"reasoning": "Very high complexity due to implementing the complete TDD red-green-commit cycle with AI integration, retry logic, timeout handling, and git operations. This is the core autonomous workflow requiring robust error handling and state management."
|
||||
},
|
||||
{
|
||||
"taskId": 37,
|
||||
"taskTitle": "Add configuration schema for autopilot settings",
|
||||
"complexityScore": 4,
|
||||
"recommendedSubtasks": 3,
|
||||
"expansionPrompt": "Expand configuration support into: extending configuration interfaces with autopilot settings, updating ConfigManager validation logic, and implementing default configuration values. Build on existing configuration patterns and maintain backward compatibility.",
|
||||
"reasoning": "Low-moderate complexity involving schema extension and validation logic. The existing configuration system provides clear patterns to follow, making this primarily an extension task rather than new architecture."
|
||||
},
|
||||
{
|
||||
"taskId": 38,
|
||||
"taskTitle": "Implement run state persistence and logging",
|
||||
"complexityScore": 6,
|
||||
"recommendedSubtasks": 5,
|
||||
"expansionPrompt": "Structure run state management into: RunStateManager service class creation, run directory structure and manifest creation, JSONL event logging system, test result and commit tracking storage, and state checkpointing with resume functionality. Focus on data integrity and structured logging.",
|
||||
"reasoning": "Moderate-high complexity due to file system operations, structured logging, state serialization, and resume functionality. Requires careful design of data formats and error handling for persistence operations."
|
||||
},
|
||||
{
|
||||
"taskId": 39,
|
||||
"taskTitle": "Add GitHub PR creation with run reports",
|
||||
"complexityScore": 5,
|
||||
"recommendedSubtasks": 4,
|
||||
"expansionPrompt": "Decompose PR creation into: PRAdapter service foundation with interfaces, GitHub CLI integration and command execution, PR body generation from run data and test results, and custom PR template system with configuration support. Leverage existing git-utils.js patterns for CLI integration.",
|
||||
"reasoning": "Moderate complexity involving GitHub CLI integration, report generation, and template systems. The existing git-utils.js provides patterns for CLI tool integration, reducing implementation complexity."
|
||||
},
|
||||
{
|
||||
"taskId": 40,
|
||||
"taskTitle": "Implement task dependency resolution for subtask ordering",
|
||||
"complexityScore": 6,
|
||||
"recommendedSubtasks": 4,
|
||||
"expansionPrompt": "Break down dependency resolution into: dependency resolution algorithm with cycle detection, topological sorting for subtask ordering, task eligibility checking system, and TaskService integration. Implement graph algorithms for dependency management with proper error handling.",
|
||||
"reasoning": "Moderate-high complexity due to graph algorithm implementation, cycle detection, and integration with existing task management. Requires careful design of dependency resolution logic and edge case handling."
|
||||
},
|
||||
{
|
||||
"taskId": 41,
|
||||
"taskTitle": "Create resume functionality for interrupted runs",
|
||||
"complexityScore": 7,
|
||||
"recommendedSubtasks": 5,
|
||||
"expansionPrompt": "Structure resume functionality into: checkpoint creation in RunStateManager, state restoration logic with validation, state validation for safe resume operations, CLI flag implementation for resume command, and partial phase resume functionality. Focus on data integrity and workflow consistency.",
|
||||
"reasoning": "High complexity due to state serialization/deserialization, workflow restoration, validation logic, and CLI integration. Requires robust error handling and state consistency checks for reliable resume operations."
|
||||
},
|
||||
{
|
||||
"taskId": 42,
|
||||
"taskTitle": "Add coverage threshold enforcement",
|
||||
"complexityScore": 5,
|
||||
"recommendedSubtasks": 4,
|
||||
"expansionPrompt": "Decompose coverage enforcement into: coverage report parsing from Jest/Vitest, configurable threshold validation logic, coverage gates integration in workflow phases, and detailed coverage failure reporting system. Build on existing TestRunnerAdapter patterns.",
|
||||
"reasoning": "Moderate complexity involving coverage report parsing, validation logic, and workflow integration. The existing TestRunnerAdapter provides good foundation for extending coverage capabilities."
|
||||
},
|
||||
{
|
||||
"taskId": 43,
|
||||
"taskTitle": "Implement tmux-based TUI navigator",
|
||||
"complexityScore": 8,
|
||||
"recommendedSubtasks": 6,
|
||||
"expansionPrompt": "Break down TUI implementation into: framework selection and basic structure setup, left pane interface layout with status indicators, tmux integration and terminal coordination, navigation system with keybindings, real-time status updates system, and comprehensive event handling with UX polish. Each component should be independently testable.",
|
||||
"reasoning": "High complexity due to terminal UI framework integration, tmux session management, real-time updates, keyboard event handling, and terminal interface design. Requires expertise in terminal UI libraries and tmux integration."
|
||||
},
|
||||
{
|
||||
"taskId": 44,
|
||||
"taskTitle": "Add prompt composition system for context-aware test generation",
|
||||
"complexityScore": 6,
|
||||
"recommendedSubtasks": 4,
|
||||
"expansionPrompt": "Structure prompt composition into: PromptComposer service foundation, template processing engine with token replacement, rule loading system with precedence handling, and context injection with phase-specific prompt generation. Focus on flexible template system and rule management.",
|
||||
"reasoning": "Moderate-high complexity due to template processing, rule precedence systems, and context injection logic. Requires careful design of template syntax and rule loading mechanisms."
|
||||
},
|
||||
{
|
||||
"taskId": 45,
|
||||
"taskTitle": "Implement tag-branch mapping and automatic tag switching",
|
||||
"complexityScore": 5,
|
||||
"recommendedSubtasks": 3,
|
||||
"expansionPrompt": "Decompose tag-branch mapping into: GitAdapter enhancement with branch-to-tag extraction logic, automatic tag switching workflow integration, and branch-to-tag mapping persistence with validation. Build on existing git-utils.js and tag management functionality.",
|
||||
"reasoning": "Moderate complexity involving pattern matching, tag management integration, and workflow automation. The existing git-utils.js and tag management systems provide good foundation for implementation."
|
||||
},
|
||||
{
|
||||
"taskId": 46,
|
||||
"taskTitle": "Add comprehensive error handling and recovery",
|
||||
"complexityScore": 7,
|
||||
"recommendedSubtasks": 5,
|
||||
"expansionPrompt": "Structure error handling into: error classification system with specific error types, recovery suggestion engine with actionable recommendations, error context management and preservation, force flag implementation with selective bypass, and logging/reporting system integration. Focus on actionable error messages and automated recovery where possible.",
|
||||
"reasoning": "High complexity due to comprehensive error taxonomy, recovery automation, context preservation, and integration across all workflow components. Requires deep understanding of failure modes and recovery strategies."
|
||||
},
|
||||
{
|
||||
"taskId": 47,
|
||||
"taskTitle": "Implement conventional commit message generation",
|
||||
"complexityScore": 4,
|
||||
"recommendedSubtasks": 3,
|
||||
"expansionPrompt": "Break down commit message generation into: template system creation with variable substitution, commit type auto-detection based on task content and file changes, and validation with GitAdapter integration. Follow conventional commit standards and integrate with existing git operations.",
|
||||
"reasoning": "Low-moderate complexity involving template processing, pattern matching for commit type detection, and validation logic. Well-defined conventional commit standards provide clear implementation guidance."
|
||||
},
|
||||
{
|
||||
"taskId": 48,
|
||||
"taskTitle": "Add multi-framework test execution support",
|
||||
"complexityScore": 7,
|
||||
"recommendedSubtasks": 5,
|
||||
"expansionPrompt": "Expand test framework support into: framework detection system for multiple languages, common adapter interface design, Python pytest adapter implementation, Go and Rust adapter implementations, and integration with existing TestRunnerAdapter. Each language adapter should follow the unified interface pattern.",
|
||||
"reasoning": "High complexity due to multi-language support, framework detection across different ecosystems, and adapter pattern implementation. Each language has unique testing conventions and output formats."
|
||||
},
|
||||
{
|
||||
"taskId": 49,
|
||||
"taskTitle": "Implement workflow event streaming for real-time monitoring",
|
||||
"complexityScore": 6,
|
||||
"recommendedSubtasks": 4,
|
||||
"expansionPrompt": "Structure event streaming into: WorkflowOrchestrator EventEmitter enhancement, structured event format with metadata, event persistence to run logs, and optional WebSocket streaming for external monitoring. Focus on event consistency and real-time delivery.",
|
||||
"reasoning": "Moderate-high complexity due to event-driven architecture, structured event formats, persistence integration, and WebSocket implementation. Requires careful design of event schemas and delivery mechanisms."
|
||||
},
|
||||
{
|
||||
"taskId": 50,
|
||||
"taskTitle": "Add intelligent test targeting for faster feedback",
|
||||
"complexityScore": 7,
|
||||
"recommendedSubtasks": 5,
|
||||
"expansionPrompt": "Decompose test targeting into: file change detection system, test dependency analysis engine, framework-specific targeting adapters, test impact calculation algorithm, and fallback integration with TestRunnerAdapter. Focus on accuracy and performance optimization.",
|
||||
"reasoning": "High complexity due to dependency analysis, impact calculation algorithms, framework-specific targeting, and integration with existing test execution. Requires sophisticated analysis of code relationships and test dependencies."
|
||||
},
|
||||
{
|
||||
"taskId": 51,
|
||||
"taskTitle": "Implement dry-run visualization with execution timeline",
|
||||
"complexityScore": 6,
|
||||
"recommendedSubtasks": 4,
|
||||
"expansionPrompt": "Structure dry-run visualization into: timeline calculation engine with duration estimates, estimation algorithms based on task complexity, ASCII art progress visualization with formatting, and resource validation with preflight checks. Focus on accurate planning and clear visual presentation.",
|
||||
"reasoning": "Moderate-high complexity due to timeline calculation, estimation algorithms, ASCII visualization, and resource validation. Requires understanding of workflow timing and visual formatting for terminal output."
|
||||
},
|
||||
{
|
||||
"taskId": 52,
|
||||
"taskTitle": "Add autopilot workflow integration tests",
|
||||
"complexityScore": 8,
|
||||
"recommendedSubtasks": 6,
|
||||
"expansionPrompt": "Structure integration testing into: isolated test environment infrastructure, mock integrations and service stubs, end-to-end workflow test scenarios, performance benchmarking and resource monitoring, test isolation and parallelization strategies, and comprehensive result validation and reporting. Focus on realistic test scenarios and reliable automation.",
|
||||
"reasoning": "High complexity due to end-to-end testing requirements, mock service integration, performance testing, isolation mechanisms, and comprehensive validation. Requires sophisticated test infrastructure and scenario design."
|
||||
},
|
||||
{
|
||||
"taskId": 53,
|
||||
"taskTitle": "Finalize autopilot documentation and examples",
|
||||
"complexityScore": 3,
|
||||
"recommendedSubtasks": 4,
|
||||
"expansionPrompt": "Structure documentation into: comprehensive autopilot documentation covering setup and usage, example PRD files and templates for different project types, troubleshooting guide for common issues and solutions, and demo materials with workflow visualization. Focus on clarity and practical examples.",
|
||||
"reasoning": "Low complexity involving documentation writing, example creation, and demo material production. The main challenge is ensuring accuracy and completeness rather than technical implementation."
|
||||
}
|
||||
]
|
||||
}
|
||||
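The reasoning above for the commit-message task leans on pattern matching against the Conventional Commits standard. As a rough illustration of that kind of check (a sketch only; the function and type names are hypothetical, not the project's actual implementation):

```typescript
/**
 * Hypothetical sketch of conventional-commit type detection.
 * Matches messages of the form "type(scope)?: subject".
 */
const COMMIT_TYPES = ['feat', 'fix', 'docs', 'refactor', 'test', 'chore'] as const;

type CommitType = (typeof COMMIT_TYPES)[number];

export function detectCommitType(message: string): CommitType | null {
	// e.g. "feat(cli): add autopilot command" -> "feat"
	const match = /^([a-z]+)(\([^)]*\))?!?:\s.+/.exec(message);
	if (!match) return null;
	const type = match[1] as CommitType;
	return COMMIT_TYPES.includes(type) ? type : null;
}

// Usage:
// detectCommitType('fix(core): handle empty task list'); // -> 'fix'
// detectCommitType('update stuff');                      // -> null
```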
@@ -0,0 +1,93 @@
{
  "meta": {
    "generatedAt": "2025-10-07T14:16:40.283Z",
    "tasksAnalyzed": 10,
    "totalTasks": 10,
    "analysisCount": 10,
    "thresholdScore": 5,
    "projectName": "Taskmaster",
    "usedResearch": false
  },
  "complexityAnalysis": [
    {
      "taskId": 1,
      "taskTitle": "Create autopilot command CLI skeleton",
      "complexityScore": 4,
      "recommendedSubtasks": 3,
      "expansionPrompt": "Break down the autopilot command creation into: 1) Create AutopilotCommand class extending Commander.Command with proper argument parsing and options, 2) Implement command structure with help text and validation following existing patterns, 3) Add basic registration method and placeholder action handler",
      "reasoning": "Medium complexity due to following established patterns in the codebase. The command-registry.ts and start.command.ts provide clear templates for implementation. Main complexity is argument parsing and option validation."
    },
    {
      "taskId": 2,
      "taskTitle": "Implement preflight detection system",
      "complexityScore": 7,
      "recommendedSubtasks": 5,
      "expansionPrompt": "Create PreflightChecker with these subtasks: 1) Package.json test script detection and validation, 2) Git working tree status checking using system commands, 3) Tool availability validation (git, gh, node/npm), 4) Default branch detection via git commands, 5) Structured result reporting with success/failure indicators and error messages",
      "reasoning": "High complexity due to system integration requirements. Needs to interact with multiple external tools (git, npm, gh), parse various file formats, and handle different system configurations. Error handling for missing tools adds complexity."
    },
    {
      "taskId": 3,
      "taskTitle": "Implement task loading and validation",
      "complexityScore": 5,
      "recommendedSubtasks": 3,
      "expansionPrompt": "Implement task loading: 1) Use existing TaskService from @tm/core to load tasks by ID with proper error handling, 2) Validate task structure including subtask existence and dependency validation, 3) Provide user-friendly error messages for missing tasks or need to expand subtasks first",
      "reasoning": "Medium-high complexity. While leveraging existing TaskService reduces implementation effort, the validation logic for subtasks and dependencies requires careful handling of edge cases. Task structure validation adds complexity."
    },
    {
      "taskId": 4,
      "taskTitle": "Create execution plan display logic",
      "complexityScore": 6,
      "recommendedSubtasks": 4,
      "expansionPrompt": "Build ExecutionPlanDisplay: 1) Create display formatter using boxen and chalk for consistent CLI styling, 2) Format preflight check results with color-coded status indicators, 3) Display subtask execution order with RED/GREEN/COMMIT phase visualization, 4) Show branch/tag info and finalization steps with duration estimates",
      "reasoning": "Moderate-high complexity due to complex formatting requirements and dependency on multiple other components. The display needs to coordinate information from preflight, task validation, and execution planning. CLI styling consistency adds complexity."
    },
    {
      "taskId": 5,
      "taskTitle": "Implement branch and tag planning",
      "complexityScore": 3,
      "recommendedSubtasks": 2,
      "expansionPrompt": "Create BranchPlanner: 1) Implement branch name generation using pattern <tag>/task-<id>-<slug> with kebab-case conversion and special character handling, 2) Add TaskMaster config integration to determine active tag and handle existing branch conflicts",
      "reasoning": "Low-medium complexity. String manipulation and naming convention implementation is straightforward. The main complexity is handling edge cases with special characters and existing branch conflicts."
    },
    {
      "taskId": 6,
      "taskTitle": "Create subtask execution order calculation",
      "complexityScore": 8,
      "recommendedSubtasks": 4,
      "expansionPrompt": "Implement dependency resolution: 1) Build dependency graph from subtask data with proper parsing, 2) Implement topological sort algorithm for execution order, 3) Add circular dependency detection with clear error reporting, 4) Create parallel execution grouping for independent subtasks",
      "reasoning": "High complexity due to graph algorithms and dependency resolution. Topological sorting, circular dependency detection, and parallel grouping require algorithmic sophistication. Edge cases in dependency chains add significant complexity."
    },
    {
      "taskId": 7,
      "taskTitle": "Implement TDD phase planning for subtasks",
      "complexityScore": 6,
      "recommendedSubtasks": 4,
      "expansionPrompt": "Create TDDPhasePlanner: 1) Implement test file path detection for common project structures (src/, tests/, __tests__), 2) Parse implementation files from subtask details and descriptions, 3) Generate conventional commit messages for RED/GREEN/COMMIT phases, 4) Add implementation complexity estimation based on subtask content",
      "reasoning": "Moderate-high complexity due to project structure detection and file path inference. Conventional commit message generation and complexity estimation require understanding of different project layouts and parsing subtask content effectively."
    },
    {
      "taskId": 8,
      "taskTitle": "Add finalization steps planning",
      "complexityScore": 4,
      "recommendedSubtasks": 3,
      "expansionPrompt": "Create FinalizationPlanner: 1) Implement test suite execution planning with coverage threshold detection from package.json, 2) Add git operations planning (branch push, PR creation) using existing git patterns, 3) Create duration estimation algorithm based on subtask count and complexity metrics",
      "reasoning": "Medium complexity. Building on existing git utilities and test command detection reduces complexity. Main challenges are coverage threshold parsing and duration estimation algorithms."
    },
    {
      "taskId": 9,
      "taskTitle": "Integrate command with existing CLI infrastructure",
      "complexityScore": 3,
      "recommendedSubtasks": 2,
      "expansionPrompt": "Complete CLI integration: 1) Add AutopilotCommand to command-registry.ts following existing patterns and update command metadata, 2) Test command registration and help system integration with proper cleanup and error handling",
      "reasoning": "Low-medium complexity. The command-registry.ts provides a clear pattern to follow. Main work is registration and ensuring proper integration with existing CLI infrastructure. Well-established patterns reduce complexity."
    },
    {
      "taskId": 10,
      "taskTitle": "Add comprehensive error handling and edge cases",
      "complexityScore": 7,
      "recommendedSubtasks": 5,
      "expansionPrompt": "Implement error handling: 1) Add missing task and invalid task structure error handling with helpful messages, 2) Handle git state errors (dirty working tree, missing tools), 3) Add dependency validation errors (circular, invalid references), 4) Implement missing tool detection with installation guidance, 5) Create user-friendly error messages following existing CLI patterns",
      "reasoning": "High complexity due to comprehensive error scenarios. Each component (preflight, task loading, dependency resolution) has multiple failure modes that need proper handling. Providing helpful error messages and recovery suggestions adds complexity."
    }
  ]
}
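The expansion prompt for task 6 above calls for building a dependency graph, topologically sorting it, and reporting circular dependencies. A minimal sketch of that approach (illustrative only; the interface and function names are assumptions, not the project's actual API):

```typescript
interface PlannedSubtask {
	id: number;
	dependencies?: number[]; // ids of subtasks that must run first
}

/** Returns subtask ids in execution order, or throws if a cycle exists. */
export function orderSubtasks(subtasks: PlannedSubtask[]): number[] {
	const inDegree = new Map<number, number>();
	const dependents = new Map<number, number[]>();

	for (const st of subtasks) {
		inDegree.set(st.id, st.dependencies?.length ?? 0);
		for (const dep of st.dependencies ?? []) {
			dependents.set(dep, [...(dependents.get(dep) ?? []), st.id]);
		}
	}

	// Kahn's algorithm: repeatedly take subtasks with no unmet dependencies
	const queue = [...inDegree.entries()]
		.filter(([, degree]) => degree === 0)
		.map(([id]) => id);
	const order: number[] = [];

	while (queue.length > 0) {
		const id = queue.shift()!;
		order.push(id);
		for (const next of dependents.get(id) ?? []) {
			const remaining = (inDegree.get(next) ?? 0) - 1;
			inDegree.set(next, remaining);
			if (remaining === 0) queue.push(next);
		}
	}

	// Any leftover subtasks were never freed, which indicates a cycle
	if (order.length !== subtasks.length) {
		throw new Error('Circular dependency detected among subtasks');
	}
	return order;
}
```

Subtasks dequeued in the same "wave" have no ordering constraints between them, so the same traversal could also produce the parallel execution groups the prompt mentions.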
@@ -1,6 +1,6 @@
{
  "currentTag": "master",
  "lastSwitched": "2025-09-12T22:25:27.535Z",
  "lastSwitched": "2025-10-07T17:17:58.049Z",
  "branchTagMapping": {
    "v017-adds": "v017-adds",
    "next": "next"
File diff suppressed because it is too large
104 CHANGELOG.md
@@ -1,5 +1,109 @@
# task-master-ai

## 0.28.0-rc.2

### Minor Changes

- [#1273](https://github.com/eyaltoledano/claude-task-master/pull/1273) [`b43b7ce`](https://github.com/eyaltoledano/claude-task-master/commit/b43b7ce201625eee956fb2f8cd332f238bb78c21) Thanks [@ben-vargas](https://github.com/ben-vargas)! - Add Codex CLI provider with OAuth authentication
  - Added codex-cli provider for GPT-5 and GPT-5-Codex models (272K input / 128K output)
  - OAuth-first authentication via `codex login` - no API key required
  - Optional OPENAI_CODEX_API_KEY support
  - Codebase analysis capabilities automatically enabled
  - Command-specific settings and approval/sandbox modes

### Patch Changes

- [#1277](https://github.com/eyaltoledano/claude-task-master/pull/1277) [`7b5a7c4`](https://github.com/eyaltoledano/claude-task-master/commit/7b5a7c4495a68b782f7407fc5d0e0d3ae81f42f5) Thanks [@Crunchyman-ralph](https://github.com/Crunchyman-ralph)! - Fix MCP connection errors caused by deprecated generateTaskFiles calls. Resolves "Cannot read properties of null (reading 'toString')" errors when using MCP tools for task management operations.

- [#1276](https://github.com/eyaltoledano/claude-task-master/pull/1276) [`caee040`](https://github.com/eyaltoledano/claude-task-master/commit/caee040907f856d31a660171c9e6d966f23c632e) Thanks [@Crunchyman-ralph](https://github.com/Crunchyman-ralph)! - Fix MCP server error when file parameter not provided - now properly constructs default tasks.json path instead of failing with 'tasksJsonPath is required' error.

## 0.28.0-rc.1

### Patch Changes

- [#1274](https://github.com/eyaltoledano/claude-task-master/pull/1274) [`4f984f8`](https://github.com/eyaltoledano/claude-task-master/commit/4f984f8a6965da9f9c7edd60ddfd6560ac022917) Thanks [@Crunchyman-ralph](https://github.com/Crunchyman-ralph)! - Do a quick fix on build

## 0.28.0-rc.0

### Minor Changes

- [#1215](https://github.com/eyaltoledano/claude-task-master/pull/1215) [`0079b7d`](https://github.com/eyaltoledano/claude-task-master/commit/0079b7defdad550811f704c470fdd01955d91d4d) Thanks [@joedanz](https://github.com/joedanz)! - Add Cursor IDE custom slash command support

  Expose Task Master commands as Cursor slash commands by copying assets/claude/commands to .cursor/commands on profile add and cleaning up on remove.

- [#1246](https://github.com/eyaltoledano/claude-task-master/pull/1246) [`18aa416`](https://github.com/eyaltoledano/claude-task-master/commit/18aa416035f44345bde1c7321490345733a5d042) Thanks [@Crunchyman-ralph](https://github.com/Crunchyman-ralph)! - Added api keys page on docs website: docs.task-master.dev/getting-started/api-keys

- [#1246](https://github.com/eyaltoledano/claude-task-master/pull/1246) [`18aa416`](https://github.com/eyaltoledano/claude-task-master/commit/18aa416035f44345bde1c7321490345733a5d042) Thanks [@Crunchyman-ralph](https://github.com/Crunchyman-ralph)! - Move to AI SDK v5:
  - Works better with claude-code and gemini-cli as ai providers
  - Improved openai model family compatibility
  - Migrate ollama provider to v2
  - Closes #1223, #1013, #1161, #1174

- [#1262](https://github.com/eyaltoledano/claude-task-master/pull/1262) [`738ec51`](https://github.com/eyaltoledano/claude-task-master/commit/738ec51c049a295a12839b2dfddaf05e23b8fede) Thanks [@Crunchyman-ralph](https://github.com/Crunchyman-ralph)! - Migrate AI services to use generateObject for structured data generation

  This update migrates all AI service calls from generateText to generateObject, ensuring more reliable and structured responses across all commands.

  ### Key Changes:
  - **Unified AI Service**: Replaced separate generateText implementations with a single generateObjectService that handles structured data generation
  - **JSON Mode Support**: Added proper JSON mode configuration for providers that support it (OpenAI, Anthropic, Google, Groq)
  - **Schema Validation**: Integrated Zod schemas for all AI-generated content with automatic validation
  - **Provider Compatibility**: Maintained compatibility with all existing providers while leveraging their native structured output capabilities
  - **Improved Reliability**: Structured output generation reduces parsing errors and ensures consistent data formats

  ### Technical Improvements:
  - Centralized provider configuration in `ai-providers-unified.js`
  - Added `generateObject` support detection for each provider
  - Implemented proper error handling for schema validation failures
  - Maintained backward compatibility with existing prompt structures

  ### Bug Fixes:
  - Fixed subtask ID numbering issue where AI was generating inconsistent IDs (101-105, 601-603) instead of sequential numbering (1, 2, 3...)
  - Enhanced prompt instructions to enforce proper ID generation patterns
  - Ensured subtasks display correctly as X.1, X.2, X.3 format

  This migration improves the reliability and consistency of AI-generated content throughout the Task Master application.
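As a purely illustrative aside, the structured-output pattern this entry describes (the AI SDK's `generateObject` paired with a Zod schema) generally looks like the following; the schema, model choice, and function name here are assumptions, not Task Master's actual internals:

```typescript
import { generateObject } from 'ai';
import { openai } from '@ai-sdk/openai';
import { z } from 'zod';

// Illustrative schema: sequentially numbered subtasks (1, 2, 3...)
const subtaskSchema = z.object({
	subtasks: z.array(
		z.object({
			id: z.number().int().positive(),
			title: z.string(),
			description: z.string()
		})
	)
});

export async function expandTaskExample(prompt: string) {
	// generateObject validates the model output against the Zod schema,
	// so malformed JSON or wrong shapes surface as errors instead of
	// silently producing unparsable text.
	const { object } = await generateObject({
		model: openai('gpt-4o-mini'),
		schema: subtaskSchema,
		prompt
	});
	return object.subtasks;
}
```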
- [#1112](https://github.com/eyaltoledano/claude-task-master/pull/1112) [`d67b81d`](https://github.com/eyaltoledano/claude-task-master/commit/d67b81d25ddd927fabb6f5deb368e8993519c541) Thanks [@olssonsten](https://github.com/olssonsten)! - Enhanced Roo Code profile with MCP timeout configuration for improved reliability during long-running AI operations. The Roo profile now automatically configures a 300-second timeout for MCP server operations, preventing timeouts during complex tasks like `parse-prd`, `expand-all`, `analyze-complexity`, and `research` operations. This change also replaces static MCP configuration files with programmatic generation for better maintainability.

  **What's New:**
  - 300-second timeout for MCP operations (up from default 60 seconds)
  - Programmatic MCP configuration generation (replaces static asset files)
  - Enhanced reliability for AI-powered operations
  - Consistent with other AI coding assistant profiles

  **Migration:** No user action required - existing Roo Code installations will automatically receive the enhanced MCP configuration on next initialization.

- [#1246](https://github.com/eyaltoledano/claude-task-master/pull/1246) [`986ac11`](https://github.com/eyaltoledano/claude-task-master/commit/986ac117aee00bcd3e6830a0f76e1ad6d10e0bca) Thanks [@Crunchyman-ralph](https://github.com/Crunchyman-ralph)! - Upgrade grok-cli ai provider to ai sdk v5

### Patch Changes

- [#1235](https://github.com/eyaltoledano/claude-task-master/pull/1235) [`aaacc3d`](https://github.com/eyaltoledano/claude-task-master/commit/aaacc3dae36247b4de72b2d2697f49e5df6d01e3) Thanks [@Crunchyman-ralph](https://github.com/Crunchyman-ralph)! - Improve `analyze-complexity` cli docs and `--research` flag documentation

- [#1251](https://github.com/eyaltoledano/claude-task-master/pull/1251) [`0b2c696`](https://github.com/eyaltoledano/claude-task-master/commit/0b2c6967c4605c33a100cff16f6ce8ff09ad06f0) Thanks [@Crunchyman-ralph](https://github.com/Crunchyman-ralph)! - Change parent task back to "pending" when all subtasks are in "pending" state

- [#1172](https://github.com/eyaltoledano/claude-task-master/pull/1172) [`b5fe723`](https://github.com/eyaltoledano/claude-task-master/commit/b5fe723f8ead928e9f2dbde13b833ee70ac3382d) Thanks [@jujax](https://github.com/jujax)! - Fix Claude Code settings validation for pathToClaudeCodeExecutable

- [#1192](https://github.com/eyaltoledano/claude-task-master/pull/1192) [`2b69936`](https://github.com/eyaltoledano/claude-task-master/commit/2b69936ee7b34346d6de5175af20e077359e2e2a) Thanks [@nukunga](https://github.com/nukunga)! - Fix sonar deep research model failing; the model should be called `sonar-deep-research`

- [#1270](https://github.com/eyaltoledano/claude-task-master/pull/1270) [`20004a3`](https://github.com/eyaltoledano/claude-task-master/commit/20004a39ea848f747e1ff48981bfe176554e4055) Thanks [@Crunchyman-ralph](https://github.com/Crunchyman-ralph)! - Fix complexity score not showing for `task-master show` and `task-master list`
  - Added complexity score on "next task" when running `task-master list`
  - Added colors to the complexity score to reflect difficulty (easy, medium, hard)

## 0.27.3

### Patch Changes

- [#1254](https://github.com/eyaltoledano/claude-task-master/pull/1254) [`af53525`](https://github.com/eyaltoledano/claude-task-master/commit/af53525cbc660a595b67d4bb90d906911c71f45d) Thanks [@Crunchyman-ralph](https://github.com/Crunchyman-ralph)! - Fixed issue where `tm show` command could not find subtasks using dotted notation IDs (e.g., '8.1').
  - The command now properly searches within parent task subtasks and returns the correct subtask information.

## 0.27.2

### Patch Changes

- [#1248](https://github.com/eyaltoledano/claude-task-master/pull/1248) [`044a7bf`](https://github.com/eyaltoledano/claude-task-master/commit/044a7bfc98049298177bc655cf341d7a8b6a0011) Thanks [@Crunchyman-ralph](https://github.com/Crunchyman-ralph)! - Fix set-status for subtasks:
  - Parent tasks are now set as `done` when subtasks are all `done`
  - Parent tasks are now set as `in-progress` when at least one subtask is `in-progress` or `done`

## 0.27.1

### Patch Changes

22 CLAUDE.md
@@ -4,6 +4,28 @@
**Import Task Master's development workflow commands and guidelines, treat as if import is in the main CLAUDE.md file.**
@./.taskmaster/CLAUDE.md

## Test Guidelines

### Synchronous Tests
- **NEVER use async/await in test functions** unless testing actual asynchronous operations
- Use synchronous top-level imports instead of dynamic `await import()`
- Test bodies should be synchronous whenever possible
- Example:

```javascript
// ✅ CORRECT - Synchronous imports
import { MyClass } from '../src/my-class.js';

it('should verify behavior', () => {
	expect(new MyClass().property).toBe(value);
});

// ❌ INCORRECT - Async imports
it('should verify behavior', async () => {
	const { MyClass } = await import('../src/my-class.js');
	expect(new MyClass().property).toBe(value);
});
```

## Changeset Guidelines

- When creating changesets, remember that it's user-facing, meaning we don't have to get into the specifics of the code, but rather mention what the end-user is getting or fixing from this changeset.
19 README.md
@@ -60,6 +60,19 @@ The following documentation is also available in the `docs` directory:

> **Note:** After clicking the link, you'll still need to add your API keys to the configuration. The link installs the MCP server with placeholder keys that you'll need to replace with your actual API keys.

#### Claude Code Quick Install

For Claude Code users:

```bash
claude mcp add taskmaster-ai -- npx -y task-master-ai
```

Don't forget to add your API keys to the configuration:
- in the root .env of your project (see the example below)
- in the "env" section of your mcp config for taskmaster-ai
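For illustration, a root `.env` might contain placeholder keys like the following (the key names shown are examples for two common providers; use whichever providers you actually configure, and mirror the same keys in the `env` block of your MCP config):

```bash
# .env (project root) — placeholder values, replace with your real keys
ANTHROPIC_API_KEY=sk-ant-...
PERPLEXITY_API_KEY=pplx-...
```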

## Requirements

Taskmaster utilizes AI across several commands, and those require a separate API key. You can use a variety of models from different AI providers provided you add your API keys. For example, if you want to use Claude 3.7, you'll need an Anthropic API key.

@@ -75,8 +88,9 @@ At least one (1) of the following is required:
- xAI API Key (for research or main model)
- OpenRouter API Key (for research or main model)
- Claude Code (no API key required - requires Claude Code CLI)
- Codex CLI (OAuth via ChatGPT subscription - requires Codex CLI)

Using the research model is optional but highly recommended. You will need at least ONE API key (unless using Claude Code). Adding all API keys enables you to seamlessly switch between model providers at will.
Using the research model is optional but highly recommended. You will need at least ONE API key (unless using Claude Code or Codex CLI with OAuth). Adding all API keys enables you to seamlessly switch between model providers at will.

## Quick Start

@@ -92,10 +106,11 @@ MCP (Model Control Protocol) lets you run Task Master directly from your editor.
| | Project | `<project_folder>/.cursor/mcp.json` | `<project_folder>\.cursor\mcp.json` | `mcpServers` |
| **Windsurf** | Global | `~/.codeium/windsurf/mcp_config.json` | `%USERPROFILE%\.codeium\windsurf\mcp_config.json` | `mcpServers` |
| **VS Code** | Project | `<project_folder>/.vscode/mcp.json` | `<project_folder>\.vscode\mcp.json` | `servers` |
| **Q CLI** | Global | `~/.aws/amazonq/mcp.json` | | `mcpServers` |

##### Manual Configuration

###### Cursor & Windsurf (`mcpServers`)
###### Cursor & Windsurf & Q Developer CLI (`mcpServers`)

```json
{
@@ -35,7 +35,7 @@
"@types/inquirer": "^9.0.3",
"@types/node": "^22.10.5",
"tsx": "^4.20.4",
"typescript": "^5.7.3",
"typescript": "^5.9.2",
"vitest": "^2.1.8"
},
"engines": {
262 apps/cli/src/command-registry.ts Normal file
@@ -0,0 +1,262 @@
|
||||
/**
|
||||
* @fileoverview Centralized Command Registry
|
||||
* Provides a single location for registering all CLI commands
|
||||
*/
|
||||
|
||||
import { Command } from 'commander';
|
||||
|
||||
// Import all commands
|
||||
import { ListTasksCommand } from './commands/list.command.js';
|
||||
import { ShowCommand } from './commands/show.command.js';
|
||||
import { AuthCommand } from './commands/auth.command.js';
|
||||
import { ContextCommand } from './commands/context.command.js';
|
||||
import { StartCommand } from './commands/start.command.js';
|
||||
import { SetStatusCommand } from './commands/set-status.command.js';
|
||||
import { ExportCommand } from './commands/export.command.js';
|
||||
import { AutopilotCommand } from './commands/autopilot.command.js';
|
||||
|
||||
/**
|
||||
* Command metadata for registration
|
||||
*/
|
||||
export interface CommandMetadata {
|
||||
name: string;
|
||||
description: string;
|
||||
commandClass: typeof Command;
|
||||
category?: 'task' | 'auth' | 'utility' | 'development';
|
||||
}
|
||||
|
||||
/**
|
||||
* Registry of all available commands
|
||||
*/
|
||||
export class CommandRegistry {
|
||||
/**
|
||||
* All available commands with their metadata
|
||||
*/
|
||||
private static commands: CommandMetadata[] = [
|
||||
// Task Management Commands
|
||||
{
|
||||
name: 'list',
|
||||
description: 'List all tasks with filtering and status overview',
|
||||
commandClass: ListTasksCommand as any,
|
||||
category: 'task'
|
||||
},
|
||||
{
|
||||
name: 'show',
|
||||
description: 'Display detailed information about a specific task',
|
||||
commandClass: ShowCommand as any,
|
||||
category: 'task'
|
||||
},
|
||||
{
|
||||
name: 'start',
|
||||
description: 'Start working on a task with claude-code',
|
||||
commandClass: StartCommand as any,
|
||||
category: 'task'
|
||||
},
|
||||
{
|
||||
name: 'set-status',
|
||||
description: 'Update the status of one or more tasks',
|
||||
commandClass: SetStatusCommand as any,
|
||||
category: 'task'
|
||||
},
|
||||
{
|
||||
name: 'export',
|
||||
description: 'Export tasks to external systems',
|
||||
commandClass: ExportCommand as any,
|
||||
category: 'task'
|
||||
},
|
||||
{
|
||||
name: 'autopilot',
|
||||
description: 'Execute a task autonomously using TDD workflow',
|
||||
commandClass: AutopilotCommand as any,
|
||||
category: 'development'
|
||||
},
|
||||
|
||||
// Authentication & Context Commands
|
||||
{
|
||||
name: 'auth',
|
||||
description: 'Manage authentication with tryhamster.com',
|
||||
commandClass: AuthCommand as any,
|
||||
category: 'auth'
|
||||
},
|
||||
{
|
||||
name: 'context',
|
||||
description: 'Manage workspace context (organization/brief)',
|
||||
commandClass: ContextCommand as any,
|
||||
category: 'auth'
|
||||
}
|
||||
];
|
||||
|
||||
/**
|
||||
* Register all commands on a program instance
|
||||
* @param program - Commander program to register commands on
|
||||
*/
|
||||
static registerAll(program: Command): void {
|
||||
for (const cmd of this.commands) {
|
||||
this.registerCommand(program, cmd);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Register specific commands by category
|
||||
* @param program - Commander program to register commands on
|
||||
* @param category - Category of commands to register
|
||||
*/
|
||||
static registerByCategory(
|
||||
program: Command,
|
||||
category: 'task' | 'auth' | 'utility' | 'development'
|
||||
): void {
|
||||
const categoryCommands = this.commands.filter(
|
||||
(cmd) => cmd.category === category
|
||||
);
|
||||
|
||||
for (const cmd of categoryCommands) {
|
||||
this.registerCommand(program, cmd);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Register a single command by name
|
||||
* @param program - Commander program to register the command on
|
||||
* @param name - Name of the command to register
|
||||
*/
|
||||
static registerByName(program: Command, name: string): void {
|
||||
const cmd = this.commands.find((c) => c.name === name);
|
||||
if (cmd) {
|
||||
this.registerCommand(program, cmd);
|
||||
} else {
|
||||
throw new Error(`Command '${name}' not found in registry`);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Register a single command
|
||||
* @param program - Commander program to register the command on
|
||||
* @param metadata - Command metadata
|
||||
*/
|
||||
private static registerCommand(
|
||||
program: Command,
|
||||
metadata: CommandMetadata
|
||||
): void {
|
||||
const CommandClass = metadata.commandClass as any;
|
||||
|
||||
// Use the static registration method that all commands have
|
||||
if (CommandClass.registerOn) {
|
||||
CommandClass.registerOn(program);
|
||||
} else if (CommandClass.register) {
|
||||
CommandClass.register(program);
|
||||
} else {
|
||||
// Fallback to creating instance and adding
|
||||
const instance = new CommandClass();
|
||||
program.addCommand(instance);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Get all registered command names
|
||||
*/
|
||||
static getCommandNames(): string[] {
|
||||
return this.commands.map((cmd) => cmd.name);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get commands by category
|
||||
*/
|
||||
static getCommandsByCategory(
|
||||
category: 'task' | 'auth' | 'utility' | 'development'
|
||||
): CommandMetadata[] {
|
||||
return this.commands.filter((cmd) => cmd.category === category);
|
||||
}
|
||||
|
||||
/**
|
||||
* Add a new command to the registry
|
||||
* @param metadata - Command metadata to add
|
||||
*/
|
||||
static addCommand(metadata: CommandMetadata): void {
|
||||
// Check if command already exists
|
||||
if (this.commands.some((cmd) => cmd.name === metadata.name)) {
|
||||
throw new Error(`Command '${metadata.name}' already exists in registry`);
|
||||
}
|
||||
|
||||
this.commands.push(metadata);
|
||||
}
|
||||
|
||||
/**
|
||||
* Remove a command from the registry
|
||||
* @param name - Name of the command to remove
|
||||
*/
|
||||
static removeCommand(name: string): boolean {
|
||||
const index = this.commands.findIndex((cmd) => cmd.name === name);
|
||||
if (index >= 0) {
|
||||
this.commands.splice(index, 1);
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get command metadata by name
|
||||
* @param name - Name of the command
|
||||
*/
|
||||
static getCommand(name: string): CommandMetadata | undefined {
|
||||
return this.commands.find((cmd) => cmd.name === name);
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if a command exists
|
||||
* @param name - Name of the command
|
||||
*/
|
||||
static hasCommand(name: string): boolean {
|
||||
return this.commands.some((cmd) => cmd.name === name);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get a formatted list of all commands for display
|
||||
*/
|
||||
static getFormattedCommandList(): string {
|
||||
const categories = {
|
||||
task: 'Task Management',
|
||||
auth: 'Authentication & Context',
|
||||
utility: 'Utilities',
|
||||
development: 'Development'
|
||||
};
|
||||
|
||||
let output = '';
|
||||
|
||||
for (const [category, title] of Object.entries(categories)) {
|
||||
const cmds = this.getCommandsByCategory(
|
||||
category as keyof typeof categories
|
||||
);
|
||||
if (cmds.length > 0) {
|
||||
output += `\n${title}:\n`;
|
||||
for (const cmd of cmds) {
|
||||
output += ` ${cmd.name.padEnd(20)} ${cmd.description}\n`;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return output;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Convenience function to register all CLI commands
|
||||
* @param program - Commander program instance
|
||||
*/
|
||||
export function registerAllCommands(program: Command): void {
|
||||
CommandRegistry.registerAll(program);
|
||||
}
|
||||
|
||||
/**
|
||||
* Convenience function to register commands by category
|
||||
* @param program - Commander program instance
|
||||
* @param category - Category to register
|
||||
*/
|
||||
export function registerCommandsByCategory(
|
||||
program: Command,
|
||||
category: 'task' | 'auth' | 'utility' | 'development'
|
||||
): void {
|
||||
CommandRegistry.registerByCategory(program, category);
|
||||
}
|
||||
|
||||
// Export the registry for direct access if needed
|
||||
export default CommandRegistry;
|
||||
@@ -493,18 +493,7 @@ export class AuthCommand extends Command {
|
||||
}
|
||||
|
||||
/**
|
||||
* Static method to register this command on an existing program
|
||||
* This is for gradual migration - allows commands.js to use this
|
||||
*/
|
||||
static registerOn(program: Command): Command {
|
||||
const authCommand = new AuthCommand();
|
||||
program.addCommand(authCommand);
|
||||
return authCommand;
|
||||
}
|
||||
|
||||
/**
|
||||
* Alternative registration that returns the command for chaining
|
||||
* Can also configure the command name if needed
|
||||
* Register this command on an existing program
|
||||
*/
|
||||
static register(program: Command, name?: string): AuthCommand {
|
||||
const authCommand = new AuthCommand(name);
|
||||
|
||||
515 apps/cli/src/commands/autopilot.command.ts Normal file
@@ -0,0 +1,515 @@
|
||||
/**
|
||||
* @fileoverview AutopilotCommand using Commander's native class pattern
|
||||
* Extends Commander.Command for better integration with the framework
|
||||
* This is a thin presentation layer over @tm/core's autopilot functionality
|
||||
*/
|
||||
|
||||
import { Command } from 'commander';
|
||||
import chalk from 'chalk';
|
||||
import boxen from 'boxen';
|
||||
import ora, { type Ora } from 'ora';
|
||||
import {
|
||||
createTaskMasterCore,
|
||||
type TaskMasterCore,
|
||||
type Task,
|
||||
type Subtask
|
||||
} from '@tm/core';
|
||||
import * as ui from '../utils/ui.js';
|
||||
|
||||
/**
|
||||
* CLI-specific options interface for the autopilot command
|
||||
*/
|
||||
export interface AutopilotCommandOptions {
|
||||
format?: 'text' | 'json';
|
||||
project?: string;
|
||||
dryRun?: boolean;
|
||||
}
|
||||
|
||||
/**
|
||||
* Preflight check result for a single check
|
||||
*/
|
||||
export interface PreflightCheckResult {
|
||||
success: boolean;
|
||||
message?: string;
|
||||
}
|
||||
|
||||
/**
|
||||
* Overall preflight check results
|
||||
*/
|
||||
export interface PreflightResult {
|
||||
success: boolean;
|
||||
testCommand: PreflightCheckResult;
|
||||
gitWorkingTree: PreflightCheckResult;
|
||||
requiredTools: PreflightCheckResult;
|
||||
defaultBranch: PreflightCheckResult;
|
||||
}
|
||||
|
||||
/**
|
||||
* CLI-specific result type from autopilot command
|
||||
*/
|
||||
export interface AutopilotCommandResult {
|
||||
success: boolean;
|
||||
taskId: string;
|
||||
task?: Task;
|
||||
error?: string;
|
||||
message?: string;
|
||||
}
|
||||
|
||||
/**
|
||||
* AutopilotCommand extending Commander's Command class
|
||||
* This is a thin presentation layer over @tm/core's autopilot functionality
|
||||
*/
|
||||
export class AutopilotCommand extends Command {
|
||||
private tmCore?: TaskMasterCore;
|
||||
private lastResult?: AutopilotCommandResult;
|
||||
|
||||
constructor(name?: string) {
|
||||
super(name || 'autopilot');
|
||||
|
||||
// Configure the command
|
||||
this.description(
|
||||
'Execute a task autonomously using TDD workflow with git integration'
|
||||
)
|
||||
.argument('<taskId>', 'Task ID to execute autonomously')
|
||||
.option('-f, --format <format>', 'Output format (text, json)', 'text')
|
||||
.option('-p, --project <path>', 'Project root directory', process.cwd())
|
||||
.option(
|
||||
'--dry-run',
|
||||
'Show what would be executed without performing actions'
|
||||
)
|
||||
.action(async (taskId: string, options: AutopilotCommandOptions) => {
|
||||
await this.executeCommand(taskId, options);
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Execute the autopilot command
|
||||
*/
|
||||
private async executeCommand(
|
||||
taskId: string,
|
||||
options: AutopilotCommandOptions
|
||||
): Promise<void> {
|
||||
let spinner: Ora | null = null;
|
||||
|
||||
try {
|
||||
// Validate options
|
||||
if (!this.validateOptions(options)) {
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
// Validate task ID format
|
||||
if (!this.validateTaskId(taskId)) {
|
||||
ui.displayError(`Invalid task ID format: ${taskId}`);
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
// Initialize tm-core with spinner
|
||||
spinner = ora('Initializing Task Master...').start();
|
||||
await this.initializeCore(options.project || process.cwd());
|
||||
spinner.succeed('Task Master initialized');
|
||||
|
||||
// Load and validate task existence
|
||||
spinner = ora(`Loading task ${taskId}...`).start();
|
||||
const task = await this.loadTask(taskId);
|
||||
|
||||
if (!task) {
|
||||
spinner.fail(`Task ${taskId} not found`);
|
||||
ui.displayError(`Task with ID ${taskId} does not exist`);
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
spinner.succeed(`Task ${taskId} loaded`);
|
||||
|
||||
// Display task information
|
||||
this.displayTaskInfo(task, options.dryRun || false);
|
||||
|
||||
// Execute autopilot logic (placeholder for now)
|
||||
const result = await this.performAutopilot(taskId, task, options);
|
||||
|
||||
// Store result for programmatic access
|
||||
this.setLastResult(result);
|
||||
|
||||
// Display results
|
||||
this.displayResults(result, options);
|
||||
} catch (error: unknown) {
|
||||
if (spinner) {
|
||||
spinner.fail('Operation failed');
|
||||
}
|
||||
this.handleError(error);
|
||||
process.exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Validate command options
|
||||
*/
|
||||
private validateOptions(options: AutopilotCommandOptions): boolean {
|
||||
// Validate format
|
||||
if (options.format && !['text', 'json'].includes(options.format)) {
|
||||
console.error(chalk.red(`Invalid format: ${options.format}`));
|
||||
console.error(chalk.gray(`Valid formats: text, json`));
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
/**
|
||||
* Validate task ID format
|
||||
*/
|
||||
private validateTaskId(taskId: string): boolean {
|
||||
// Task ID should be a number or number.number format (e.g., "1" or "1.2")
|
||||
const taskIdPattern = /^\d+(\.\d+)*$/;
|
||||
return taskIdPattern.test(taskId);
|
||||
}
|
||||
|
||||
/**
|
||||
* Initialize TaskMasterCore
|
||||
*/
|
||||
private async initializeCore(projectRoot: string): Promise<void> {
|
||||
if (!this.tmCore) {
|
||||
this.tmCore = await createTaskMasterCore({ projectPath: projectRoot });
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Load task from tm-core
|
||||
*/
|
||||
private async loadTask(taskId: string): Promise<Task | null> {
|
||||
if (!this.tmCore) {
|
||||
throw new Error('TaskMasterCore not initialized');
|
||||
}
|
||||
|
||||
try {
|
||||
const { task } = await this.tmCore.getTaskWithSubtask(taskId);
|
||||
return task;
|
||||
} catch (error) {
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Display task information before execution
|
||||
*/
|
||||
private displayTaskInfo(task: Task, isDryRun: boolean): void {
|
||||
const prefix = isDryRun ? '[DRY RUN] ' : '';
|
||||
console.log();
|
||||
console.log(
|
||||
boxen(
|
||||
chalk.cyan.bold(`${prefix}Autopilot Task Execution`) +
|
||||
'\n\n' +
|
||||
chalk.white(`Task ID: ${task.id}`) +
|
||||
'\n' +
|
||||
chalk.white(`Title: ${task.title}`) +
|
||||
'\n' +
|
||||
chalk.white(`Status: ${task.status}`) +
|
||||
(task.description ? '\n\n' + chalk.gray(task.description) : ''),
|
||||
{
|
||||
padding: 1,
|
||||
borderStyle: 'round',
|
||||
borderColor: 'cyan',
|
||||
width: process.stdout.columns ? process.stdout.columns * 0.95 : 100
|
||||
}
|
||||
)
|
||||
);
|
||||
console.log();
|
||||
}
|
||||
|
||||
/**
|
||||
* Perform autopilot execution using PreflightChecker and TaskLoader
|
||||
*/
|
||||
private async performAutopilot(
|
||||
taskId: string,
|
||||
task: Task,
|
||||
options: AutopilotCommandOptions
|
||||
): Promise<AutopilotCommandResult> {
|
||||
// Run preflight checks
|
||||
const preflightResult = await this.runPreflightChecks(options);
|
||||
if (!preflightResult.success) {
|
||||
return {
|
||||
success: false,
|
||||
taskId,
|
||||
task,
|
||||
error: 'Preflight checks failed',
|
||||
message: 'Please resolve the issues above before running autopilot'
|
||||
};
|
||||
}
|
||||
|
||||
// Validate task structure and get execution order
|
||||
const validationResult = await this.validateTaskStructure(
|
||||
taskId,
|
||||
task,
|
||||
options
|
||||
);
|
||||
if (!validationResult.success) {
|
||||
return validationResult;
|
||||
}
|
||||
|
||||
// Display execution plan
|
||||
this.displayExecutionPlan(
|
||||
validationResult.task!,
|
||||
validationResult.orderedSubtasks!,
|
||||
options
|
||||
);
|
||||
|
||||
return {
|
||||
success: true,
|
||||
taskId,
|
||||
task: validationResult.task,
|
||||
message: options.dryRun
|
||||
? 'Dry run completed successfully'
|
||||
: 'Autopilot execution ready (actual execution not yet implemented)'
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Run preflight checks and display results
|
||||
*/
|
||||
private async runPreflightChecks(
|
||||
options: AutopilotCommandOptions
|
||||
): Promise<PreflightResult> {
|
||||
const { PreflightChecker } = await import('@tm/core');
|
||||
|
||||
console.log();
|
||||
console.log(chalk.cyan.bold('Running preflight checks...'));
|
||||
|
||||
const preflightChecker = new PreflightChecker(
|
||||
options.project || process.cwd()
|
||||
);
|
||||
const result = await preflightChecker.runAllChecks();
|
||||
|
||||
this.displayPreflightResults(result);
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
/**
|
||||
* Validate task structure and get execution order
|
||||
*/
|
||||
private async validateTaskStructure(
|
||||
taskId: string,
|
||||
task: Task,
|
||||
options: AutopilotCommandOptions
|
||||
): Promise<AutopilotCommandResult & { orderedSubtasks?: Subtask[] }> {
|
||||
const { TaskLoaderService } = await import('@tm/core');
|
||||
|
||||
console.log();
|
||||
console.log(chalk.cyan.bold('Validating task structure...'));
|
||||
|
||||
const taskLoader = new TaskLoaderService(options.project || process.cwd());
|
||||
const validationResult = await taskLoader.loadAndValidateTask(taskId);
|
||||
|
||||
if (!validationResult.success) {
|
||||
await taskLoader.cleanup();
|
||||
return {
|
||||
success: false,
|
||||
taskId,
|
||||
task,
|
||||
error: validationResult.errorMessage,
|
||||
message: validationResult.suggestion
|
||||
};
|
||||
}
|
||||
|
||||
const orderedSubtasks = taskLoader.getExecutionOrder(
|
||||
validationResult.task!
|
||||
);
|
||||
|
||||
await taskLoader.cleanup();
|
||||
|
||||
return {
|
||||
success: true,
|
||||
taskId,
|
||||
task: validationResult.task,
|
||||
orderedSubtasks
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Display execution plan with subtasks and TDD workflow
|
||||
*/
|
||||
private displayExecutionPlan(
|
||||
task: Task,
|
||||
orderedSubtasks: Subtask[],
|
||||
options: AutopilotCommandOptions
|
||||
): void {
|
||||
console.log();
|
||||
console.log(chalk.green.bold('✓ All checks passed!'));
|
||||
console.log();
|
||||
console.log(chalk.cyan.bold('Execution Plan:'));
|
||||
console.log(chalk.white(`Task: ${task.title}`));
|
||||
console.log(
|
||||
chalk.gray(
|
||||
`${orderedSubtasks.length} subtasks will be executed in dependency order`
|
||||
)
|
||||
);
|
||||
console.log();
|
||||
|
||||
// Display subtasks
|
||||
orderedSubtasks.forEach((subtask: Subtask, index: number) => {
|
||||
console.log(
|
||||
chalk.yellow(`${index + 1}. ${task.id}.${subtask.id}: ${subtask.title}`)
|
||||
);
|
||||
if (subtask.dependencies && subtask.dependencies.length > 0) {
|
||||
console.log(
|
||||
chalk.gray(` Dependencies: ${subtask.dependencies.join(', ')}`)
|
||||
);
|
||||
}
|
||||
});
|
||||
|
||||
console.log();
|
||||
console.log(
|
||||
chalk.cyan('Autopilot would execute each subtask using TDD workflow:')
|
||||
);
|
||||
console.log(chalk.gray(' 1. RED phase: Write failing test'));
|
||||
console.log(chalk.gray(' 2. GREEN phase: Implement code to pass test'));
|
||||
console.log(chalk.gray(' 3. COMMIT phase: Commit changes'));
|
||||
console.log();
|
||||
|
||||
if (options.dryRun) {
|
||||
console.log(
|
||||
chalk.yellow('This was a dry run. Use without --dry-run to execute.')
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Display preflight check results
|
||||
*/
|
||||
private displayPreflightResults(result: PreflightResult): void {
|
||||
const checks = [
|
||||
{ name: 'Test command', result: result.testCommand },
|
||||
{ name: 'Git working tree', result: result.gitWorkingTree },
|
||||
{ name: 'Required tools', result: result.requiredTools },
|
||||
{ name: 'Default branch', result: result.defaultBranch }
|
||||
];
|
||||
|
||||
checks.forEach((check) => {
|
||||
const icon = check.result.success ? chalk.green('✓') : chalk.red('✗');
|
||||
const status = check.result.success
|
||||
? chalk.green('PASS')
|
||||
: chalk.red('FAIL');
|
||||
console.log(`${icon} ${chalk.white(check.name)}: ${status}`);
|
||||
if (check.result.message) {
|
||||
console.log(chalk.gray(` ${check.result.message}`));
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Display results based on format
|
||||
*/
|
||||
private displayResults(
|
||||
result: AutopilotCommandResult,
|
||||
options: AutopilotCommandOptions
|
||||
): void {
|
||||
const format = options.format || 'text';
|
||||
|
||||
switch (format) {
|
||||
case 'json':
|
||||
this.displayJson(result);
|
||||
break;
|
||||
|
||||
case 'text':
|
||||
default:
|
||||
this.displayTextResult(result);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Display in JSON format
|
||||
*/
|
||||
private displayJson(result: AutopilotCommandResult): void {
|
||||
console.log(JSON.stringify(result, null, 2));
|
||||
}
|
||||
|
||||
/**
|
||||
* Display result in text format
|
||||
*/
|
||||
private displayTextResult(result: AutopilotCommandResult): void {
|
||||
if (result.success) {
|
||||
console.log(
|
||||
boxen(
|
||||
chalk.green.bold('✓ Autopilot Command Completed') +
|
||||
'\n\n' +
|
||||
chalk.white(result.message || 'Execution complete'),
|
||||
{
|
||||
padding: 1,
|
||||
borderStyle: 'round',
|
||||
borderColor: 'green',
|
||||
margin: { top: 1 }
|
||||
}
|
||||
)
|
||||
);
|
||||
} else {
|
||||
console.log(
|
||||
boxen(
|
||||
chalk.red.bold('✗ Autopilot Command Failed') +
|
||||
'\n\n' +
|
||||
chalk.white(result.error || 'Unknown error'),
|
||||
{
|
||||
padding: 1,
|
||||
borderStyle: 'round',
|
||||
borderColor: 'red',
|
||||
margin: { top: 1 }
|
||||
}
|
||||
)
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Handle general errors
|
||||
*/
|
||||
private handleError(error: unknown): void {
|
||||
const errorObj = error as {
|
||||
getSanitizedDetails?: () => { message: string };
|
||||
message?: string;
|
||||
stack?: string;
|
||||
};
|
||||
|
||||
const msg = errorObj?.getSanitizedDetails?.() ?? {
|
||||
message: errorObj?.message ?? String(error)
|
||||
};
|
||||
console.error(chalk.red(`Error: ${msg.message || 'Unexpected error'}`));
|
||||
|
||||
// Show stack trace in development mode or when DEBUG is set
|
||||
const isDevelopment = process.env.NODE_ENV !== 'production';
|
||||
if ((isDevelopment || process.env.DEBUG) && errorObj.stack) {
|
||||
console.error(chalk.gray(errorObj.stack));
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Set the last result for programmatic access
|
||||
*/
|
||||
private setLastResult(result: AutopilotCommandResult): void {
|
||||
this.lastResult = result;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the last result (for programmatic usage)
|
||||
*/
|
||||
getLastResult(): AutopilotCommandResult | undefined {
|
||||
return this.lastResult;
|
||||
}
|
||||
|
||||
/**
|
||||
* Clean up resources
|
||||
*/
|
||||
async cleanup(): Promise<void> {
|
||||
if (this.tmCore) {
|
||||
await this.tmCore.close();
|
||||
this.tmCore = undefined;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Register this command on an existing program
|
||||
*/
|
||||
static register(program: Command, name?: string): AutopilotCommand {
|
||||
const autopilotCommand = new AutopilotCommand(name);
|
||||
program.addCommand(autopilotCommand);
|
||||
return autopilotCommand;
|
||||
}
|
||||
}
|
||||
@@ -694,16 +694,7 @@ export class ContextCommand extends Command {
|
||||
}
|
||||
|
||||
/**
|
||||
* Static method to register this command on an existing program
|
||||
*/
|
||||
static registerOn(program: Command): Command {
|
||||
const contextCommand = new ContextCommand();
|
||||
program.addCommand(contextCommand);
|
||||
return contextCommand;
|
||||
}
|
||||
|
||||
/**
|
||||
* Alternative registration that returns the command for chaining
|
||||
* Register this command on an existing program
|
||||
*/
|
||||
static register(program: Command, name?: string): ContextCommand {
|
||||
const contextCommand = new ContextCommand(name);
|
||||
|
||||
379 apps/cli/src/commands/export.command.ts Normal file
@@ -0,0 +1,379 @@
|
||||
/**
|
||||
* @fileoverview Export command for exporting tasks to external systems
|
||||
* Provides functionality to export tasks to Hamster briefs
|
||||
*/
|
||||
|
||||
import { Command } from 'commander';
|
||||
import chalk from 'chalk';
|
||||
import inquirer from 'inquirer';
|
||||
import ora, { Ora } from 'ora';
|
||||
import {
|
||||
AuthManager,
|
||||
AuthenticationError,
|
||||
type UserContext
|
||||
} from '@tm/core/auth';
|
||||
import { TaskMasterCore, type ExportResult } from '@tm/core';
|
||||
import * as ui from '../utils/ui.js';
|
||||
|
||||
/**
|
||||
* Result type from export command
|
||||
*/
|
||||
export interface ExportCommandResult {
|
||||
success: boolean;
|
||||
action: 'export' | 'validate' | 'cancelled';
|
||||
result?: ExportResult;
|
||||
message?: string;
|
||||
}
|
||||
|
||||
/**
|
||||
* ExportCommand extending Commander's Command class
|
||||
* Handles task export to external systems
|
||||
*/
|
||||
export class ExportCommand extends Command {
|
||||
private authManager: AuthManager;
|
||||
private taskMasterCore?: TaskMasterCore;
|
||||
private lastResult?: ExportCommandResult;
|
||||
|
||||
constructor(name?: string) {
|
||||
super(name || 'export');
|
||||
|
||||
// Initialize auth manager
|
||||
this.authManager = AuthManager.getInstance();
|
||||
|
||||
// Configure the command
|
||||
this.description('Export tasks to external systems (e.g., Hamster briefs)');
|
||||
|
||||
// Add options
|
||||
this.option('--org <id>', 'Organization ID to export to');
|
||||
this.option('--brief <id>', 'Brief ID to export tasks to');
|
||||
this.option('--tag <tag>', 'Export tasks from a specific tag');
|
||||
this.option(
|
||||
'--status <status>',
|
||||
'Filter tasks by status (pending, in-progress, done, etc.)'
|
||||
);
|
||||
this.option('--exclude-subtasks', 'Exclude subtasks from export');
|
||||
this.option('-y, --yes', 'Skip confirmation prompt');
|
||||
|
||||
// Accept optional positional argument for brief ID or Hamster URL
|
||||
this.argument('[briefOrUrl]', 'Brief ID or Hamster brief URL');
|
||||
|
||||
// Default action
|
||||
this.action(async (briefOrUrl?: string, options?: any) => {
|
||||
await this.executeExport(briefOrUrl, options);
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Initialize the TaskMasterCore
|
||||
*/
|
||||
private async initializeServices(): Promise<void> {
|
||||
if (this.taskMasterCore) {
|
||||
return;
|
||||
}
|
||||
|
||||
try {
|
||||
// Initialize TaskMasterCore
|
||||
this.taskMasterCore = await TaskMasterCore.create({
|
||||
projectPath: process.cwd()
|
||||
});
|
||||
} catch (error) {
|
||||
throw new Error(
|
||||
`Failed to initialize services: ${(error as Error).message}`
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Execute the export command
|
||||
*/
|
||||
private async executeExport(
|
||||
briefOrUrl?: string,
|
||||
options?: any
|
||||
): Promise<void> {
|
||||
let spinner: Ora | undefined;
|
||||
|
||||
try {
|
||||
// Check authentication
|
||||
if (!this.authManager.isAuthenticated()) {
|
||||
ui.displayError('Not authenticated. Run "tm auth login" first.');
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
// Initialize services
|
||||
await this.initializeServices();
|
||||
|
||||
// Get current context
|
||||
const context = this.authManager.getContext();
|
||||
|
||||
// Determine org and brief IDs
|
||||
let orgId = options?.org || context?.orgId;
|
||||
let briefId = options?.brief || briefOrUrl || context?.briefId;
|
||||
|
||||
// If a URL/ID was provided as argument, resolve it
|
||||
if (briefOrUrl && !options?.brief) {
|
||||
spinner = ora('Resolving brief...').start();
|
||||
const resolvedBrief = await this.resolveBriefInput(briefOrUrl);
|
||||
if (resolvedBrief) {
|
||||
briefId = resolvedBrief.briefId;
|
||||
orgId = resolvedBrief.orgId;
|
||||
spinner.succeed('Brief resolved');
|
||||
} else {
|
||||
spinner.fail('Could not resolve brief');
|
||||
process.exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
// Validate we have necessary IDs
|
||||
if (!orgId) {
|
||||
ui.displayError(
|
||||
'No organization selected. Run "tm context org" or use --org flag.'
|
||||
);
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
if (!briefId) {
|
||||
ui.displayError(
|
||||
'No brief specified. Run "tm context brief", provide a brief ID/URL, or use --brief flag.'
|
||||
);
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
// Confirm export if not auto-confirmed
|
||||
if (!options?.yes) {
|
||||
const confirmed = await this.confirmExport(orgId, briefId, context);
|
||||
if (!confirmed) {
|
||||
ui.displayWarning('Export cancelled');
|
||||
this.lastResult = {
|
||||
success: false,
|
||||
action: 'cancelled',
|
||||
message: 'User cancelled export'
|
||||
};
|
||||
process.exit(0);
|
||||
}
|
||||
}
|
||||
|
||||
// Perform export
|
||||
spinner = ora('Exporting tasks...').start();
|
||||
|
||||
const exportResult = await this.taskMasterCore!.exportTasks({
|
||||
orgId,
|
||||
briefId,
|
||||
tag: options?.tag,
|
||||
status: options?.status,
|
||||
excludeSubtasks: options?.excludeSubtasks || false
|
||||
});
|
||||
|
||||
if (exportResult.success) {
|
||||
spinner.succeed(
|
||||
`Successfully exported ${exportResult.taskCount} task(s) to brief`
|
||||
);
|
||||
|
||||
// Display summary
|
||||
console.log(chalk.cyan('\n📤 Export Summary\n'));
|
||||
console.log(chalk.white(` Organization: ${orgId}`));
|
||||
console.log(chalk.white(` Brief: ${briefId}`));
|
||||
console.log(chalk.white(` Tasks exported: ${exportResult.taskCount}`));
|
||||
if (options?.tag) {
|
||||
console.log(chalk.gray(` Tag: ${options.tag}`));
|
||||
}
|
||||
if (options?.status) {
|
||||
console.log(chalk.gray(` Status filter: ${options.status}`));
|
||||
}
|
||||
|
||||
if (exportResult.message) {
|
||||
console.log(chalk.gray(`\n ${exportResult.message}`));
|
||||
}
|
||||
} else {
|
||||
spinner.fail('Export failed');
|
||||
if (exportResult.error) {
|
||||
console.error(chalk.red(`\n✗ ${exportResult.error.message}`));
|
||||
}
|
||||
}
|
||||
|
||||
this.lastResult = {
|
||||
success: exportResult.success,
|
||||
action: 'export',
|
||||
result: exportResult
|
||||
};
|
||||
} catch (error: any) {
|
||||
if (spinner?.isSpinning) spinner.fail('Export failed');
|
||||
this.handleError(error);
|
||||
process.exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Resolve brief input to get brief and org IDs
|
||||
*/
|
||||
private async resolveBriefInput(
|
||||
briefOrUrl: string
|
||||
): Promise<{ briefId: string; orgId: string } | null> {
|
||||
try {
|
||||
// Extract brief ID from input
|
||||
const briefId = this.extractBriefId(briefOrUrl);
|
||||
if (!briefId) {
|
||||
return null;
|
||||
}
|
||||
|
||||
// Fetch brief to get organization
|
||||
const brief = await this.authManager.getBrief(briefId);
|
||||
if (!brief) {
|
||||
ui.displayError('Brief not found or you do not have access');
|
||||
return null;
|
||||
}
|
||||
|
||||
return {
|
||||
briefId: brief.id,
|
||||
orgId: brief.accountId
|
||||
};
|
||||
} catch (error) {
|
||||
console.error(chalk.red(`Failed to resolve brief: ${error}`));
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Extract a brief ID from raw input (ID or URL)
|
||||
*/
|
||||
private extractBriefId(input: string): string | null {
|
||||
const raw = input?.trim() ?? '';
|
||||
if (!raw) return null;
|
||||
|
||||
const parseUrl = (s: string): URL | null => {
|
||||
try {
|
||||
return new URL(s);
|
||||
} catch {}
|
||||
try {
|
||||
return new URL(`https://${s}`);
|
||||
} catch {}
|
||||
return null;
|
||||
};
|
||||
|
||||
const fromParts = (path: string): string | null => {
|
||||
const parts = path.split('/').filter(Boolean);
|
||||
const briefsIdx = parts.lastIndexOf('briefs');
|
||||
const candidate =
|
||||
briefsIdx >= 0 && parts.length > briefsIdx + 1
|
||||
? parts[briefsIdx + 1]
|
||||
: parts[parts.length - 1];
|
||||
return candidate?.trim() || null;
|
||||
};
|
||||
|
||||
// Try URL parsing
|
||||
const url = parseUrl(raw);
|
||||
if (url) {
|
||||
const qId = url.searchParams.get('id') || url.searchParams.get('briefId');
|
||||
const candidate = (qId || fromParts(url.pathname)) ?? null;
|
||||
if (candidate) {
|
||||
if (this.isLikelyId(candidate) || candidate.length >= 8) {
|
||||
return candidate;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Check if it looks like a path
|
||||
if (raw.includes('/')) {
|
||||
const candidate = fromParts(raw);
|
||||
if (candidate && (this.isLikelyId(candidate) || candidate.length >= 8)) {
|
||||
return candidate;
|
||||
}
|
||||
}
|
||||
|
||||
// Return raw if it looks like an ID
|
||||
return raw;
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if a string looks like a brief ID
|
||||
*/
|
||||
private isLikelyId(value: string): boolean {
|
||||
const uuidRegex =
|
||||
/^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$/;
|
||||
const ulidRegex = /^[0-9A-HJKMNP-TV-Z]{26}$/i;
|
||||
const slugRegex = /^[A-Za-z0-9_-]{16,}$/;
|
||||
return (
|
||||
uuidRegex.test(value) || ulidRegex.test(value) || slugRegex.test(value)
|
||||
);
|
||||
}
|
||||
|
||||
/**
|
||||
* Confirm export with the user
|
||||
*/
|
||||
private async confirmExport(
|
||||
orgId: string,
|
||||
briefId: string,
|
||||
context: UserContext | null
|
||||
): Promise<boolean> {
|
||||
console.log(chalk.cyan('\n📤 Export Tasks\n'));
|
||||
|
||||
// Show org name if available
|
||||
if (context?.orgName) {
|
||||
console.log(chalk.white(` Organization: ${context.orgName}`));
|
||||
console.log(chalk.gray(` ID: ${orgId}`));
|
||||
} else {
|
||||
console.log(chalk.white(` Organization ID: ${orgId}`));
|
||||
}
|
||||
|
||||
// Show brief info
|
||||
if (context?.briefName) {
|
||||
console.log(chalk.white(`\n Brief: ${context.briefName}`));
|
||||
console.log(chalk.gray(` ID: ${briefId}`));
|
||||
} else {
|
||||
console.log(chalk.white(`\n Brief ID: ${briefId}`));
|
||||
}
|
||||
|
||||
const { confirmed } = await inquirer.prompt([
|
||||
{
|
||||
type: 'confirm',
|
||||
name: 'confirmed',
|
||||
message: 'Do you want to proceed with export?',
|
||||
default: true
|
||||
}
|
||||
]);
|
||||
|
||||
return confirmed;
|
||||
}
|
||||
|
||||
/**
|
||||
* Handle errors
|
||||
*/
|
||||
private handleError(error: any): void {
|
||||
if (error instanceof AuthenticationError) {
|
||||
console.error(chalk.red(`\n✗ ${error.message}`));
|
||||
|
||||
if (error.code === 'NOT_AUTHENTICATED') {
|
||||
ui.displayWarning('Please authenticate first: tm auth login');
|
||||
}
|
||||
} else {
|
||||
const msg = error?.message ?? String(error);
|
||||
console.error(chalk.red(`Error: ${msg}`));
|
||||
|
||||
if (error.stack && process.env.DEBUG) {
|
||||
console.error(chalk.gray(error.stack));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the last export result (useful for testing)
|
||||
*/
|
||||
public getLastResult(): ExportCommandResult | undefined {
|
||||
return this.lastResult;
|
||||
}
|
||||
|
||||
/**
|
||||
* Clean up resources
|
||||
*/
|
||||
async cleanup(): Promise<void> {
|
||||
// No resources to clean up
|
||||
}
|
||||
|
||||
/**
|
||||
* Register this command on an existing program
|
||||
*/
|
||||
static register(program: Command, name?: string): ExportCommand {
|
||||
const exportCommand = new ExportCommand(name);
|
||||
program.addCommand(exportCommand);
|
||||
return exportCommand;
|
||||
}
|
||||
}
|
||||
@@ -246,7 +246,7 @@ export class ListTasksCommand extends Command {
|
||||
task.subtasks.forEach((subtask) => {
|
||||
const subIcon = STATUS_ICONS[subtask.status];
|
||||
console.log(
|
||||
` ${chalk.gray(`${task.id}.${subtask.id}`)} ${subIcon} ${chalk.gray(subtask.title)}`
|
||||
` ${chalk.gray(String(subtask.id))} ${subIcon} ${chalk.gray(subtask.title)}`
|
||||
);
|
||||
});
|
||||
}
|
||||
@@ -281,9 +281,14 @@ export class ListTasksCommand extends Command {
|
||||
const priorityBreakdown = getPriorityBreakdown(tasks);
|
||||
|
||||
// Find next task following the same logic as findNextTask
|
||||
const nextTask = this.findNextTask(tasks);
|
||||
const nextTaskInfo = this.findNextTask(tasks);
|
||||
|
||||
// Display dashboard boxes
|
||||
// Get the full task object with complexity data already included
|
||||
const nextTask = nextTaskInfo
|
||||
? tasks.find((t) => String(t.id) === String(nextTaskInfo.id))
|
||||
: undefined;
|
||||
|
||||
// Display dashboard boxes (nextTask already has complexity from storage enrichment)
|
||||
displayDashboards(
|
||||
taskStats,
|
||||
subtaskStats,
|
||||
@@ -292,7 +297,7 @@ export class ListTasksCommand extends Command {
|
||||
nextTask
|
||||
);
|
||||
|
||||
// Task table - no title, just show the table directly
|
||||
// Task table
|
||||
console.log(
|
||||
ui.createTaskTable(tasks, {
|
||||
showSubtasks: withSubtasks,
|
||||
@@ -303,14 +308,16 @@ export class ListTasksCommand extends Command {
|
||||
|
||||
// Display recommended next task section immediately after table
|
||||
if (nextTask) {
|
||||
// Find the full task object to get description
|
||||
const fullTask = tasks.find((t) => String(t.id) === String(nextTask.id));
|
||||
const description = fullTask ? getTaskDescription(fullTask) : undefined;
|
||||
const description = getTaskDescription(nextTask);
|
||||
|
||||
displayRecommendedNextTask({
|
||||
...nextTask,
|
||||
status: 'pending', // Next task is typically pending
|
||||
description
|
||||
id: nextTask.id,
|
||||
title: nextTask.title,
|
||||
priority: nextTask.priority,
|
||||
status: nextTask.status,
|
||||
dependencies: nextTask.dependencies,
|
||||
description,
|
||||
complexity: nextTask.complexity as number | undefined
|
||||
});
|
||||
} else {
|
||||
displayRecommendedNextTask(undefined);
|
||||
@@ -467,18 +474,7 @@ export class ListTasksCommand extends Command {
|
||||
}
|
||||
|
||||
/**
|
||||
* Static method to register this command on an existing program
|
||||
* This is for gradual migration - allows commands.js to use this
|
||||
*/
|
||||
static registerOn(program: Command): Command {
|
||||
const listCommand = new ListTasksCommand();
|
||||
program.addCommand(listCommand);
|
||||
return listCommand;
|
||||
}
|
||||
|
||||
/**
|
||||
* Alternative registration that returns the command for chaining
|
||||
* Can also configure the command name if needed
|
||||
* Register this command on an existing program
|
||||
*/
|
||||
static register(program: Command, name?: string): ListTasksCommand {
|
||||
const listCommand = new ListTasksCommand(name);
|
||||
|
||||
@@ -258,9 +258,6 @@ export class SetStatusCommand extends Command {
|
||||
)
|
||||
);
|
||||
}
|
||||
|
||||
// Show storage info
|
||||
console.log(chalk.gray(`\nUsing ${result.storageType} storage`));
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -290,18 +287,7 @@ export class SetStatusCommand extends Command {
|
||||
}
|
||||
|
||||
/**
|
||||
* Static method to register this command on an existing program
|
||||
* This is for gradual migration - allows commands.js to use this
|
||||
*/
|
||||
static registerOn(program: Command): Command {
|
||||
const setStatusCommand = new SetStatusCommand();
|
||||
program.addCommand(setStatusCommand);
|
||||
return setStatusCommand;
|
||||
}
|
||||
|
||||
/**
|
||||
* Alternative registration that returns the command for chaining
|
||||
* Can also configure the command name if needed
|
||||
* Register this command on an existing program
|
||||
*/
|
||||
static register(program: Command, name?: string): SetStatusCommand {
|
||||
const setStatusCommand = new SetStatusCommand(name);
|
||||
|
||||
@@ -322,18 +322,7 @@ export class ShowCommand extends Command {
|
||||
}
|
||||
|
||||
/**
|
||||
* Static method to register this command on an existing program
|
||||
* This is for gradual migration - allows commands.js to use this
|
||||
*/
|
||||
static registerOn(program: Command): Command {
|
||||
const showCommand = new ShowCommand();
|
||||
program.addCommand(showCommand);
|
||||
return showCommand;
|
||||
}
|
||||
|
||||
/**
|
||||
* Alternative registration that returns the command for chaining
|
||||
* Can also configure the command name if needed
|
||||
* Register this command on an existing program
|
||||
*/
|
||||
static register(program: Command, name?: string): ShowCommand {
|
||||
const showCommand = new ShowCommand(name);
|
||||
|
||||
@@ -493,16 +493,7 @@ export class StartCommand extends Command {
|
||||
}
|
||||
|
||||
/**
|
||||
* Static method to register this command on an existing program
|
||||
*/
|
||||
static registerOn(program: Command): Command {
|
||||
const startCommand = new StartCommand();
|
||||
program.addCommand(startCommand);
|
||||
return startCommand;
|
||||
}
|
||||
|
||||
/**
|
||||
* Alternative registration that returns the command for chaining
|
||||
* Register this command on an existing program
|
||||
*/
|
||||
static register(program: Command, name?: string): StartCommand {
|
||||
const startCommand = new StartCommand(name);
|
||||
|
||||
@@ -10,6 +10,16 @@ export { AuthCommand } from './commands/auth.command.js';
|
||||
export { ContextCommand } from './commands/context.command.js';
|
||||
export { StartCommand } from './commands/start.command.js';
|
||||
export { SetStatusCommand } from './commands/set-status.command.js';
|
||||
export { ExportCommand } from './commands/export.command.js';
|
||||
export { AutopilotCommand } from './commands/autopilot.command.js';
|
||||
|
||||
// Command Registry
|
||||
export {
|
||||
CommandRegistry,
|
||||
registerAllCommands,
|
||||
registerCommandsByCategory,
|
||||
type CommandMetadata
|
||||
} from './command-registry.js';
|
||||
|
||||
// UI utilities (for other commands to use)
|
||||
export * as ui from './utils/ui.js';
|
||||
|
||||
@@ -6,6 +6,7 @@
|
||||
import chalk from 'chalk';
|
||||
import boxen from 'boxen';
|
||||
import type { Task, TaskPriority } from '@tm/core/types';
|
||||
import { getComplexityWithColor } from '../../utils/ui.js';
|
||||
|
||||
/**
|
||||
* Statistics for task collection
|
||||
@@ -479,7 +480,7 @@ export function displayDependencyDashboard(
|
||||
? chalk.cyan(nextTask.dependencies.join(', '))
|
||||
: chalk.gray('None')
|
||||
}\n` +
|
||||
`Complexity: ${nextTask?.complexity || chalk.gray('N/A')}`;
|
||||
`Complexity: ${nextTask?.complexity !== undefined ? getComplexityWithColor(nextTask.complexity) : chalk.gray('N/A')}`;
|
||||
|
||||
return content;
|
||||
}
|
||||
|
||||
@@ -6,6 +6,7 @@
|
||||
import chalk from 'chalk';
|
||||
import boxen from 'boxen';
|
||||
import type { Task } from '@tm/core/types';
|
||||
import { getComplexityWithColor } from '../../utils/ui.js';
|
||||
|
||||
/**
|
||||
* Next task display options
|
||||
@@ -17,6 +18,7 @@ export interface NextTaskDisplayOptions {
|
||||
status?: string;
|
||||
dependencies?: (string | number)[];
|
||||
description?: string;
|
||||
complexity?: number;
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -82,6 +84,11 @@ export function displayRecommendedNextTask(
|
||||
: chalk.cyan(task.dependencies.join(', '));
|
||||
content.push(`Dependencies: ${depsDisplay}`);
|
||||
|
||||
// Complexity with color and label
|
||||
if (typeof task.complexity === 'number') {
|
||||
content.push(`Complexity: ${getComplexityWithColor(task.complexity)}`);
|
||||
}
|
||||
|
||||
// Description if available
|
||||
if (task.description) {
|
||||
content.push('');
|
||||
|
||||
@@ -9,7 +9,11 @@ import Table from 'cli-table3';
|
||||
import { marked, MarkedExtension } from 'marked';
|
||||
import { markedTerminal } from 'marked-terminal';
|
||||
import type { Task } from '@tm/core/types';
|
||||
import { getStatusWithColor, getPriorityWithColor } from '../../utils/ui.js';
|
||||
import {
|
||||
getStatusWithColor,
|
||||
getPriorityWithColor,
|
||||
getComplexityWithColor
|
||||
} from '../../utils/ui.js';
|
||||
|
||||
// Configure marked to use terminal renderer with subtle colors
|
||||
marked.use(
|
||||
@@ -108,7 +112,9 @@ export function displayTaskProperties(task: Task): void {
|
||||
getStatusWithColor(task.status),
|
||||
getPriorityWithColor(task.priority),
|
||||
deps,
|
||||
'N/A',
|
||||
typeof task.complexity === 'number'
|
||||
? getComplexityWithColor(task.complexity)
|
||||
: chalk.gray('N/A'),
|
||||
task.description || ''
|
||||
].join('\n');
|
||||
|
||||
@@ -186,8 +192,7 @@ export function displaySubtasks(
|
||||
status: any;
|
||||
description?: string;
|
||||
dependencies?: string[];
|
||||
}>,
|
||||
parentId: string | number
|
||||
}>
|
||||
): void {
|
||||
const terminalWidth = process.stdout.columns * 0.95 || 100;
|
||||
// Display subtasks header
|
||||
@@ -222,7 +227,7 @@ export function displaySubtasks(
|
||||
});
|
||||
|
||||
subtasks.forEach((subtask) => {
|
||||
const subtaskId = `${parentId}.${subtask.id}`;
|
||||
const subtaskId = String(subtask.id);
|
||||
|
||||
// Format dependencies
|
||||
const deps =
|
||||
@@ -323,7 +328,7 @@ export function displayTaskDetails(
|
||||
console.log(chalk.gray(` No subtasks with status '${statusFilter}'`));
|
||||
} else if (filteredSubtasks.length > 0) {
|
||||
console.log(); // Empty line for spacing
|
||||
displaySubtasks(filteredSubtasks, task.id);
|
||||
displaySubtasks(filteredSubtasks);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -158,10 +158,18 @@ export function displayUpgradeNotification(
|
||||
export async function performAutoUpdate(
|
||||
latestVersion: string
|
||||
): Promise<boolean> {
|
||||
if (process.env.TASKMASTER_SKIP_AUTO_UPDATE === '1' || process.env.CI) {
|
||||
console.log(
|
||||
chalk.dim('Skipping auto-update (TASKMASTER_SKIP_AUTO_UPDATE/CI).')
|
||||
);
|
||||
if (
|
||||
process.env.TASKMASTER_SKIP_AUTO_UPDATE === '1' ||
|
||||
process.env.CI ||
|
||||
process.env.NODE_ENV === 'test'
|
||||
) {
|
||||
const reason =
|
||||
process.env.TASKMASTER_SKIP_AUTO_UPDATE === '1'
|
||||
? 'TASKMASTER_SKIP_AUTO_UPDATE=1'
|
||||
: process.env.CI
|
||||
? 'CI environment'
|
||||
: 'NODE_ENV=test';
|
||||
console.log(chalk.dim(`Skipping auto-update (${reason})`));
|
||||
return false;
|
||||
}
|
||||
const spinner = ora({
|
||||
|
||||
@@ -84,7 +84,23 @@ export function getPriorityWithColor(priority: TaskPriority): string {
|
||||
}
|
||||
|
||||
/**
|
||||
* Get colored complexity display
|
||||
* Get complexity color and label based on score thresholds
|
||||
*/
|
||||
function getComplexityLevel(score: number): {
|
||||
color: (text: string) => string;
|
||||
label: string;
|
||||
} {
|
||||
if (score >= 7) {
|
||||
return { color: chalk.hex('#CC0000'), label: 'High' };
|
||||
} else if (score >= 4) {
|
||||
return { color: chalk.hex('#FF8800'), label: 'Medium' };
|
||||
} else {
|
||||
return { color: chalk.green, label: 'Low' };
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Get colored complexity display with dot indicator (simple format)
|
||||
*/
|
||||
export function getComplexityWithColor(complexity: number | string): string {
|
||||
const score =
|
||||
@@ -94,13 +110,20 @@ export function getComplexityWithColor(complexity: number | string): string {
|
||||
return chalk.gray('N/A');
|
||||
}
|
||||
|
||||
if (score >= 8) {
|
||||
return chalk.red.bold(`${score} (High)`);
|
||||
} else if (score >= 5) {
|
||||
return chalk.yellow(`${score} (Medium)`);
|
||||
} else {
|
||||
return chalk.green(`${score} (Low)`);
|
||||
const { color } = getComplexityLevel(score);
|
||||
return color(`● ${score}`);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get colored complexity display with /10 format (for dashboards)
|
||||
*/
|
||||
export function getComplexityWithScore(complexity: number | undefined): string {
|
||||
if (typeof complexity !== 'number') {
|
||||
return chalk.gray('N/A');
|
||||
}
|
||||
|
||||
const { color, label } = getComplexityLevel(complexity);
|
||||
return color(`${complexity}/10 (${label})`);
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -263,12 +286,12 @@ export function createTaskTable(
|
||||
// Adjust column widths to better match the original layout
|
||||
const baseColWidths = showComplexity
|
||||
? [
|
||||
Math.floor(terminalWidth * 0.06),
|
||||
Math.floor(terminalWidth * 0.1),
|
||||
Math.floor(terminalWidth * 0.4),
|
||||
Math.floor(terminalWidth * 0.15),
|
||||
Math.floor(terminalWidth * 0.12),
|
||||
Math.floor(terminalWidth * 0.1),
|
||||
Math.floor(terminalWidth * 0.2),
|
||||
Math.floor(terminalWidth * 0.12)
|
||||
Math.floor(terminalWidth * 0.1)
|
||||
] // ID, Title, Status, Priority, Dependencies, Complexity
|
||||
: [
|
||||
Math.floor(terminalWidth * 0.08),
|
||||
@@ -323,8 +346,12 @@ export function createTaskTable(
|
||||
}
|
||||
|
||||
if (showComplexity) {
|
||||
// Show N/A if no complexity score
|
||||
row.push(chalk.gray('N/A'));
|
||||
// Show complexity score from report if available
|
||||
if (typeof task.complexity === 'number') {
|
||||
row.push(getComplexityWithColor(task.complexity));
|
||||
} else {
|
||||
row.push(chalk.gray('N/A'));
|
||||
}
|
||||
}
|
||||
|
||||
table.push(row);
|
||||
@@ -350,7 +377,11 @@ export function createTaskTable(
|
||||
}
|
||||
|
||||
if (showComplexity) {
|
||||
subRow.push(chalk.gray('--'));
|
||||
const complexityDisplay =
|
||||
typeof subtask.complexity === 'number'
|
||||
? getComplexityWithColor(subtask.complexity)
|
||||
: '--';
|
||||
subRow.push(chalk.gray(complexityDisplay));
|
||||
}
|
||||
|
||||
table.push(subRow);
|
||||
|
||||
@@ -1,22 +1,24 @@
|
||||
# Task Master Documentation
|
||||
|
||||
Welcome to the Task Master documentation. Use the links below to navigate to the information you need:
|
||||
Welcome to the Task Master documentation. This documentation site provides comprehensive guides for getting started with Task Master.
|
||||
|
||||
## Getting Started
|
||||
|
||||
- [Configuration Guide](archive/configuration.md) - Set up environment variables and customize Task Master
|
||||
- [Tutorial](archive/ctutorial.md) - Step-by-step guide to getting started with Task Master
|
||||
- [Quick Start Guide](/getting-started/quick-start) - Complete setup and first-time usage guide
|
||||
- [Requirements](/getting-started/quick-start/requirements) - What you need to get started
|
||||
- [Installation](/getting-started/quick-start/installation) - How to install Task Master
|
||||
|
||||
## Reference
|
||||
## Core Capabilities
|
||||
|
||||
- [Command Reference](archive/ccommand-reference.md) - Complete list of all available commands
|
||||
- [Task Structure](archive/ctask-structure.md) - Understanding the task format and features
|
||||
- [MCP Tools](/capabilities/mcp) - Model Control Protocol integration
|
||||
- [CLI Commands](/capabilities/cli-root-commands) - Command line interface reference
|
||||
- [Task Structure](/capabilities/task-structure) - Understanding tasks and subtasks
|
||||
|
||||
## Examples & Licensing
|
||||
## Best Practices
|
||||
|
||||
- [Example Interactions](archive/cexamples.md) - Common Cursor AI interaction examples
|
||||
- [Licensing Information](archive/clicensing.md) - Detailed information about the license
|
||||
- [Advanced Configuration](/best-practices/configuration-advanced) - Detailed configuration options
|
||||
- [Advanced Tasks](/best-practices/advanced-tasks) - Working with complex task structures
|
||||
|
||||
## Need More Help?
|
||||
|
||||
If you can't find what you're looking for in these docs, please check the [main README](../README.md) or visit our [GitHub repository](https://github.com/eyaltoledano/claude-task-master).
|
||||
If you can't find what you're looking for in these docs, please check the root README.md or visit our [GitHub repository](https://github.com/eyaltoledano/claude-task-master).
|
||||
|
||||
@@ -156,7 +156,7 @@ sidebarTitle: "CLI Commands"
|
||||
# Use an alternative tasks file
|
||||
task-master analyze-complexity --file=custom-tasks.json
|
||||
|
||||
# Use Perplexity AI for research-backed complexity analysis
|
||||
# Use your configured research model for research-backed complexity analysis
|
||||
task-master analyze-complexity --research
|
||||
```
|
||||
</Accordion>
|
||||
|
||||
@@ -32,6 +32,7 @@
|
||||
"getting-started/quick-start/execute-quick"
|
||||
]
|
||||
},
|
||||
"getting-started/api-keys",
|
||||
"getting-started/faq",
|
||||
"getting-started/contribute"
|
||||
]
|
||||
|
||||
267
apps/docs/getting-started/api-keys.mdx
Normal file
@@ -0,0 +1,267 @@
|
||||
# API Keys Configuration
|
||||
|
||||
Task Master supports multiple AI providers through environment variables. This page lists all available API keys and their configuration requirements.
|
||||
|
||||
## Required API Keys
|
||||
|
||||
> **Note**: At least one required API key must be configured for Task Master to function.
|
||||
>
|
||||
> "Required: Yes" below means "required to use that specific provider," not "required globally." You only need at least one provider configured.
|
||||
|
||||
### ANTHROPIC_API_KEY (Recommended)
|
||||
- **Provider**: Anthropic Claude models
|
||||
- **Format**: `sk-ant-api03-...`
|
||||
- **Required**: ✅ **Yes**
|
||||
- **Models**: Claude 3.5 Sonnet, Claude 3 Haiku, Claude 3 Opus
|
||||
- **Get Key**: [Anthropic Console](https://console.anthropic.com/)
|
||||
|
||||
```bash
|
||||
ANTHROPIC_API_KEY="sk-ant-api03-your-key-here"
|
||||
```
|
||||
|
||||
### PERPLEXITY_API_KEY (Highly Recommended for Research)
|
||||
- **Provider**: Perplexity AI (Research features)
|
||||
- **Format**: `pplx-...`
|
||||
- **Required**: ✅ **Yes**
|
||||
- **Purpose**: Enables research-backed task expansions and updates
|
||||
- **Models**: Perplexity Sonar models
|
||||
- **Get Key**: [Perplexity API](https://www.perplexity.ai/settings/api)
|
||||
|
||||
```bash
|
||||
PERPLEXITY_API_KEY="pplx-your-key-here"
|
||||
```
|
||||
|
||||
### OPENAI_API_KEY
|
||||
- **Provider**: OpenAI GPT models
|
||||
- **Format**: `sk-proj-...` or `sk-...`
|
||||
- **Required**: ✅ **Yes**
|
||||
- **Models**: GPT-4, GPT-4 Turbo, GPT-3.5 Turbo, O1 models
|
||||
- **Get Key**: [OpenAI Platform](https://platform.openai.com/api-keys)
|
||||
|
||||
```bash
|
||||
OPENAI_API_KEY="sk-proj-your-key-here"
|
||||
```
|
||||
|
||||
### GOOGLE_API_KEY
|
||||
- **Provider**: Google Gemini models
|
||||
- **Format**: Various formats
|
||||
- **Required**: ✅ **Yes**
|
||||
- **Models**: Gemini Pro, Gemini Flash, Gemini Ultra
|
||||
- **Get Key**: [Google AI Studio](https://aistudio.google.com/app/apikey)
|
||||
- **Alternative**: Use `GOOGLE_APPLICATION_CREDENTIALS` for service account (Google Vertex)
|
||||
|
||||
```bash
|
||||
GOOGLE_API_KEY="your-google-api-key-here"
|
||||
```
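
If you authenticate with a service account instead (for example with Google Vertex), the standard `GOOGLE_APPLICATION_CREDENTIALS` variable points at the credentials file; the path below is illustrative:

```bash
# Service-account alternative (illustrative path)
export GOOGLE_APPLICATION_CREDENTIALS="/path/to/service-account.json"
```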
|
||||
|
||||
### GROQ_API_KEY
|
||||
- **Provider**: Groq (High-performance inference)
|
||||
- **Required**: ✅ **Yes**
|
||||
- **Models**: Llama models, Mixtral models (via Groq)
|
||||
- **Get Key**: [Groq Console](https://console.groq.com/keys)
|
||||
|
||||
```bash
|
||||
GROQ_API_KEY="your-groq-key-here"
|
||||
```
|
||||
|
||||
### OPENROUTER_API_KEY
|
||||
- **Provider**: OpenRouter (Multiple model access)
|
||||
- **Required**: ✅ **Yes**
|
||||
- **Models**: Access to various models through single API
|
||||
- **Get Key**: [OpenRouter](https://openrouter.ai/keys)
|
||||
|
||||
```bash
|
||||
OPENROUTER_API_KEY="your-openrouter-key-here"
|
||||
```
|
||||
|
||||
### AZURE_OPENAI_API_KEY
|
||||
- **Provider**: Azure OpenAI Service
|
||||
- **Required**: ✅ **Yes**
|
||||
- **Requirements**: Also requires `AZURE_OPENAI_ENDPOINT` configuration
|
||||
- **Models**: GPT models via Azure
|
||||
- **Get Key**: [Azure Portal](https://portal.azure.com/)
|
||||
|
||||
```bash
|
||||
AZURE_OPENAI_API_KEY="your-azure-key-here"
|
||||
```
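
Because Azure also needs the endpoint, a typical pair looks like this (the resource name is illustrative):

```bash
AZURE_OPENAI_API_KEY="your-azure-key-here"
AZURE_OPENAI_ENDPOINT="https://your-resource-name.openai.azure.com/"
```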
|
||||
|
||||
### XAI_API_KEY
|
||||
- **Provider**: xAI (Grok) models
|
||||
- **Required**: ✅ **Yes**
|
||||
- **Models**: Grok models
|
||||
- **Get Key**: [xAI Console](https://console.x.ai/)
|
||||
|
||||
```bash
|
||||
XAI_API_KEY="your-xai-key-here"
|
||||
```
|
||||
|
||||
## Optional API Keys
|
||||
|
||||
> **Note**: These API keys are optional - providers will work without them or use alternative authentication methods.
|
||||
|
||||
### AWS_ACCESS_KEY_ID (Bedrock)
|
||||
- **Provider**: AWS Bedrock
|
||||
- **Required**: ❌ **No** (uses AWS credential chain)
|
||||
- **Models**: Claude models via AWS Bedrock
|
||||
- **Authentication**: Uses AWS credential chain (profiles, IAM roles, etc.)
|
||||
- **Get Key**: [AWS Console](https://console.aws.amazon.com/iam/)
|
||||
|
||||
```bash
|
||||
# Optional - AWS credential chain is preferred
|
||||
AWS_ACCESS_KEY_ID="your-aws-access-key"
|
||||
AWS_SECRET_ACCESS_KEY="your-aws-secret-key"
|
||||
```
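
Since the credential chain is preferred, a named profile often replaces static keys; the profile name and region below are illustrative:

```bash
# Credential-chain alternative (illustrative values)
export AWS_PROFILE="bedrock-profile"
export AWS_DEFAULT_REGION="us-east-1"
```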
|
||||
|
||||
### CLAUDE_CODE_API_KEY
|
||||
- **Provider**: Claude Code CLI
|
||||
- **Required**: ❌ **No** (uses OAuth tokens)
|
||||
- **Purpose**: Integration with local Claude Code CLI
|
||||
- **Authentication**: Uses OAuth tokens, no API key needed
|
||||
|
||||
```bash
|
||||
# Not typically needed
|
||||
CLAUDE_CODE_API_KEY="not-usually-required"
|
||||
```
|
||||
|
||||
### GEMINI_API_KEY
|
||||
- **Provider**: Gemini CLI
|
||||
- **Required**: ❌ **No** (uses OAuth authentication)
|
||||
- **Purpose**: Integration with Gemini CLI
|
||||
- **Authentication**: Primarily uses OAuth via CLI, API key is optional
|
||||
|
||||
```bash
|
||||
# Optional - OAuth via CLI is preferred
|
||||
GEMINI_API_KEY="your-gemini-key-here"
|
||||
```
|
||||
|
||||
### GROK_CLI_API_KEY
|
||||
- **Provider**: Grok CLI
|
||||
- **Required**: ❌ **No** (can use CLI config)
|
||||
- **Purpose**: Integration with Grok CLI
|
||||
- **Authentication**: Can use Grok CLI's own config file
|
||||
|
||||
```bash
|
||||
# Optional - CLI config is preferred
|
||||
GROK_CLI_API_KEY="your-grok-cli-key"
|
||||
```
|
||||
|
||||
### OLLAMA_API_KEY
|
||||
- **Provider**: Ollama (Local/Remote)
|
||||
- **Required**: ❌ **No** (local installation doesn't need key)
|
||||
- **Purpose**: For remote Ollama servers that require authentication
|
||||
- **Requirements**: Only needed for remote servers with authentication
|
||||
- **Note**: Not needed for local Ollama installations
|
||||
|
||||
```bash
|
||||
# Only needed for remote Ollama servers
|
||||
OLLAMA_API_KEY="your-ollama-api-key-here"
|
||||
```
|
||||
|
||||
### GITHUB_API_KEY
|
||||
- **Provider**: GitHub (Import/Export features)
|
||||
- **Format**: `ghp_...` or `github_pat_...`
|
||||
- **Required**: ❌ **No** (for GitHub features only)
|
||||
- **Purpose**: GitHub import/export features
|
||||
- **Get Key**: [GitHub Settings](https://github.com/settings/tokens)
|
||||
|
||||
```bash
|
||||
GITHUB_API_KEY="ghp-your-github-key-here"
|
||||
```
|
||||
|
||||
## Configuration Methods
|
||||
|
||||
### Method 1: Environment File (.env)
|
||||
Create a `.env` file in your project root:
|
||||
|
||||
```bash
|
||||
# Copy from .env.example
|
||||
cp .env.example .env
|
||||
|
||||
# Edit with your keys
|
||||
vim .env
|
||||
```
|
||||
|
||||
### Method 2: System Environment Variables
|
||||
```bash
|
||||
export ANTHROPIC_API_KEY="your-key-here"
|
||||
export PERPLEXITY_API_KEY="your-key-here"
|
||||
# ... other keys
|
||||
```
|
||||
|
||||
### Method 3: MCP Server Configuration
|
||||
For Claude Code integration, configure keys in `.mcp.json`:
|
||||
|
||||
```json
|
||||
{
|
||||
"mcpServers": {
|
||||
"task-master-ai": {
|
||||
"command": "npx",
|
||||
"args": ["-y", "task-master-ai"],
|
||||
"env": {
|
||||
"ANTHROPIC_API_KEY": "your-key-here",
|
||||
"PERPLEXITY_API_KEY": "your-key-here",
|
||||
"OPENAI_API_KEY": "your-key-here"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Key Requirements
|
||||
|
||||
### Minimum Requirements
|
||||
- **At least one** AI provider key is required
|
||||
- **ANTHROPIC_API_KEY** is recommended as the primary provider
|
||||
- **PERPLEXITY_API_KEY** is highly recommended for research features
|
||||
|
||||
### Provider-Specific Requirements
|
||||
- **Azure OpenAI**: Requires both `AZURE_OPENAI_API_KEY` and `AZURE_OPENAI_ENDPOINT` configuration
|
||||
- **Google Vertex**: Requires `VERTEX_PROJECT_ID` and `VERTEX_LOCATION` environment variables (see the example after this list)
|
||||
- **AWS Bedrock**: Uses AWS credential chain (profiles, IAM roles, etc.) instead of API keys
|
||||
- **Ollama**: Only needs API key for remote servers with authentication
|
||||
- **CLI Providers**: Gemini CLI, Grok CLI, and Claude Code use OAuth/CLI config instead of API keys
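
For example, the Google Vertex entry above maps to environment variables like these (the values are illustrative):

```bash
VERTEX_PROJECT_ID="your-gcp-project-id"
VERTEX_LOCATION="us-central1"
```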
|
||||
|
||||
## Model Configuration
|
||||
|
||||
After setting up API keys, configure which models to use:
|
||||
|
||||
```bash
|
||||
# Interactive model setup
|
||||
task-master models --setup
|
||||
|
||||
# Set specific models
|
||||
task-master models --set-main claude-3-5-sonnet-20241022
|
||||
task-master models --set-research perplexity-llama-3.1-sonar-large-128k-online
|
||||
task-master models --set-fallback gpt-4o-mini
|
||||
```
|
||||
|
||||
## Security Best Practices
|
||||
|
||||
1. **Never commit API keys** to version control
|
||||
2. **Use .env files** and add them to `.gitignore` (see the snippet after this list)
|
||||
3. **Rotate keys regularly**, especially if compromised
|
||||
4. **Use minimal permissions** for service accounts
|
||||
5. **Monitor usage** to detect unauthorized access
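
For points 1 and 2, a minimal snippet to keep local keys out of version control (adjust to your repo's conventions):

```bash
# Ignore local environment files
echo ".env" >> .gitignore
# Stop tracking the file if it was ever committed
git rm --cached .env 2>/dev/null || true
```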
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Key Validation
|
||||
```bash
|
||||
# Check if keys are properly configured
|
||||
task-master models
|
||||
|
||||
# Test specific provider
|
||||
task-master add-task --prompt="test task" --model=claude-3-5-sonnet-20241022
|
||||
```
|
||||
|
||||
### Common Issues
|
||||
- **Invalid key format**: Check the expected format for each provider
|
||||
- **Insufficient permissions**: Ensure keys have necessary API access
|
||||
- **Rate limits**: Some providers have usage limits
|
||||
- **Regional restrictions**: Some models may not be available in all regions
|
||||
|
||||
### Getting Help
|
||||
If you encounter issues with API key configuration:
|
||||
- Check the [FAQ](/getting-started/faq) for common solutions
|
||||
- Join our [Discord community](https://discord.gg/fWJkU7rf) for support
|
||||
- Report issues on [GitHub](https://github.com/eyaltoledano/claude-task-master/issues)
|
||||
@@ -108,5 +108,5 @@ You don’t need to configure everything up front. Most settings can be left as
|
||||
</Accordion>
|
||||
|
||||
<Note>
|
||||
For advanced configuration options and detailed customization, see our [Advanced Configuration Guide](/docs/best-practices/configuration-advanced) page.
|
||||
For advanced configuration options and detailed customization, see our [Advanced Configuration Guide](/best-practices/configuration-advanced) page.
|
||||
</Note>
|
||||
@@ -56,4 +56,4 @@ If you ran into problems and had to debug errors you can create new rules as you
|
||||
|
||||
By now you have all you need to get started executing code faster and smarter with Task Master.
|
||||
|
||||
If you have any questions please check out [Frequently Asked Questions](/docs/getting-started/faq)
|
||||
If you have any questions please check out [Frequently Asked Questions](/getting-started/faq)
|
||||
|
||||
@@ -30,6 +30,19 @@ cursor://anysphere.cursor-deeplink/mcp/install?name=taskmaster-ai&config=eyJjb21
|
||||
```
|
||||
|
||||
> **Note:** After clicking the link, you'll still need to add your API keys to the configuration. The link installs the MCP server with placeholder keys that you'll need to replace with your actual API keys.
|
||||
|
||||
### Claude Code Quick Install
|
||||
|
||||
For Claude Code users:
|
||||
|
||||
```bash
|
||||
claude mcp add taskmaster-ai -- npx -y task-master-ai
|
||||
```
|
||||
|
||||
Don't forget to add your API keys to the configuration:
|
||||
- in the root `.env` of your project
|
||||
- in the "env" section of your mcp config for taskmaster-ai
|
||||
|
||||
</Accordion>
|
||||
## Installation Options
|
||||
|
||||
|
||||
@@ -6,13 +6,13 @@ sidebarTitle: "Quick Start"
|
||||
This guide is for new users who want to start using Task Master with minimal setup time.
|
||||
|
||||
It covers:
|
||||
- [Requirements](/docs/getting-started/quick-start/requirements): You will need Node.js and an AI model API Key.
|
||||
- [Installation](/docs/getting-started/quick-start/installation): How to Install Task Master.
|
||||
- [Configuration](/docs/getting-started/quick-start/configuration-quick): Setting up your API Key, MCP, and more.
|
||||
- [PRD](/docs/getting-started/quick-start/prd-quick): Writing and parsing your first PRD.
|
||||
- [Task Setup](/docs/getting-started/quick-start/tasks-quick): Preparing your tasks for execution.
|
||||
- [Executing Tasks](/docs/getting-started/quick-start/execute-quick): Using Task Master to execute tasks.
|
||||
- [Rules & Context](/docs/getting-started/quick-start/rules-quick): Learn how and why to build context in your project over time.
|
||||
- [Requirements](/getting-started/quick-start/requirements): You will need Node.js and an AI model API Key.
|
||||
- [Installation](/getting-started/quick-start/installation): How to Install Task Master.
|
||||
- [Configuration](/getting-started/quick-start/configuration-quick): Setting up your API Key, MCP, and more.
|
||||
- [PRD](/getting-started/quick-start/prd-quick): Writing and parsing your first PRD.
|
||||
- [Task Setup](/getting-started/quick-start/tasks-quick): Preparing your tasks for execution.
|
||||
- [Executing Tasks](/getting-started/quick-start/execute-quick): Using Task Master to execute tasks.
|
||||
- [Rules & Context](/getting-started/quick-start/rules-quick): Learn how and why to build context in your project over time.
|
||||
|
||||
<Tip>
|
||||
By the end of this guide, you'll have everything you need to begin working productively with Task Master.
|
||||
|
||||
@@ -61,9 +61,25 @@ Task Master can provide a complexity report which can be helpful to read before
|
||||
Can you analyze the complexity of our tasks to help me understand which ones need to be broken down further?
|
||||
```
|
||||
|
||||
The agent will use the `analyze_project_complexity` MCP tool, or you can run it directly with the CLI command:
|
||||
```bash
|
||||
task-master analyze-complexity
|
||||
```
|
||||
|
||||
For more comprehensive analysis using your configured research model, you can use:
|
||||
```bash
|
||||
task-master analyze-complexity --research
|
||||
```
|
||||
|
||||
<Tip>
|
||||
The `--research` flag uses whatever research model you have configured in `.taskmaster/config.json` (configurable via `task-master models --setup`) for research-backed complexity analysis, providing more informed recommendations.
|
||||
</Tip>
|
||||
|
||||
You can view the report in a friendly table using:
|
||||
```
|
||||
Can you show me the complexity report in a more readable format?
|
||||
```
|
||||
|
||||
<Check>Now you are ready to begin [executing tasks](/docs/getting-started/quick-start/execute-quick)</Check>
|
||||
For more detailed CLI options, see the [Analyze Task Complexity](/capabilities/cli-root-commands#analyze-task-complexity) section.
|
||||
|
||||
<Check>Now you are ready to begin [executing tasks](/getting-started/quick-start/execute-quick)</Check>
|
||||
@@ -4,7 +4,7 @@ Welcome to v1 of the Task Master Docs. Expect weekly updates as we expand and re
|
||||
|
||||
We've organized the docs into three sections depending on your experience level and goals:
|
||||
|
||||
### Getting Started - Jump in to [Quick Start](/docs/getting-started/quick-start)
|
||||
### Getting Started - Jump in to [Quick Start](/getting-started/quick-start)
|
||||
Designed for first-time users. Get set up, create your first PRD, and run your first task.
|
||||
|
||||
### Best Practices
|
||||
|
||||
@@ -1,5 +1,26 @@
|
||||
# Change Log
|
||||
|
||||
## 0.25.5-rc.0
|
||||
|
||||
### Patch Changes
|
||||
|
||||
- Updated dependencies [[`aaacc3d`](https://github.com/eyaltoledano/claude-task-master/commit/aaacc3dae36247b4de72b2d2697f49e5df6d01e3), [`0079b7d`](https://github.com/eyaltoledano/claude-task-master/commit/0079b7defdad550811f704c470fdd01955d91d4d), [`0b2c696`](https://github.com/eyaltoledano/claude-task-master/commit/0b2c6967c4605c33a100cff16f6ce8ff09ad06f0), [`18aa416`](https://github.com/eyaltoledano/claude-task-master/commit/18aa416035f44345bde1c7321490345733a5d042), [`18aa416`](https://github.com/eyaltoledano/claude-task-master/commit/18aa416035f44345bde1c7321490345733a5d042), [`738ec51`](https://github.com/eyaltoledano/claude-task-master/commit/738ec51c049a295a12839b2dfddaf05e23b8fede), [`d67b81d`](https://github.com/eyaltoledano/claude-task-master/commit/d67b81d25ddd927fabb6f5deb368e8993519c541), [`b5fe723`](https://github.com/eyaltoledano/claude-task-master/commit/b5fe723f8ead928e9f2dbde13b833ee70ac3382d), [`2b69936`](https://github.com/eyaltoledano/claude-task-master/commit/2b69936ee7b34346d6de5175af20e077359e2e2a), [`986ac11`](https://github.com/eyaltoledano/claude-task-master/commit/986ac117aee00bcd3e6830a0f76e1ad6d10e0bca), [`20004a3`](https://github.com/eyaltoledano/claude-task-master/commit/20004a39ea848f747e1ff48981bfe176554e4055)]:
|
||||
- task-master-ai@0.28.0-rc.0
|
||||
|
||||
## 0.25.4
|
||||
|
||||
### Patch Changes
|
||||
|
||||
- Updated dependencies [[`af53525`](https://github.com/eyaltoledano/claude-task-master/commit/af53525cbc660a595b67d4bb90d906911c71f45d)]:
|
||||
- task-master-ai@0.27.3
|
||||
|
||||
## 0.25.3
|
||||
|
||||
### Patch Changes
|
||||
|
||||
- Updated dependencies [[`044a7bf`](https://github.com/eyaltoledano/claude-task-master/commit/044a7bfc98049298177bc655cf341d7a8b6a0011)]:
|
||||
- task-master-ai@0.27.2
|
||||
|
||||
## 0.25.2
|
||||
|
||||
### Patch Changes
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
"private": true,
|
||||
"displayName": "TaskMaster",
|
||||
"description": "A visual Kanban board interface for TaskMaster projects in VS Code",
|
||||
"version": "0.25.2",
|
||||
"version": "0.25.5-rc.0",
|
||||
"publisher": "Hamster",
|
||||
"icon": "assets/icon.png",
|
||||
"engines": {
|
||||
@@ -240,7 +240,7 @@
|
||||
"check-types": "tsc --noEmit"
|
||||
},
|
||||
"dependencies": {
|
||||
"task-master-ai": "0.27.1"
|
||||
"task-master-ai": "*"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@dnd-kit/core": "^6.3.1",
|
||||
@@ -276,7 +276,8 @@
|
||||
"react-dom": "^19.0.0",
|
||||
"tailwind-merge": "^3.3.1",
|
||||
"tailwindcss": "4.1.11",
|
||||
"typescript": "^5.7.3"
|
||||
"typescript": "^5.9.2",
|
||||
"@tm/core": "*"
|
||||
},
|
||||
"overrides": {
|
||||
"glob@<8": "^10.4.5",
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
"name": "task-master-hamster",
|
||||
"displayName": "Taskmaster AI",
|
||||
"description": "A visual Kanban board interface for Taskmaster projects in VS Code",
|
||||
"version": "0.23.1",
|
||||
"version": "0.25.3",
|
||||
"publisher": "Hamster",
|
||||
"icon": "assets/icon.png",
|
||||
"engines": {
|
||||
|
||||
@@ -5,7 +5,6 @@
|
||||
"outDir": "out",
|
||||
"lib": ["ES2022", "DOM"],
|
||||
"sourceMap": true,
|
||||
"rootDir": "src",
|
||||
"strict": true /* enable all strict type-checking options */,
|
||||
"moduleResolution": "Node",
|
||||
"esModuleInterop": true,
|
||||
@@ -21,8 +20,10 @@
|
||||
"@/*": ["./src/*"],
|
||||
"@/components/*": ["./src/components/*"],
|
||||
"@/lib/*": ["./src/lib/*"],
|
||||
"@tm/core": ["../core/src"]
|
||||
"@tm/core": ["../../packages/tm-core/src/index.ts"],
|
||||
"@tm/core/*": ["../../packages/tm-core/src/*"]
|
||||
}
|
||||
},
|
||||
"include": ["src/**/*"],
|
||||
"exclude": ["node_modules", ".vscode-test", "out", "dist"]
|
||||
}
|
||||
|
||||
231
docs/claude-code-integration.md
Normal file
@@ -0,0 +1,231 @@
|
||||
# TODO: Move to apps/docs inside our documentation website
|
||||
|
||||
# Claude Code Integration Guide
|
||||
|
||||
This guide covers how to use Task Master with Claude Code AI SDK integration for enhanced AI-powered development workflows.
|
||||
|
||||
## Overview
|
||||
|
||||
Claude Code integration allows Task Master to leverage the Claude Code CLI for AI operations without requiring direct API keys. The integration uses OAuth tokens managed by the Claude Code CLI itself.
|
||||
|
||||
## Authentication Setup
|
||||
|
||||
The Claude Code provider uses token authentication managed by the Claude Code CLI.
|
||||
|
||||
### Prerequisites
|
||||
|
||||
1. **Install Claude Code CLI** (if not already installed):
|
||||
|
||||
```bash
|
||||
# Installation method depends on your system
|
||||
# Follow Claude Code documentation for installation
|
||||
```
|
||||
|
||||
2. **Set up OAuth token** using Claude Code CLI:
|
||||
|
||||
```bash
|
||||
claude setup-token
|
||||
```
|
||||
|
||||
This command will:
|
||||
- Guide you through OAuth authentication
|
||||
- Store the token securely for CLI usage
|
||||
- Enable Task Master to use Claude Code without manual API key configuration
|
||||
|
||||
### Authentication Priority
|
||||
|
||||
Task Master will attempt authentication in this order:
|
||||
|
||||
1. **Environment Variable** (optional): `CLAUDE_CODE_OAUTH_TOKEN`
|
||||
- Useful for CI/CD environments or when you want to override the default token
|
||||
- Not required if you've set up the CLI token
|
||||
|
||||
2. **Claude Code CLI Token** (recommended): Token managed by `claude setup-token`
|
||||
- Automatically used when available
|
||||
- Most convenient for local development
|
||||
|
||||
3. **Fallback**: Error if neither is available
|
||||
|
||||
## Configuration
|
||||
|
||||
### Basic Configuration
|
||||
|
||||
Add Claude Code to your Task Master configuration:
|
||||
|
||||
```javascript
|
||||
// In your .taskmaster/config.json or via task-master models command
|
||||
{
|
||||
"models": {
|
||||
"main": "claude-code:sonnet", // Use Claude Code with Sonnet
|
||||
"research": "perplexity-llama-3.1-sonar-large-128k-online",
|
||||
"fallback": "claude-code:opus" // Use Claude Code with Opus as fallback
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Supported Models
|
||||
|
||||
- `claude-code:sonnet` - Claude 3.5 Sonnet via Claude Code CLI
|
||||
- `claude-code:opus` - Claude 3 Opus via Claude Code CLI
|
||||
|
||||
### Environment Variables (Optional)
|
||||
|
||||
While not required, you can optionally set:
|
||||
|
||||
```bash
|
||||
export CLAUDE_CODE_OAUTH_TOKEN="your_oauth_token_here"
|
||||
```
|
||||
|
||||
This is only needed in specific scenarios like:
|
||||
|
||||
- CI/CD pipelines
|
||||
- Docker containers
|
||||
- When you want to use a different token than the CLI default
|
||||
|
||||
## Usage Examples
|
||||
|
||||
### Basic Task Operations
|
||||
|
||||
```bash
|
||||
# Use Claude Code for task operations
|
||||
task-master add-task --prompt="Implement user authentication system" --research
|
||||
task-master expand --id=1 --research
|
||||
task-master update-task --id=1.1 --prompt="Add JWT token validation"
|
||||
```
|
||||
|
||||
### Model Configuration Commands
|
||||
|
||||
```bash
|
||||
# Set Claude Code as main model
|
||||
task-master models --set-main claude-code:sonnet
|
||||
|
||||
# Use interactive setup
|
||||
task-master models --setup
|
||||
# Then select "claude-code" from the provider list
|
||||
```
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Common Issues
|
||||
|
||||
#### 1. "Claude Code CLI not available" Error
|
||||
|
||||
**Problem**: Task Master cannot connect to Claude Code CLI.
|
||||
|
||||
**Solutions**:
|
||||
|
||||
- Ensure Claude Code CLI is installed and in your PATH
|
||||
- Run `claude setup-token` to configure authentication
|
||||
- Verify Claude Code CLI works: `claude --help`
|
||||
|
||||
#### 2. Authentication Failures
|
||||
|
||||
**Problem**: Token authentication is failing.
|
||||
|
||||
**Solutions**:
|
||||
|
||||
- Re-run `claude setup-token` to refresh your OAuth token
|
||||
- Check if your token has expired
|
||||
- Verify Claude Code CLI can authenticate: try a simple `claude` command
|
||||
|
||||
#### 3. Model Not Available
|
||||
|
||||
**Problem**: Specified Claude Code model is not supported.
|
||||
|
||||
**Solutions**:
|
||||
|
||||
- Use supported models: `sonnet` or `opus`
|
||||
- Check model availability: `task-master models --list`
|
||||
- Verify your Claude Code CLI has access to the requested model
|
||||
|
||||
### Debug Steps
|
||||
|
||||
1. **Test Claude Code CLI directly**:
|
||||
|
||||
```bash
|
||||
claude --help
|
||||
# Should show help without errors
|
||||
```
|
||||
|
||||
2. **Test authentication**:
|
||||
|
||||
```bash
|
||||
claude setup-token --verify
|
||||
# Should confirm token is valid
|
||||
```
|
||||
|
||||
3. **Test Task Master integration**:
|
||||
|
||||
```bash
|
||||
task-master models --test claude-code:sonnet
|
||||
# Should successfully connect and test the model
|
||||
```
|
||||
|
||||
4. **Check logs**:
|
||||
- Task Master logs will show detailed error messages
|
||||
- Use `--verbose` flag for more detailed output
|
||||
|
||||
### Environment-Specific Configuration
|
||||
|
||||
#### Docker/Containers
|
||||
|
||||
When running in Docker, you'll need to:
|
||||
|
||||
1. Install Claude Code CLI in your container
|
||||
2. Set up authentication via environment variable:
|
||||
|
||||
```dockerfile
|
||||
ENV CLAUDE_CODE_OAUTH_TOKEN="your_token_here"
|
||||
```
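
Step 1 can be handled in the image itself; a hedged sketch (the install command and package name are assumptions, so follow the official Claude Code installation docs):

```dockerfile
# Install the Claude Code CLI in the image (package name assumed; verify against official docs)
RUN npm install -g @anthropic-ai/claude-code
```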
|
||||
|
||||
#### CI/CD Pipelines
|
||||
|
||||
For automated environments:
|
||||
|
||||
1. Set up a service account token or use environment variables
|
||||
2. Ensure Claude Code CLI is available in the pipeline environment
|
||||
3. Configure authentication before running Task Master commands
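
A hedged sketch of how these steps might look in a CI job (the secret name and commands are illustrative):

```bash
# Provide the OAuth token from your CI secret store, then run Task Master non-interactively
export CLAUDE_CODE_OAUTH_TOKEN="$CI_CLAUDE_CODE_TOKEN"
task-master models   # sanity-check that the claude-code provider resolves
task-master next     # then run Task Master commands as usual
```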
|
||||
|
||||
## Integration with AI SDK
|
||||
|
||||
Task Master's Claude Code integration uses the official `ai-sdk-provider-claude-code` package, providing:
|
||||
|
||||
- **Streaming Support**: Real-time token streaming for interactive experiences
|
||||
- **Full AI SDK Compatibility**: Works with generateText, streamText, and other AI SDK functions
|
||||
- **Automatic Error Handling**: Graceful degradation when Claude Code is unavailable
|
||||
- **Type Safety**: Full TypeScript support with proper type definitions
|
||||
|
||||
### Example AI SDK Usage
|
||||
|
||||
```javascript
|
||||
import { generateText } from 'ai';
|
||||
import { ClaudeCodeProvider } from './src/ai-providers/claude-code.js';
|
||||
|
||||
const provider = new ClaudeCodeProvider();
|
||||
const client = provider.getClient();
|
||||
|
||||
const result = await generateText({
|
||||
model: client('sonnet'),
|
||||
messages: [
|
||||
{ role: 'user', content: 'Hello Claude!' }
|
||||
]
|
||||
});
|
||||
|
||||
console.log(result.text);
|
||||
```
|
||||
|
||||
## Security Notes
|
||||
|
||||
- OAuth tokens are managed securely by Claude Code CLI
|
||||
- No API keys need to be stored in your project files
|
||||
- Tokens are automatically refreshed by the Claude Code CLI
|
||||
- Environment variables should only be used in secure environments
|
||||
|
||||
## Getting Help
|
||||
|
||||
If you encounter issues:
|
||||
|
||||
1. Check the Claude Code CLI documentation
|
||||
2. Verify your authentication setup with `claude setup-token --verify`
|
||||
3. Review Task Master logs for detailed error messages
|
||||
4. Open an issue with both Task Master and Claude Code version information
|
||||
@@ -383,6 +383,12 @@ task-master models --set-main=my-local-llama --ollama
|
||||
# Set a custom OpenRouter model for the research role
|
||||
task-master models --set-research=google/gemini-pro --openrouter
|
||||
|
||||
# Set Codex CLI model for the main role (uses ChatGPT subscription via OAuth)
|
||||
task-master models --set-main=gpt-5-codex --codex-cli
|
||||
|
||||
# Set Codex CLI model for the fallback role
|
||||
task-master models --set-fallback=gpt-5 --codex-cli
|
||||
|
||||
# Run interactive setup to configure models, including custom ones
|
||||
task-master models --setup
|
||||
```
|
||||
|
||||
@@ -235,6 +235,60 @@ node scripts/init.js
|
||||
- "MCP provider requires session context" → Ensure running in MCP environment
|
||||
- See the [MCP Provider Guide](./mcp-provider-guide.md) for detailed troubleshooting
|
||||
|
||||
### MCP Timeout Configuration
|
||||
|
||||
Long-running AI operations in taskmaster-ai can exceed the default 60-second MCP timeout. Operations like `parse_prd`, `expand_task`, `research`, and `analyze_project_complexity` may take 2-5 minutes to complete.
|
||||
|
||||
#### Adding Timeout Configuration
|
||||
|
||||
Add a `timeout` parameter to your MCP configuration to extend the timeout limit. The timeout configuration works identically across MCP clients including Cursor, Windsurf, and RooCode:
|
||||
|
||||
```json
|
||||
{
|
||||
"mcpServers": {
|
||||
"task-master-ai": {
|
||||
"command": "npx",
|
||||
"args": ["-y", "--package=task-master-ai", "task-master-ai"],
|
||||
"timeout": 300,
|
||||
"env": {
|
||||
"ANTHROPIC_API_KEY": "your-anthropic-api-key"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Configuration Details:**
|
||||
- **`timeout: 300`** - Sets timeout to 300 seconds (5 minutes)
|
||||
- **Value range**: 1-3600 seconds (1 second to 1 hour)
|
||||
- **Recommended**: 300 seconds provides sufficient time for most AI operations
|
||||
- **Format**: Integer value in seconds (not milliseconds)
|
||||
|
||||
#### Automatic Setup
|
||||
|
||||
When adding taskmaster rules for supported editors, the timeout configuration is automatically included:
|
||||
|
||||
```bash
|
||||
# Automatically includes timeout configuration
|
||||
task-master rules add cursor
|
||||
task-master rules add roo
|
||||
task-master rules add windsurf
|
||||
task-master rules add vscode
|
||||
```
|
||||
|
||||
#### Troubleshooting Timeouts
|
||||
|
||||
If you're still experiencing timeout errors:
|
||||
|
||||
1. **Verify configuration**: Check that `timeout: 300` is present in your MCP config
|
||||
2. **Restart editor**: Restart your editor after making configuration changes
|
||||
3. **Increase timeout**: For very complex operations, try `timeout: 600` (10 minutes)
|
||||
4. **Check API keys**: Ensure required API keys are properly configured
|
||||
|
||||
**Expected behavior:**
|
||||
- **Before fix**: Operations fail after 60 seconds with `MCP request timed out after 60000ms`
|
||||
- **After fix**: Operations complete successfully within the configured timeout limit
|
||||
|
||||
### Google Vertex AI Configuration
|
||||
|
||||
Google Vertex AI is Google Cloud's enterprise AI platform and requires specific configuration:
|
||||
@@ -375,3 +429,153 @@ Azure OpenAI provides enterprise-grade OpenAI models through Microsoft's Azure c
|
||||
- Verify the deployment name matches your configuration exactly (case-sensitive)
|
||||
- Ensure the model deployment is in a "Succeeded" state in Azure OpenAI Studio
|
||||
- Ensure you're not getting rate limited by `maxTokens`; maintain an appropriate Tokens per Minute (TPM) rate limit in your deployment.
|
||||
|
||||
### Codex CLI Provider
|
||||
|
||||
The Codex CLI provider integrates Task Master with OpenAI's Codex CLI, allowing you to use ChatGPT subscription models via OAuth authentication.
|
||||
|
||||
1. **Prerequisites**:
|
||||
- Node.js >= 18
|
||||
- Codex CLI >= 0.42.0 (>= 0.44.0 recommended)
|
||||
- ChatGPT subscription: Plus, Pro, Business, Edu, or Enterprise (for OAuth access to GPT-5 models)
|
||||
|
||||
2. **Installation**:
|
||||
```bash
|
||||
npm install -g @openai/codex
|
||||
```
|
||||
|
||||
3. **Authentication** (OAuth - Primary Method):
|
||||
```bash
|
||||
codex login
|
||||
```
|
||||
This will open a browser window for OAuth authentication with your ChatGPT account. Once authenticated, Task Master will automatically use these credentials.
|
||||
|
||||
4. **Optional API Key Method**:
|
||||
While OAuth is the primary and recommended authentication method, you can optionally set an OpenAI API key:
|
||||
```bash
|
||||
# In .env file
|
||||
OPENAI_API_KEY=sk-your-openai-api-key-here
|
||||
```
|
||||
**Note**: The API key will only be injected if explicitly provided. OAuth is always preferred.
|
||||
|
||||
5. **Configuration**:
|
||||
```json
|
||||
// In .taskmaster/config.json
|
||||
{
|
||||
"models": {
|
||||
"main": {
|
||||
"provider": "codex-cli",
|
||||
"modelId": "gpt-5-codex",
|
||||
"maxTokens": 128000,
|
||||
"temperature": 0.2
|
||||
},
|
||||
"fallback": {
|
||||
"provider": "codex-cli",
|
||||
"modelId": "gpt-5",
|
||||
"maxTokens": 128000,
|
||||
"temperature": 0.2
|
||||
}
|
||||
},
|
||||
"codexCli": {
|
||||
"allowNpx": true,
|
||||
"skipGitRepoCheck": true,
|
||||
"approvalMode": "on-failure",
|
||||
"sandboxMode": "workspace-write"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
6. **Available Models**:
|
||||
- `gpt-5` - Latest GPT-5 model (272K max input, 128K max output)
|
||||
- `gpt-5-codex` - GPT-5 optimized for agentic software engineering (272K max input, 128K max output)
|
||||
|
||||
7. **Codex CLI Settings (`codexCli` section)**:
|
||||
|
||||
The `codexCli` section in your configuration file supports the following options:
|
||||
|
||||
- **`allowNpx`** (boolean, default: `false`): Allow fallback to `npx @openai/codex` if CLI not found on PATH
|
||||
- **`skipGitRepoCheck`** (boolean, default: `false`): Skip git repository safety check (recommended for CI/non-repo usage)
|
||||
- **`approvalMode`** (string): Control command execution approval
|
||||
- `"untrusted"`: Require approval for all commands
|
||||
- `"on-failure"`: Only require approval after a command fails (default)
|
||||
- `"on-request"`: Approve only when explicitly requested
|
||||
- `"never"`: Never require approval (not recommended)
|
||||
- **`sandboxMode`** (string): Control filesystem access
|
||||
- `"read-only"`: Read-only access
|
||||
- `"workspace-write"`: Allow writes to workspace (default)
|
||||
- `"danger-full-access"`: Full filesystem access (use with caution)
|
||||
- **`codexPath`** (string, optional): Custom path to codex CLI executable
|
||||
- **`cwd`** (string, optional): Working directory for Codex CLI execution
|
||||
- **`fullAuto`** (boolean, optional): Fully automatic mode (equivalent to `--full-auto` flag)
|
||||
- **`dangerouslyBypassApprovalsAndSandbox`** (boolean, optional): Bypass all safety checks (dangerous!)
|
||||
- **`color`** (string, optional): Color handling - `"always"`, `"never"`, or `"auto"`
|
||||
- **`outputLastMessageFile`** (string, optional): Write last agent message to specified file
|
||||
- **`verbose`** (boolean, optional): Enable verbose logging
|
||||
- **`env`** (object, optional): Additional environment variables for Codex CLI
|
||||
|
||||
8. **Command-Specific Settings** (optional):
|
||||
You can override settings for specific Task Master commands:
|
||||
```json
|
||||
{
|
||||
"codexCli": {
|
||||
"allowNpx": true,
|
||||
"approvalMode": "on-failure",
|
||||
"commandSpecific": {
|
||||
"parse-prd": {
|
||||
"approvalMode": "never",
|
||||
"verbose": true
|
||||
},
|
||||
"expand": {
|
||||
"sandboxMode": "read-only"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
9. **Codebase Features**:
|
||||
The Codex CLI provider is codebase-capable, meaning it can analyze and interact with your project files. Codebase analysis features are automatically enabled when using `codex-cli` as your provider and `enableCodebaseAnalysis` is set to `true` in your global configuration (default).
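
A hedged sketch of where this flag lives in `.taskmaster/config.json` (the exact key placement may differ in your version; it is enabled by default):

```json
{
  "global": {
    "enableCodebaseAnalysis": true
  }
}
```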
|
||||
|
||||
10. **Setup Commands**:
|
||||
```bash
|
||||
# Set Codex CLI for main role
|
||||
task-master models --set-main gpt-5-codex --codex-cli
|
||||
|
||||
# Set Codex CLI for fallback role
|
||||
task-master models --set-fallback gpt-5 --codex-cli
|
||||
|
||||
# Verify configuration
|
||||
task-master models
|
||||
```
|
||||
|
||||
11. **Troubleshooting**:
|
||||
|
||||
**"codex: command not found" error:**
|
||||
- Install Codex CLI globally: `npm install -g @openai/codex`
|
||||
- Verify installation: `codex --version`
|
||||
- Alternatively, enable `allowNpx: true` in your codexCli configuration
|
||||
|
||||
**"Not logged in" errors:**
|
||||
- Run `codex login` to authenticate with your ChatGPT account
|
||||
- Verify authentication status: `codex` (opens interactive CLI)
|
||||
|
||||
**"Old version" warnings:**
|
||||
- Check version: `codex --version`
|
||||
- Upgrade: `npm install -g @openai/codex@latest`
|
||||
- Minimum version: 0.42.0, recommended: >= 0.44.0
|
||||
|
||||
**"Model not available" errors:**
|
||||
- Only `gpt-5` and `gpt-5-codex` are available via OAuth subscription
|
||||
- Verify your ChatGPT subscription is active
|
||||
- For other OpenAI models, use the standard `openai` provider with an API key
|
||||
|
||||
**API key not being used:**
|
||||
- API key is only injected when explicitly provided
|
||||
- OAuth authentication is always preferred
|
||||
- If you want to use an API key, ensure `OPENAI_API_KEY` is set in your `.env` file
|
||||
|
||||
12. **Important Notes**:
|
||||
- OAuth subscription required for model access (no API key needed for basic operation)
|
||||
- Limited to OAuth-available models only (`gpt-5` and `gpt-5-codex`)
|
||||
- Pricing information is not available for OAuth models (shows as "Unknown" in cost calculations)
|
||||
- See [Codex CLI Provider Documentation](./providers/codex-cli.md) for more details
|
||||
|
||||
420
docs/contributor-docs/worktree-setup.md
Normal file
@@ -0,0 +1,420 @@
|
||||
# Git Worktree Setup for Parallel Development
|
||||
|
||||
Simple git worktree setup for running multiple AI coding assistants in parallel.
|
||||
|
||||
## Why Worktrees?
|
||||
|
||||
Instead of Docker complexity, use git worktrees to create isolated working directories:
|
||||
|
||||
✅ **Editor Agnostic** - Works with Cursor, Windsurf, VS Code, Claude Code, etc.
|
||||
✅ **Simple** - No Docker, no containers, just git
|
||||
✅ **Fast** - Instant setup, shared git history
|
||||
✅ **Flexible** - Each worktree can be on a different branch
|
||||
✅ **Task Master Works** - Full access to `.taskmaster/` in each worktree
|
||||
|
||||
## Quick Start
|
||||
|
||||
### 1. Create a Worktree
|
||||
|
||||
```bash
|
||||
# Using current branch as base
|
||||
./scripts/create-worktree.sh
|
||||
|
||||
# Or specify a branch name
|
||||
./scripts/create-worktree.sh feature/my-feature
|
||||
```
|
||||
|
||||
This creates a worktree in `../claude-task-master-worktrees/<branch-name>/` (slashes in the branch name become dashes, e.g. branch `auto/main` becomes directory `auto-main`).
|
||||
|
||||
### 2. Open in Your Editor
|
||||
|
||||
```bash
|
||||
# Navigate to the worktree
|
||||
cd ../claude-task-master-worktrees/auto-main/ # (or whatever branch)
|
||||
|
||||
# Open with your preferred AI editor
|
||||
cursor . # Cursor
|
||||
code . # VS Code
|
||||
windsurf . # Windsurf
|
||||
claude # Claude Code CLI
|
||||
```
|
||||
|
||||
### 3. Work in Parallel
|
||||
|
||||
**Main directory** (where you are now):
|
||||
```bash
|
||||
# Keep working normally
|
||||
git checkout main
|
||||
cursor .
|
||||
```
|
||||
|
||||
**Worktree directory**:
|
||||
```bash
|
||||
cd ../claude-task-master-worktrees/auto-main/
|
||||
# Different files, different branch, same git repo
|
||||
claude
|
||||
```
|
||||
|
||||
## Usage Examples
|
||||
|
||||
### Example 1: Let Claude Work Autonomously
|
||||
|
||||
```bash
|
||||
# Create worktree
|
||||
./scripts/create-worktree.sh auto/taskmaster-work
|
||||
|
||||
# Navigate there
|
||||
cd ../claude-task-master-worktrees/auto-taskmaster-work/
|
||||
|
||||
# Start Claude
|
||||
claude
|
||||
|
||||
# In Claude session
|
||||
> Use task-master to get the next task and complete it
|
||||
```
|
||||
|
||||
**Meanwhile in your main directory:**
|
||||
```bash
|
||||
# You keep working normally
|
||||
cursor .
|
||||
# No conflicts!
|
||||
```
|
||||
|
||||
### Example 2: Multiple AI Assistants in Parallel
|
||||
|
||||
```bash
|
||||
# Create multiple worktrees
|
||||
./scripts/create-worktree.sh cursor/feature-a
|
||||
./scripts/create-worktree.sh claude/feature-b
|
||||
./scripts/create-worktree.sh windsurf/feature-c
|
||||
|
||||
# Terminal 1
|
||||
cd ../claude-task-master-worktrees/cursor-feature-a/
|
||||
cursor .
|
||||
|
||||
# Terminal 2
|
||||
cd ../claude-task-master-worktrees/claude-feature-b/
|
||||
claude
|
||||
|
||||
# Terminal 3
|
||||
cd ../claude-task-master-worktrees/windsurf-feature-c/
|
||||
windsurf .
|
||||
```
|
||||
|
||||
### Example 3: Test vs Implementation
|
||||
|
||||
```bash
|
||||
# Main directory: Write implementation
|
||||
cursor .
|
||||
|
||||
# Worktree: Have Claude write tests
|
||||
cd ../claude-task-master-worktrees/auto-main/
|
||||
claude -p "Write tests for the recent changes in the main branch"
|
||||
```
|
||||
|
||||
## How It Works
|
||||
|
||||
### Directory Structure
|
||||
|
||||
```
|
||||
/Volumes/Workspace/workspace/contrib/task-master/
|
||||
├── claude-task-master/ # Main directory (this one)
|
||||
│ ├── .git/ # Shared git repo
|
||||
│ ├── .taskmaster/ # Synced via git
|
||||
│ └── your code...
|
||||
│
|
||||
└── claude-task-master-worktrees/ # Worktrees directory
|
||||
├── auto-main/ # Worktree 1
|
||||
│ ├── .git -> (points to main .git)
|
||||
│ ├── .taskmaster/ # Same tasks, synced
|
||||
│ └── your code... (on branch auto/main)
|
||||
│
|
||||
└── feature-x/ # Worktree 2
|
||||
├── .git -> (points to main .git)
|
||||
├── .taskmaster/
|
||||
└── your code... (on branch feature/x)
|
||||
```
|
||||
|
||||
### Shared Git Repository
|
||||
|
||||
All worktrees share the same `.git`:
|
||||
- Commits in one worktree are immediately visible in others
|
||||
- Branches are shared
|
||||
- Git history is shared
|
||||
- Only the working files differ
|
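A quick way to see this in practice (using the worktree paths from the examples above):

```bash
# Commit inside a worktree after editing files there
cd ../claude-task-master-worktrees/auto-main/
git commit -am "wip: experiment"

# Back in the main directory, the commit is already in the shared history;
# no push or pull is needed to see it
cd ../../claude-task-master/
git log auto/main -1 --oneline
```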
||||
|
||||
## Task Master in Worktrees
|
||||
|
||||
Task Master works perfectly in worktrees:
|
||||
|
||||
```bash
|
||||
# In any worktree
|
||||
task-master list # Same tasks
|
||||
task-master next # Same task queue
|
||||
task-master show 1.2 # Same task data
|
||||
|
||||
# Changes are shared (if committed/pushed)
|
||||
```
|
||||
|
||||
### Recommended Workflow
|
||||
|
||||
Use **tags** to separate task contexts:
|
||||
|
||||
```bash
|
||||
# Main directory - use default tag
|
||||
task-master list
|
||||
|
||||
# Worktree 1 - use separate tag
|
||||
cd ../claude-task-master-worktrees/auto-main/
|
||||
task-master add-tag --name=claude-auto
|
||||
task-master use-tag --name=claude-auto
|
||||
task-master list # Shows claude-auto tasks only
|
||||
```
|
||||
|
||||
## Managing Worktrees
|
||||
|
||||
### List All Worktrees
|
||||
|
||||
```bash
|
||||
./scripts/list-worktrees.sh
|
||||
|
||||
# Or directly with git
|
||||
git worktree list
|
||||
```
|
||||
|
||||
### Remove a Worktree
|
||||
|
||||
```bash
|
||||
# Remove specific worktree
|
||||
git worktree remove ../claude-task-master-worktrees/auto-main/
|
||||
|
||||
# Or if there are uncommitted changes, force it
|
||||
git worktree remove --force ../claude-task-master-worktrees/auto-main/
|
||||
```
|
||||
|
||||
### Sync Changes Between Worktrees
|
||||
|
||||
Changes are automatically synced through git:
|
||||
|
||||
```bash
|
||||
# In worktree
|
||||
git add .
|
||||
git commit -m "feat: implement feature"
|
||||
git push
|
||||
|
||||
# In main directory
|
||||
git pull
|
||||
# Changes are now available
|
||||
```
|
||||
|
||||
## Common Workflows
|
||||
|
||||
### 1. Autonomous Claude with Task Master
|
||||
|
||||
**Setup:**
|
||||
```bash
|
||||
./scripts/create-worktree.sh auto/claude-work
|
||||
cd ../claude-task-master-worktrees/auto-claude-work/
|
||||
```
|
||||
|
||||
**Run:**
|
||||
```bash
|
||||
# Copy the autonomous script
|
||||
cp ../claude-task-master/run-autonomous-tasks.sh .
|
||||
|
||||
# Run Claude autonomously
|
||||
./run-autonomous-tasks.sh
|
||||
```
|
||||
|
||||
**Monitor from main directory:**
|
||||
```bash
|
||||
# In another terminal, in main directory
|
||||
watch -n 5 "task-master list"
|
||||
```
|
||||
|
||||
### 2. Code Review Workflow
|
||||
|
||||
**Main directory:**
|
||||
```bash
|
||||
# You write code
|
||||
cursor .
|
||||
git add .
|
||||
git commit -m "feat: new feature"
|
||||
```
|
||||
|
||||
**Worktree:**
|
||||
```bash
|
||||
cd ../claude-task-master-worktrees/auto-main/
|
||||
git pull
|
||||
|
||||
# Have Claude review
|
||||
claude -p "Review the latest commit and suggest improvements"
|
||||
```
|
||||
|
||||
### 3. Parallel Feature Development
|
||||
|
||||
**Worktree 1 (Backend):**
|
||||
```bash
|
||||
./scripts/create-worktree.sh backend/api
|
||||
cd ../claude-task-master-worktrees/backend-api/
|
||||
cursor .
|
||||
# Work on API
|
||||
```
|
||||
|
||||
**Worktree 2 (Frontend):**
|
||||
```bash
|
||||
./scripts/create-worktree.sh frontend/ui
|
||||
cd ../claude-task-master-worktrees/frontend-ui/
|
||||
windsurf .
|
||||
# Work on UI
|
||||
```
|
||||
|
||||
**Main directory:**
|
||||
```bash
|
||||
# Monitor and merge
|
||||
git log --all --graph --oneline
|
||||
```
|
||||
|
||||
## Tips
|
||||
|
||||
### 1. Branch Naming Convention
|
||||
|
||||
Use prefixes to organize:
|
||||
- `auto/*` - For autonomous AI work
|
||||
- `cursor/*` - For Cursor-specific features
|
||||
- `claude/*` - For Claude-specific features
|
||||
- `review/*` - For code review worktrees
|
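For example (the branch names here are just placeholders):

```bash
./scripts/create-worktree.sh auto/nightly-run      # autonomous AI session
./scripts/create-worktree.sh review/feature-check  # dedicated code-review worktree
```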
||||
|
||||
### 2. Commit Often in Worktrees
|
||||
|
||||
Worktrees make it easy to try things:
|
||||
```bash
|
||||
# In worktree
|
||||
git commit -m "experiment: trying approach X"
|
||||
# If it doesn't work, just delete the worktree
|
||||
git worktree remove .
|
||||
```
|
||||
|
||||
### 3. Use Different npm Dependencies
|
||||
|
||||
Each worktree can have different `node_modules`:
|
||||
```bash
|
||||
# Main directory
|
||||
npm install
|
||||
|
||||
# Worktree (different dependencies)
|
||||
cd ../claude-task-master-worktrees/auto-main/
|
||||
npm install
|
||||
# Installs independently
|
||||
```
|
||||
|
||||
### 4. .env Files
|
||||
|
||||
Each worktree can have its own `.env`:
|
||||
```bash
|
||||
# Main directory
|
||||
echo "API_URL=http://localhost:3000" > .env
|
||||
|
||||
# Worktree
|
||||
cd ../claude-task-master-worktrees/auto-main/
|
||||
echo "API_URL=http://localhost:4000" > .env
|
||||
# Different config!
|
||||
```
|
||||
|
||||
## Cleanup
|
||||
|
||||
### Remove All Worktrees
|
||||
|
||||
```bash
|
||||
# List and manually remove
|
||||
./scripts/list-worktrees.sh
|
||||
|
||||
# Remove each one
|
||||
git worktree remove ../claude-task-master-worktrees/auto-main/
|
||||
git worktree remove ../claude-task-master-worktrees/feature-x/
|
||||
|
||||
# Or remove all at once (careful!)
|
||||
rm -rf ../claude-task-master-worktrees/
|
||||
git worktree prune # Clean up git's worktree metadata
|
||||
```
|
||||
|
||||
### Delete Local and Remote Branches
|
||||
|
||||
```bash
|
||||
# After merging/done with branches
|
||||
git branch -d auto/claude-work
|
||||
git push origin --delete auto/claude-work
|
||||
```
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### "Cannot create worktree: already exists"
|
||||
|
||||
```bash
|
||||
# Remove the existing worktree first
|
||||
git worktree remove ../claude-task-master-worktrees/auto-main/
|
||||
```
|
||||
|
||||
### "Branch already checked out"
|
||||
|
||||
Git won't let you check out the same branch in multiple worktrees:
|
||||
```bash
|
||||
# Use a different branch name
|
||||
./scripts/create-worktree.sh auto/main-2
|
||||
```
|
||||
|
||||
### Changes Not Syncing
|
||||
|
||||
Worktrees don't auto-sync files. Use git:
|
||||
```bash
|
||||
# In worktree with changes
|
||||
git add .
|
||||
git commit -m "changes"
|
||||
git push
|
||||
|
||||
# In other worktree
|
||||
git pull
|
||||
```
|
||||
|
||||
### npm install Fails
|
||||
|
||||
Each worktree needs its own `node_modules`:
|
||||
```bash
|
||||
cd ../claude-task-master-worktrees/auto-main/
|
||||
npm install
|
||||
```
|
||||
|
||||
## Comparison to Docker
|
||||
|
||||
| Feature | Git Worktrees | Docker |
|
||||
|---------|---------------|--------|
|
||||
| Setup time | Instant | Minutes (build) |
|
||||
| Disk usage | Minimal (shared .git) | GBs per container |
|
||||
| Editor support | Native (any editor) | Limited (need special setup) |
|
||||
| File sync | Via git | Via volumes (can be slow) |
|
||||
| Resource usage | None (native) | RAM/CPU overhead |
|
||||
| Complexity | Simple (just git) | Complex (Dockerfile, compose, etc.) |
|
||||
| npm install | Per worktree | Per container |
|
||||
| AI editor support | ✅ All editors work | ⚠️ Need web-based or special config |
|
||||
|
||||
**TL;DR: Worktrees are simpler, faster, and more flexible for this use case.**
|
||||
|
||||
---
|
||||
|
||||
## Summary
|
||||
|
||||
```bash
|
||||
# 1. Create worktree
|
||||
./scripts/create-worktree.sh auto/claude-work
|
||||
|
||||
# 2. Open in AI editor
|
||||
cd ../claude-task-master-worktrees/auto-claude-work/
|
||||
cursor . # or claude, windsurf, code, etc.
|
||||
|
||||
# 3. Work in parallel
|
||||
# Main directory: You work
|
||||
# Worktree: AI works
|
||||
# No conflicts!
|
||||
```
|
||||
|
||||
**Simple, fast, editor-agnostic.** 🚀
|
||||
463
docs/examples/codex-cli-usage.md
Normal file
463
docs/examples/codex-cli-usage.md
Normal file
@@ -0,0 +1,463 @@
|
||||
# Codex CLI Provider Usage Examples
|
||||
|
||||
This guide provides practical examples of using Task Master with the Codex CLI provider.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
Before using these examples, ensure you have:
|
||||
|
||||
```bash
|
||||
# 1. Codex CLI installed
|
||||
npm install -g @openai/codex
|
||||
|
||||
# 2. Authenticated with ChatGPT
|
||||
codex login
|
||||
|
||||
# 3. Codex CLI configured as your provider
|
||||
task-master models --set-main gpt-5-codex --codex-cli
|
||||
```
|
||||
|
||||
## Example 1: Basic Task Creation
|
||||
|
||||
Use Codex CLI to create tasks from a simple description:
|
||||
|
||||
```bash
|
||||
# Add a task with AI-powered enhancement
|
||||
task-master add-task --prompt="Implement user authentication with JWT" --research
|
||||
```
|
||||
|
||||
**What happens**:
|
||||
1. Task Master sends your prompt to GPT-5-Codex via the CLI
|
||||
2. The AI analyzes your request and generates a detailed task
|
||||
3. The task is added to your `.taskmaster/tasks/tasks.json`
|
||||
4. OAuth credentials are automatically used (no API key needed)
|
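As a rough illustration only (field names follow Task Master's task structure docs and may differ slightly), the generated entry in `tasks.json` looks something like:

```json
{
  "id": 15,
  "title": "Implement user authentication with JWT",
  "description": "Add JWT-based login, token issuance, and verification middleware",
  "status": "pending",
  "dependencies": [],
  "priority": "high"
}
```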
||||
|
||||
## Example 2: Parsing a Product Requirements Document
|
||||
|
||||
Create a comprehensive task list from a PRD:
|
||||
|
||||
```bash
|
||||
# Create your PRD
|
||||
cat > my-feature.txt <<EOF
|
||||
# User Profile Feature
|
||||
|
||||
## Requirements
|
||||
1. Users can view their profile
|
||||
2. Users can edit their information
|
||||
3. Profile pictures can be uploaded
|
||||
4. Email verification required
|
||||
|
||||
## Technical Constraints
|
||||
- Use React for frontend
|
||||
- Node.js/Express backend
|
||||
- PostgreSQL database
|
||||
EOF
|
||||
|
||||
# Parse with Codex CLI
|
||||
task-master parse-prd my-feature.txt --num-tasks 12
|
||||
```
|
||||
|
||||
**What happens**:
|
||||
1. GPT-5-Codex reads and analyzes your PRD
|
||||
2. Generates structured tasks with dependencies
|
||||
3. Creates subtasks for complex items
|
||||
4. Saves everything to `.taskmaster/tasks/`
|
||||
|
||||
## Example 3: Expanding Tasks with Research
|
||||
|
||||
Break down a complex task into detailed subtasks:
|
||||
|
||||
```bash
|
||||
# First, show your current tasks
|
||||
task-master list
|
||||
|
||||
# Expand a specific task (e.g., task 1.2)
|
||||
task-master expand --id=1.2 --research --force
|
||||
```
|
||||
|
||||
**What happens**:
|
||||
1. Codex CLI uses GPT-5 for research-level analysis
|
||||
2. Breaks down the task into logical subtasks
|
||||
3. Adds implementation details and test strategies
|
||||
4. Updates the task with dependency information
|
||||
|
||||
## Example 4: Analyzing Project Complexity
|
||||
|
||||
Get AI-powered insights into your project's task complexity:
|
||||
|
||||
```bash
|
||||
# Analyze all tasks
|
||||
task-master analyze-complexity --research
|
||||
|
||||
# View the complexity report
|
||||
task-master complexity-report
|
||||
```
|
||||
|
||||
**What happens**:
|
||||
1. GPT-5 analyzes each task's scope and requirements
|
||||
2. Assigns complexity scores and estimates subtask counts
|
||||
3. Generates a detailed report
|
||||
4. Saves to `.taskmaster/reports/task-complexity-report.json`
|
||||
|
||||
## Example 5: Using Custom Codex CLI Settings
|
||||
|
||||
Configure Codex CLI behavior for different commands:
|
||||
|
||||
```json
|
||||
// In .taskmaster/config.json
|
||||
{
|
||||
"models": {
|
||||
"main": {
|
||||
"provider": "codex-cli",
|
||||
"modelId": "gpt-5-codex",
|
||||
"maxTokens": 128000,
|
||||
"temperature": 0.2
|
||||
}
|
||||
},
|
||||
"codexCli": {
|
||||
"allowNpx": true,
|
||||
"approvalMode": "on-failure",
|
||||
"sandboxMode": "workspace-write",
|
||||
"commandSpecific": {
|
||||
"parse-prd": {
|
||||
"verbose": true,
|
||||
"approvalMode": "never"
|
||||
},
|
||||
"expand": {
|
||||
"sandboxMode": "read-only",
|
||||
"verbose": true
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
```bash
|
||||
# Now parse-prd runs with verbose output and no approvals
|
||||
task-master parse-prd requirements.txt
|
||||
|
||||
# Expand runs with read-only mode
|
||||
task-master expand --id=2.1
|
||||
```
|
||||
|
||||
## Example 6: Workflow - Building a Feature End-to-End
|
||||
|
||||
Complete workflow from PRD to implementation tracking:
|
||||
|
||||
```bash
|
||||
# Step 1: Initialize project
|
||||
task-master init
|
||||
|
||||
# Step 2: Set up Codex CLI
|
||||
task-master models --set-main gpt-5-codex --codex-cli
|
||||
task-master models --set-fallback gpt-5 --codex-cli
|
||||
|
||||
# Step 3: Create PRD
|
||||
cat > feature-prd.txt <<EOF
|
||||
# Authentication System
|
||||
|
||||
Implement a complete authentication system with:
|
||||
- User registration
|
||||
- Email verification
|
||||
- Password reset
|
||||
- Two-factor authentication
|
||||
- Session management
|
||||
EOF
|
||||
|
||||
# Step 4: Parse PRD into tasks
|
||||
task-master parse-prd feature-prd.txt --num-tasks 8
|
||||
|
||||
# Step 5: Analyze complexity
|
||||
task-master analyze-complexity --research
|
||||
|
||||
# Step 6: Expand complex tasks
|
||||
task-master expand --all --research
|
||||
|
||||
# Step 7: Start working
|
||||
task-master next
|
||||
# Shows: Task 1.1: User registration database schema
|
||||
|
||||
# Step 8: Mark completed as you work
|
||||
task-master set-status --id=1.1 --status=done
|
||||
|
||||
# Step 9: Continue to next task
|
||||
task-master next
|
||||
```
|
||||
|
||||
## Example 7: Multi-Role Configuration
|
||||
|
||||
Use Codex CLI for main tasks, Perplexity for research:
|
||||
|
||||
```json
|
||||
// In .taskmaster/config.json
|
||||
{
|
||||
"models": {
|
||||
"main": {
|
||||
"provider": "codex-cli",
|
||||
"modelId": "gpt-5-codex",
|
||||
"maxTokens": 128000,
|
||||
"temperature": 0.2
|
||||
},
|
||||
"research": {
|
||||
"provider": "perplexity",
|
||||
"modelId": "sonar-pro",
|
||||
"maxTokens": 8700,
|
||||
"temperature": 0.1
|
||||
},
|
||||
"fallback": {
|
||||
"provider": "codex-cli",
|
||||
"modelId": "gpt-5",
|
||||
"maxTokens": 128000,
|
||||
"temperature": 0.2
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
```bash
|
||||
# Main task operations use GPT-5-Codex
|
||||
task-master add-task --prompt="Build REST API endpoint"
|
||||
|
||||
# Research operations use Perplexity
|
||||
task-master analyze-complexity --research
|
||||
|
||||
# Fallback to GPT-5 if needed
|
||||
task-master expand --id=3.2 --force
|
||||
```
|
||||
|
||||
## Example 8: Troubleshooting Common Issues
|
||||
|
||||
### Issue: Codex CLI not found
|
||||
|
||||
```bash
|
||||
# Check if Codex is installed
|
||||
codex --version
|
||||
|
||||
# If not found, install globally
|
||||
npm install -g @openai/codex
|
||||
|
||||
# Or enable npx fallback in config
|
||||
# Merge the following into .taskmaster/config.json (appending with cat >> would produce invalid JSON):
|
||||
{
|
||||
"codexCli": {
|
||||
"allowNpx": true
|
||||
}
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
### Issue: Not authenticated
|
||||
|
||||
```bash
|
||||
# Check auth status
|
||||
codex
|
||||
# Use /about command to see auth info
|
||||
|
||||
# Re-authenticate if needed
|
||||
codex login
|
||||
```
|
||||
|
||||
### Issue: Want more verbose output
|
||||
|
||||
```bash
|
||||
# Enable verbose mode in config
|
||||
# Merge the following into .taskmaster/config.json (appending with cat >> would produce invalid JSON):
|
||||
{
|
||||
"codexCli": {
|
||||
"verbose": true
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
# Or for specific commands
|
||||
task-master parse-prd my-prd.txt
|
||||
# (verbose output shows detailed Codex CLI interactions)
|
||||
```
|
||||
|
||||
## Example 9: CI/CD Integration
|
||||
|
||||
Use Codex CLI in automated workflows:
|
||||
|
||||
```yaml
|
||||
# .github/workflows/task-analysis.yml
|
||||
name: Analyze Task Complexity
|
||||
|
||||
on:
|
||||
push:
|
||||
paths:
|
||||
- '.taskmaster/**'
|
||||
|
||||
jobs:
|
||||
analyze:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: Setup Node.js
|
||||
uses: actions/setup-node@v4
|
||||
with:
|
||||
node-version: '20'
|
||||
|
||||
- name: Install Task Master
|
||||
run: npm install -g task-master-ai
|
||||
|
||||
- name: Configure Codex CLI
|
||||
run: |
|
||||
npm install -g @openai/codex
|
||||
echo "${{ secrets.OPENAI_CODEX_API_KEY }}" > ~/.codex-auth
|
||||
env:
|
||||
OPENAI_CODEX_API_KEY: ${{ secrets.OPENAI_CODEX_API_KEY }}
|
||||
|
||||
- name: Configure Task Master
|
||||
run: |
|
||||
cat > .taskmaster/config.json <<EOF
|
||||
{
|
||||
"models": {
|
||||
"main": {
|
||||
"provider": "codex-cli",
|
||||
"modelId": "gpt-5"
|
||||
}
|
||||
},
|
||||
"codexCli": {
|
||||
"allowNpx": true,
|
||||
"skipGitRepoCheck": true,
|
||||
"approvalMode": "never",
|
||||
"fullAuto": true
|
||||
}
|
||||
}
|
||||
EOF
|
||||
|
||||
- name: Analyze Complexity
|
||||
run: task-master analyze-complexity --research
|
||||
|
||||
- name: Upload Report
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: complexity-report
|
||||
path: .taskmaster/reports/task-complexity-report.json
|
||||
```
|
||||
|
||||
## Best Practices
|
||||
|
||||
### 1. Use OAuth for Development
|
||||
|
||||
```bash
|
||||
# For local development, use OAuth (no API key needed)
|
||||
codex login
|
||||
task-master models --set-main gpt-5-codex --codex-cli
|
||||
```
|
||||
|
||||
### 2. Configure Approval Modes Appropriately
|
||||
|
||||
```json
|
||||
{
|
||||
"codexCli": {
|
||||
"approvalMode": "on-failure", // Safe default
|
||||
"sandboxMode": "workspace-write" // Restricts to project directory
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### 3. Use Command-Specific Settings
|
||||
|
||||
```json
|
||||
{
|
||||
"codexCli": {
|
||||
"commandSpecific": {
|
||||
"parse-prd": {
|
||||
"approvalMode": "never", // PRD parsing is safe
|
||||
"verbose": true
|
||||
},
|
||||
"expand": {
|
||||
"approvalMode": "on-request", // More cautious for task expansion
|
||||
"verbose": false
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### 4. Leverage Codebase Analysis
|
||||
|
||||
```json
|
||||
{
|
||||
"global": {
|
||||
"enableCodebaseAnalysis": true // Let Codex analyze your code
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### 5. Handle Errors Gracefully
|
||||
|
||||
```bash
|
||||
# Always configure a fallback model
|
||||
task-master models --set-fallback gpt-5 --codex-cli
|
||||
|
||||
# Or use a different provider as fallback
|
||||
task-master models --set-fallback claude-3-5-sonnet
|
||||
```
|
||||
|
||||
## Next Steps
|
||||
|
||||
- Read the [Codex CLI Provider Documentation](../providers/codex-cli.md)
|
||||
- Explore [Configuration Options](../configuration.md#codex-cli-provider)
|
||||
- Check out [Command Reference](../command-reference.md)
|
||||
- Learn about [Task Structure](../task-structure.md)
|
||||
|
||||
## Common Patterns
|
||||
|
||||
### Pattern: Daily Development Workflow
|
||||
|
||||
```bash
|
||||
# Morning: Review tasks
|
||||
task-master list
|
||||
|
||||
# Get next task
|
||||
task-master next
|
||||
|
||||
# Work on task...
|
||||
|
||||
# Update task with notes
|
||||
task-master update-subtask --id=2.3 --prompt="Implemented authentication middleware"
|
||||
|
||||
# Mark complete
|
||||
task-master set-status --id=2.3 --status=done
|
||||
|
||||
# Repeat
|
||||
```
|
||||
|
||||
### Pattern: Feature Planning
|
||||
|
||||
```bash
|
||||
# Write feature spec
|
||||
vim new-feature.txt
|
||||
|
||||
# Generate tasks
|
||||
task-master parse-prd new-feature.txt --num-tasks 10
|
||||
|
||||
# Analyze and expand
|
||||
task-master analyze-complexity --research
|
||||
task-master expand --all --research --force
|
||||
|
||||
# Review and adjust
|
||||
task-master list
|
||||
```
|
||||
|
||||
### Pattern: Sprint Planning
|
||||
|
||||
```bash
|
||||
# Parse sprint requirements
|
||||
task-master parse-prd sprint-requirements.txt
|
||||
|
||||
# Analyze complexity
|
||||
task-master analyze-complexity --research
|
||||
|
||||
# View report
|
||||
task-master complexity-report
|
||||
|
||||
# Adjust task estimates based on complexity scores
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
For more examples and advanced usage, see the [full documentation](https://docs.task-master.dev).
|
||||
@@ -1,4 +1,4 @@
|
||||
# Available Models as of September 19, 2025
|
||||
# Available Models as of October 5, 2025
|
||||
|
||||
## Main Models
|
||||
|
||||
@@ -10,6 +10,8 @@
|
||||
| anthropic | claude-3-5-sonnet-20241022 | 0.49 | 3 | 15 |
|
||||
| claude-code | opus | 0.725 | 0 | 0 |
|
||||
| claude-code | sonnet | 0.727 | 0 | 0 |
|
||||
| codex-cli | gpt-5 | 0.749 | 0 | 0 |
|
||||
| codex-cli | gpt-5-codex | 0.749 | 0 | 0 |
|
||||
| mcp | mcp-sampling | — | 0 | 0 |
|
||||
| gemini-cli | gemini-2.5-pro | 0.72 | 0 | 0 |
|
||||
| gemini-cli | gemini-2.5-flash | 0.71 | 0 | 0 |
|
||||
@@ -100,6 +102,8 @@
|
||||
| ----------- | -------------------------------------------- | --------- | ---------- | ----------- |
|
||||
| claude-code | opus | 0.725 | 0 | 0 |
|
||||
| claude-code | sonnet | 0.727 | 0 | 0 |
|
||||
| codex-cli | gpt-5 | 0.749 | 0 | 0 |
|
||||
| codex-cli | gpt-5-codex | 0.749 | 0 | 0 |
|
||||
| mcp | mcp-sampling | — | 0 | 0 |
|
||||
| gemini-cli | gemini-2.5-pro | 0.72 | 0 | 0 |
|
||||
| gemini-cli | gemini-2.5-flash | 0.71 | 0 | 0 |
|
||||
@@ -119,7 +123,7 @@
|
||||
| groq | deepseek-r1-distill-llama-70b | 0.52 | 0.75 | 0.99 |
|
||||
| perplexity | sonar-pro | — | 3 | 15 |
|
||||
| perplexity | sonar | — | 1 | 1 |
|
||||
| perplexity | deep-research | 0.211 | 2 | 8 |
|
||||
| perplexity | sonar-deep-research | 0.211 | 2 | 8 |
|
||||
| perplexity | sonar-reasoning-pro | 0.211 | 2 | 8 |
|
||||
| perplexity | sonar-reasoning | 0.211 | 1 | 5 |
|
||||
| bedrock | us.anthropic.claude-3-opus-20240229-v1:0 | 0.725 | 15 | 75 |
|
||||
@@ -140,6 +144,8 @@
|
||||
| anthropic | claude-3-5-sonnet-20241022 | 0.49 | 3 | 15 |
|
||||
| claude-code | opus | 0.725 | 0 | 0 |
|
||||
| claude-code | sonnet | 0.727 | 0 | 0 |
|
||||
| codex-cli | gpt-5 | 0.749 | 0 | 0 |
|
||||
| codex-cli | gpt-5-codex | 0.749 | 0 | 0 |
|
||||
| mcp | mcp-sampling | — | 0 | 0 |
|
||||
| gemini-cli | gemini-2.5-pro | 0.72 | 0 | 0 |
|
||||
| gemini-cli | gemini-2.5-flash | 0.71 | 0 | 0 |
|
||||
|
||||
510
docs/providers/codex-cli.md
Normal file
510
docs/providers/codex-cli.md
Normal file
@@ -0,0 +1,510 @@
|
||||
# Codex CLI Provider
|
||||
|
||||
The `codex-cli` provider integrates Task Master with OpenAI's Codex CLI via the community AI SDK provider [`ai-sdk-provider-codex-cli`](https://github.com/ben-vargas/ai-sdk-provider-codex-cli). It uses your ChatGPT subscription (OAuth) via `codex login`, with optional `OPENAI_CODEX_API_KEY` support.
|
||||
|
||||
## Why Use Codex CLI?
|
||||
|
||||
The primary benefits of using the `codex-cli` provider include:
|
||||
|
||||
- **Use Latest OpenAI Models**: Access to cutting-edge models like GPT-5 and GPT-5-Codex via ChatGPT subscription
|
||||
- **OAuth Authentication**: No API key management needed - authenticate once with `codex login`
|
||||
- **Built-in Tool Execution**: Native support for command execution, file changes, MCP tools, and web search
|
||||
- **Native JSON Schema Support**: Structured output generation without post-processing
|
||||
- **Approval/Sandbox Modes**: Fine-grained control over command execution and filesystem access for safety
|
||||
|
||||
## Quickstart
|
||||
|
||||
Get up and running with Codex CLI in 3 steps:
|
||||
|
||||
```bash
|
||||
# 1. Install Codex CLI globally
|
||||
npm install -g @openai/codex
|
||||
|
||||
# 2. Authenticate with your ChatGPT account
|
||||
codex login
|
||||
|
||||
# 3. Configure Task Master to use Codex CLI
|
||||
task-master models --set-main gpt-5-codex --codex-cli
|
||||
```
|
||||
|
||||
## Requirements
|
||||
|
||||
- **Node.js**: >= 18.0.0
|
||||
- **Codex CLI**: >= 0.42.0 (>= 0.44.0 recommended)
|
||||
- **ChatGPT Subscription**: Required for OAuth access (Plus, Pro, Business, Edu, or Enterprise)
|
||||
- **Task Master**: >= 0.27.3 (version with Codex CLI support)
|
||||
|
||||
### Checking Your Versions
|
||||
|
||||
```bash
|
||||
# Check Node.js version
|
||||
node --version
|
||||
|
||||
# Check Codex CLI version
|
||||
codex --version
|
||||
|
||||
# Check Task Master version
|
||||
task-master --version
|
||||
```
|
||||
|
||||
## Installation
|
||||
|
||||
### Install Codex CLI
|
||||
|
||||
```bash
|
||||
# Install globally via npm
|
||||
npm install -g @openai/codex
|
||||
|
||||
# Verify installation
|
||||
codex --version
|
||||
```
|
||||
|
||||
Expected output: `v0.44.0` or higher
|
||||
|
||||
### Install Task Master (if not already installed)
|
||||
|
||||
```bash
|
||||
# Install globally
|
||||
npm install -g task-master-ai
|
||||
|
||||
# Or install in your project
|
||||
npm install --save-dev task-master-ai
|
||||
```
|
||||
|
||||
## Authentication
|
||||
|
||||
### OAuth Authentication (Primary Method - Recommended)
|
||||
|
||||
The Codex CLI provider is designed to use OAuth authentication with your ChatGPT subscription:
|
||||
|
||||
```bash
|
||||
# Launch Codex CLI and authenticate
|
||||
codex login
|
||||
```
|
||||
|
||||
This will:
|
||||
1. Open a browser window for OAuth authentication
|
||||
2. Prompt you to log in with your ChatGPT account
|
||||
3. Store authentication credentials locally
|
||||
4. Allow Task Master to automatically use these credentials
|
||||
|
||||
To verify your authentication:
|
||||
```bash
|
||||
# Open interactive Codex CLI
|
||||
codex
|
||||
|
||||
# Use /about command to see auth status
|
||||
/about
|
||||
```
|
||||
|
||||
### Optional: API Key Method
|
||||
|
||||
While OAuth is the primary and recommended method, you can optionally use an OpenAI API key:
|
||||
|
||||
```bash
|
||||
# In your .env file
|
||||
OPENAI_CODEX_API_KEY=sk-your-openai-api-key-here
|
||||
```
|
||||
|
||||
**Important Notes**:
|
||||
- The API key will **only** be injected when explicitly provided
|
||||
- OAuth authentication is always preferred when available
|
||||
- Using an API key doesn't provide access to subscription-only models like GPT-5-Codex
|
||||
- For full OpenAI API access with non-subscription models, consider using the standard `openai` provider instead
|
||||
- `OPENAI_CODEX_API_KEY` is specific to the codex-cli provider to avoid conflicts with the `openai` provider's `OPENAI_API_KEY`
|
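A minimal sketch of how the two keys can coexist in `.env` (values are placeholders):

```bash
# Used by the standard `openai` provider
OPENAI_API_KEY=sk-your-openai-api-key-here

# Read only by the codex-cli provider
OPENAI_CODEX_API_KEY=sk-your-openai-api-key-here
```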
||||
|
||||
## Available Models
|
||||
|
||||
The Codex CLI provider supports only models available through ChatGPT subscription:
|
||||
|
||||
| Model ID | Description | Max Input Tokens | Max Output Tokens |
|
||||
|----------|-------------|------------------|-------------------|
|
||||
| `gpt-5` | Latest GPT-5 model | 272K | 128K |
|
||||
| `gpt-5-codex` | GPT-5 optimized for agentic software engineering | 272K | 128K |
|
||||
|
||||
**Note**: These models are only available via OAuth subscription through Codex CLI (ChatGPT Plus, Pro, Business, Edu, or Enterprise plans). For other OpenAI models, use the standard `openai` provider with an API key.
|
||||
|
||||
**Research Capabilities**: Both GPT-5 models support web search tools, making them suitable for the `research` role in addition to `main` and `fallback` roles.
|
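For example, to point the research role at GPT-5 via Codex CLI:

```bash
task-master models --set-research gpt-5 --codex-cli
```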
||||
|
||||
## Configuration
|
||||
|
||||
### Basic Configuration
|
||||
|
||||
Add Codex CLI to your `.taskmaster/config.json`:
|
||||
|
||||
```json
|
||||
{
|
||||
"models": {
|
||||
"main": {
|
||||
"provider": "codex-cli",
|
||||
"modelId": "gpt-5-codex",
|
||||
"maxTokens": 128000,
|
||||
"temperature": 0.2
|
||||
},
|
||||
"fallback": {
|
||||
"provider": "codex-cli",
|
||||
"modelId": "gpt-5",
|
||||
"maxTokens": 128000,
|
||||
"temperature": 0.2
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Advanced Configuration with Codex CLI Settings
|
||||
|
||||
The `codexCli` section allows you to customize Codex CLI behavior:
|
||||
|
||||
```json
|
||||
{
|
||||
"models": {
|
||||
"main": {
|
||||
"provider": "codex-cli",
|
||||
"modelId": "gpt-5-codex",
|
||||
"maxTokens": 128000,
|
||||
"temperature": 0.2
|
||||
}
|
||||
},
|
||||
"codexCli": {
|
||||
"allowNpx": true,
|
||||
"skipGitRepoCheck": true,
|
||||
"approvalMode": "on-failure",
|
||||
"sandboxMode": "workspace-write",
|
||||
"verbose": false
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Codex CLI Settings Reference
|
||||
|
||||
#### Core Settings
|
||||
|
||||
- **`allowNpx`** (boolean, default: `false`)
|
||||
- Allow fallback to `npx @openai/codex` if the CLI is not found on PATH
|
||||
- Useful for CI environments or systems without global npm installations
|
||||
- Example: `"allowNpx": true`
|
||||
|
||||
- **`skipGitRepoCheck`** (boolean, default: `false`)
|
||||
- Skip git repository safety check before execution
|
||||
- Recommended for CI environments or non-repository usage
|
||||
- Example: `"skipGitRepoCheck": true`
|
||||
|
||||
#### Execution Control
|
||||
|
||||
- **`approvalMode`** (string)
|
||||
- Controls when to require user approval for command execution
|
||||
- Options:
|
||||
- `"untrusted"`: Require approval for all commands
|
||||
- `"on-failure"`: Only require approval after a command fails (default)
|
||||
- `"on-request"`: Approve only when explicitly requested
|
||||
- `"never"`: Never require approval (use with caution)
|
||||
- Example: `"approvalMode": "on-failure"`
|
||||
|
||||
- **`sandboxMode`** (string)
|
||||
- Controls filesystem access permissions
|
||||
- Options:
|
||||
- `"read-only"`: Read-only access to filesystem
|
||||
- `"workspace-write"`: Allow writes to workspace directory (default)
|
||||
- `"danger-full-access"`: Full filesystem access (use with extreme caution)
|
||||
- Example: `"sandboxMode": "workspace-write"`
|
||||
|
||||
#### Path and Environment
|
||||
|
||||
- **`codexPath`** (string, optional)
|
||||
- Custom path to Codex CLI executable
|
||||
- Useful when Codex is installed in a non-standard location
|
||||
- Example: `"codexPath": "/usr/local/bin/codex"`
|
||||
|
||||
- **`cwd`** (string, optional)
|
||||
- Working directory for Codex CLI execution
|
||||
- Defaults to current working directory
|
||||
- Example: `"cwd": "/path/to/project"`
|
||||
|
||||
- **`env`** (object, optional)
|
||||
- Additional environment variables for Codex CLI
|
||||
- Example: `"env": { "DEBUG": "true" }`
|
||||
|
||||
#### Advanced Settings
|
||||
|
||||
- **`fullAuto`** (boolean, optional)
|
||||
- Fully automatic mode (equivalent to `--full-auto` flag)
|
||||
- Bypasses most approvals for fully automated workflows
|
||||
- Example: `"fullAuto": true`
|
||||
|
||||
- **`dangerouslyBypassApprovalsAndSandbox`** (boolean, optional)
|
||||
- Bypass all safety checks including approvals and sandbox
|
||||
- **WARNING**: Use with extreme caution - can execute arbitrary code
|
||||
- Example: `"dangerouslyBypassApprovalsAndSandbox": false`
|
||||
|
||||
- **`color`** (string, optional)
|
||||
- Force color handling in Codex CLI output
|
||||
- Options: `"always"`, `"never"`, `"auto"`
|
||||
- Example: `"color": "auto"`
|
||||
|
||||
- **`outputLastMessageFile`** (string, optional)
|
||||
- Write last agent message to specified file
|
||||
- Useful for debugging or logging
|
||||
- Example: `"outputLastMessageFile": "./last-message.txt"`
|
||||
|
||||
- **`verbose`** (boolean, optional)
|
||||
- Enable verbose provider logging
|
||||
- Helpful for debugging issues
|
||||
- Example: `"verbose": true`
|
||||
|
||||
### Command-Specific Settings
|
||||
|
||||
Override settings for specific Task Master commands:
|
||||
|
||||
```json
|
||||
{
|
||||
"codexCli": {
|
||||
"allowNpx": true,
|
||||
"approvalMode": "on-failure",
|
||||
"commandSpecific": {
|
||||
"parse-prd": {
|
||||
"approvalMode": "never",
|
||||
"verbose": true
|
||||
},
|
||||
"expand": {
|
||||
"sandboxMode": "read-only"
|
||||
},
|
||||
"add-task": {
|
||||
"approvalMode": "untrusted"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Usage
|
||||
|
||||
### Setting Codex CLI Models
|
||||
|
||||
```bash
|
||||
# Set Codex CLI for main role
|
||||
task-master models --set-main gpt-5-codex --codex-cli
|
||||
|
||||
# Set Codex CLI for fallback role
|
||||
task-master models --set-fallback gpt-5 --codex-cli
|
||||
|
||||
# Set Codex CLI for research role
|
||||
task-master models --set-research gpt-5 --codex-cli
|
||||
|
||||
# Verify configuration
|
||||
task-master models
|
||||
```
|
||||
|
||||
### Using Codex CLI with Task Master Commands
|
||||
|
||||
Once configured, use Task Master commands as normal:
|
||||
|
||||
```bash
|
||||
# Parse a PRD with Codex CLI
|
||||
task-master parse-prd my-requirements.txt
|
||||
|
||||
# Analyze project complexity
|
||||
task-master analyze-complexity --research
|
||||
|
||||
# Expand a task into subtasks
|
||||
task-master expand --id=1.2
|
||||
|
||||
# Add a new task with AI assistance
|
||||
task-master add-task --prompt="Implement user authentication" --research
|
||||
```
|
||||
|
||||
The provider will automatically use your OAuth credentials when Codex CLI is configured.
|
||||
|
||||
## Codebase Features
|
||||
|
||||
The Codex CLI provider is **codebase-capable**, meaning it can analyze and interact with your project files. This enables advanced features like:
|
||||
|
||||
- **Code Analysis**: Understanding your project structure and dependencies
|
||||
- **Intelligent Suggestions**: Context-aware task recommendations
|
||||
- **File Operations**: Reading and analyzing project files for better task generation
|
||||
- **Pattern Recognition**: Identifying common patterns and best practices in your codebase
|
||||
|
||||
### Enabling Codebase Analysis
|
||||
|
||||
Codebase analysis is automatically enabled when:
|
||||
1. Your provider is set to `codex-cli`
|
||||
2. `enableCodebaseAnalysis` is `true` in your global configuration (default)
|
||||
|
||||
To verify or configure:
|
||||
|
||||
```json
|
||||
{
|
||||
"global": {
|
||||
"enableCodebaseAnalysis": true
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### "codex: command not found" Error
|
||||
|
||||
**Symptoms**: Task Master reports that the Codex CLI is not found.
|
||||
|
||||
**Solutions**:
|
||||
1. **Install Codex CLI globally**:
|
||||
```bash
|
||||
npm install -g @openai/codex
|
||||
```
|
||||
|
||||
2. **Verify installation**:
|
||||
```bash
|
||||
codex --version
|
||||
```
|
||||
|
||||
3. **Alternative: Enable npx fallback**:
|
||||
```json
|
||||
{
|
||||
"codexCli": {
|
||||
"allowNpx": true
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### "Not logged in" Errors
|
||||
|
||||
**Symptoms**: Authentication errors when trying to use Codex CLI.
|
||||
|
||||
**Solutions**:
|
||||
1. **Authenticate with OAuth**:
|
||||
```bash
|
||||
codex login
|
||||
```
|
||||
|
||||
2. **Verify authentication status**:
|
||||
```bash
|
||||
codex
|
||||
# Then use /about command
|
||||
```
|
||||
|
||||
3. **Re-authenticate if needed**:
|
||||
```bash
|
||||
# Logout first
|
||||
codex
|
||||
# Use /auth command to change auth method
|
||||
|
||||
# Then login again
|
||||
codex login
|
||||
```
|
||||
|
||||
### "Old version" Warnings
|
||||
|
||||
**Symptoms**: Warnings about Codex CLI version being outdated.
|
||||
|
||||
**Solutions**:
|
||||
1. **Check current version**:
|
||||
```bash
|
||||
codex --version
|
||||
```
|
||||
|
||||
2. **Upgrade to latest version**:
|
||||
```bash
|
||||
npm install -g @openai/codex@latest
|
||||
```
|
||||
|
||||
3. **Verify upgrade**:
|
||||
```bash
|
||||
codex --version
|
||||
```
|
||||
Should show >= 0.44.0
|
||||
|
||||
### "Model not available" Errors
|
||||
|
||||
**Symptoms**: Error indicating the requested model is not available.
|
||||
|
||||
**Causes and Solutions**:
|
||||
|
||||
1. **Using unsupported model**:
|
||||
- Only `gpt-5` and `gpt-5-codex` are available via Codex CLI
|
||||
- For other OpenAI models, use the standard `openai` provider
|
||||
|
||||
2. **Subscription not active**:
|
||||
- Verify your ChatGPT subscription is active
|
||||
- Check your plan in your ChatGPT account settings
|
||||
|
||||
3. **Wrong provider selected**:
|
||||
- Verify you're using `--codex-cli` flag when setting models
|
||||
- Check `.taskmaster/config.json` shows `"provider": "codex-cli"`
|
||||
|
||||
### API Key Not Being Used
|
||||
|
||||
**Symptoms**: You've set `OPENAI_CODEX_API_KEY` but it's not being used.
|
||||
|
||||
**Expected Behavior**:
|
||||
- OAuth authentication is always preferred
|
||||
- API key is only injected when explicitly provided
|
||||
- API key doesn't grant access to subscription-only models
|
||||
|
||||
**Solutions**:
|
||||
1. **Verify OAuth is working**:
|
||||
```bash
|
||||
codex
|
||||
# Check /about for auth status
|
||||
```
|
||||
|
||||
2. **If you want to force API key usage**:
|
||||
- This is not recommended with Codex CLI
|
||||
- Consider using the standard `openai` provider instead
|
||||
|
||||
3. **Verify .env file is being loaded**:
|
||||
```bash
|
||||
# Check if .env exists in project root
|
||||
ls -la .env
|
||||
|
||||
# Verify OPENAI_CODEX_API_KEY is set
|
||||
grep OPENAI_CODEX_API_KEY .env
|
||||
```
|
||||
|
||||
### Approval/Sandbox Issues
|
||||
|
||||
**Symptoms**: Commands are blocked or filesystem access is denied.
|
||||
|
||||
**Solutions**:
|
||||
|
||||
1. **Adjust approval mode**:
|
||||
```json
|
||||
{
|
||||
"codexCli": {
|
||||
"approvalMode": "on-request"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
2. **Adjust sandbox mode**:
|
||||
```json
|
||||
{
|
||||
"codexCli": {
|
||||
"sandboxMode": "workspace-write"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
3. **For fully automated workflows** (use cautiously):
|
||||
```json
|
||||
{
|
||||
"codexCli": {
|
||||
"fullAuto": true
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Important Notes
|
||||
|
||||
- **OAuth subscription required**: No API key needed for basic operation, but requires active ChatGPT subscription
|
||||
- **Limited model selection**: Only `gpt-5` and `gpt-5-codex` available via OAuth
|
||||
- **Pricing information**: Not available for OAuth models (shows as "Unknown" in cost calculations)
|
||||
- **No automatic dependency**: The `@openai/codex` package is not added to Task Master's dependencies - install it globally or enable `allowNpx`
|
||||
- **Codebase analysis**: Automatically enabled when using `codex-cli` provider
|
||||
- **Safety first**: Default settings prioritize safety with `approvalMode: "on-failure"` and `sandboxMode: "workspace-write"`
|
||||
|
||||
## See Also
|
||||
|
||||
- [Configuration Guide](../configuration.md#codex-cli-provider) - Complete Codex CLI configuration reference
|
||||
- [Command Reference](../command-reference.md) - Using `--codex-cli` flag with commands
|
||||
- [Gemini CLI Provider](./gemini-cli.md) - Similar CLI-based provider for Google Gemini
|
||||
- [Claude Code Integration](../claude-code-integration.md) - Another CLI-based provider
|
||||
- [ai-sdk-provider-codex-cli](https://github.com/ben-vargas/ai-sdk-provider-codex-cli) - Source code for the provider package
|
||||
@@ -69,11 +69,29 @@ export function resolveTasksPath(args, log = silentLogger) {
|
||||
|
||||
// Use core findTasksPath with explicit path and normalized projectRoot context
|
||||
if (projectRoot) {
|
||||
return coreFindTasksPath(explicitPath, { projectRoot }, log);
|
||||
const foundPath = coreFindTasksPath(explicitPath, { projectRoot }, log);
|
||||
// If core function returns null and no explicit path was provided,
|
||||
// construct the expected default path as documented
|
||||
if (foundPath === null && !explicitPath) {
|
||||
const defaultPath = path.join(
|
||||
projectRoot,
|
||||
'.taskmaster',
|
||||
'tasks',
|
||||
'tasks.json'
|
||||
);
|
||||
log?.info?.(
|
||||
`Core findTasksPath returned null, using default path: ${defaultPath}`
|
||||
);
|
||||
return defaultPath;
|
||||
}
|
||||
return foundPath;
|
||||
}
|
||||
|
||||
// Fallback to core function without projectRoot context
|
||||
return coreFindTasksPath(explicitPath, null, log);
|
||||
const foundPath = coreFindTasksPath(explicitPath, null, log);
|
||||
// Note: When no projectRoot is available, we can't construct a default path
|
||||
// so we return null and let the calling code handle the error
|
||||
return foundPath;
|
||||
}
|
||||
|
||||
/**
|
||||
|
||||
@@ -75,13 +75,50 @@ function generateExampleFromSchema(schema) {
|
||||
return result;
|
||||
|
||||
case 'ZodString':
|
||||
return 'string';
|
||||
// Check for min/max length constraints
|
||||
if (def.checks) {
|
||||
const minCheck = def.checks.find((c) => c.kind === 'min');
|
||||
const maxCheck = def.checks.find((c) => c.kind === 'max');
|
||||
if (minCheck && maxCheck) {
|
||||
return (
|
||||
'<string between ' +
|
||||
minCheck.value +
|
||||
'-' +
|
||||
maxCheck.value +
|
||||
' characters>'
|
||||
);
|
||||
} else if (minCheck) {
|
||||
return '<string with at least ' + minCheck.value + ' characters>';
|
||||
} else if (maxCheck) {
|
||||
return '<string up to ' + maxCheck.value + ' characters>';
|
||||
}
|
||||
}
|
||||
return '<string>';
|
||||
|
||||
case 'ZodNumber':
|
||||
return 0;
|
||||
// Check for int, positive, min/max constraints
|
||||
if (def.checks) {
|
||||
const intCheck = def.checks.find((c) => c.kind === 'int');
|
||||
const minCheck = def.checks.find((c) => c.kind === 'min');
|
||||
const maxCheck = def.checks.find((c) => c.kind === 'max');
|
||||
|
||||
if (intCheck && minCheck && minCheck.value > 0) {
|
||||
return '<positive integer>';
|
||||
} else if (intCheck) {
|
||||
return '<integer>';
|
||||
} else if (minCheck || maxCheck) {
|
||||
return (
|
||||
'<number' +
|
||||
(minCheck ? ' >= ' + minCheck.value : '') +
|
||||
(maxCheck ? ' <= ' + maxCheck.value : '') +
|
||||
'>'
|
||||
);
|
||||
}
|
||||
}
|
||||
return '<number>';
|
||||
|
||||
case 'ZodBoolean':
|
||||
return false;
|
||||
return '<boolean>';
|
||||
|
||||
case 'ZodArray':
|
||||
const elementExample = generateExampleFromSchema(def.type);
|
||||
|
||||
11765
package-lock.json
generated
11765
package-lock.json
generated
File diff suppressed because it is too large
Load Diff
53
package.json
53
package.json
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "task-master-ai",
|
||||
"version": "0.27.1",
|
||||
"version": "0.28.0-rc.2",
|
||||
"description": "A task management system for ambitious AI-driven development that doesn't overwhelm and confuse Cursor.",
|
||||
"main": "index.js",
|
||||
"type": "module",
|
||||
@@ -17,7 +17,7 @@
|
||||
"turbo:build": "turbo build",
|
||||
"turbo:typecheck": "turbo typecheck",
|
||||
"build:build-config": "npm run build -w @tm/build-config",
|
||||
"test": "node --experimental-vm-modules node_modules/.bin/jest",
|
||||
"test": "cross-env NODE_ENV=test node --experimental-vm-modules node_modules/.bin/jest",
|
||||
"test:unit": "node --experimental-vm-modules node_modules/.bin/jest --testPathPattern=unit",
|
||||
"test:integration": "node --experimental-vm-modules node_modules/.bin/jest --testPathPattern=integration",
|
||||
"test:fails": "node --experimental-vm-modules node_modules/.bin/jest --onlyFailures",
|
||||
@@ -52,23 +52,27 @@
|
||||
"author": "Eyal Toledano",
|
||||
"license": "MIT WITH Commons-Clause",
|
||||
"dependencies": {
|
||||
"@ai-sdk/amazon-bedrock": "^2.2.9",
|
||||
"@ai-sdk/anthropic": "^1.2.10",
|
||||
"@ai-sdk/azure": "^1.3.17",
|
||||
"@ai-sdk/google": "^1.2.13",
|
||||
"@ai-sdk/google-vertex": "^2.2.23",
|
||||
"@ai-sdk/groq": "^1.2.9",
|
||||
"@ai-sdk/mistral": "^1.2.7",
|
||||
"@ai-sdk/openai": "^1.3.20",
|
||||
"@ai-sdk/perplexity": "^1.1.7",
|
||||
"@ai-sdk/xai": "^1.2.15",
|
||||
"@anthropic-ai/sdk": "^0.39.0",
|
||||
"@aws-sdk/credential-providers": "^3.817.0",
|
||||
"@ai-sdk/amazon-bedrock": "^3.0.23",
|
||||
"@ai-sdk/anthropic": "^2.0.18",
|
||||
"@ai-sdk/azure": "^2.0.34",
|
||||
"@ai-sdk/google": "^2.0.16",
|
||||
"@ai-sdk/google-vertex": "^3.0.29",
|
||||
"@ai-sdk/groq": "^2.0.21",
|
||||
"@ai-sdk/mistral": "^2.0.16",
|
||||
"@ai-sdk/openai": "^2.0.34",
|
||||
"@ai-sdk/perplexity": "^2.0.10",
|
||||
"@ai-sdk/provider": "^2.0.0",
|
||||
"@ai-sdk/provider-utils": "^3.0.10",
|
||||
"@ai-sdk/xai": "^2.0.22",
|
||||
"@aws-sdk/credential-providers": "^3.895.0",
|
||||
"@inquirer/search": "^3.0.15",
|
||||
"@openrouter/ai-sdk-provider": "^0.4.5",
|
||||
"@openrouter/ai-sdk-provider": "^1.2.0",
|
||||
"@streamparser/json": "^0.0.22",
|
||||
"@supabase/supabase-js": "^2.57.4",
|
||||
"ai": "^4.3.10",
|
||||
"ai": "^5.0.51",
|
||||
"ai-sdk-provider-claude-code": "^1.1.4",
|
||||
"ai-sdk-provider-codex-cli": "^0.3.0",
|
||||
"ai-sdk-provider-gemini-cli": "^1.1.1",
|
||||
"ajv": "^8.17.1",
|
||||
"ajv-formats": "^3.0.1",
|
||||
"boxen": "^8.0.1",
|
||||
@@ -78,7 +82,7 @@
|
||||
"cli-table3": "^0.6.5",
|
||||
"commander": "^12.1.0",
|
||||
"cors": "^2.8.5",
|
||||
"dotenv": "^16.3.1",
|
||||
"dotenv": "^16.6.1",
|
||||
"express": "^4.21.2",
|
||||
"fastmcp": "^3.5.0",
|
||||
"figlet": "^1.8.0",
|
||||
@@ -93,17 +97,14 @@
|
||||
"lru-cache": "^10.2.0",
|
||||
"marked": "^15.0.12",
|
||||
"marked-terminal": "^7.3.0",
|
||||
"ollama-ai-provider": "^1.2.0",
|
||||
"openai": "^4.89.0",
|
||||
"ollama-ai-provider-v2": "^1.3.1",
|
||||
"ora": "^8.2.0",
|
||||
"uuid": "^11.1.0",
|
||||
"zod": "^3.23.8",
|
||||
"zod-to-json-schema": "^3.24.5"
|
||||
"zod": "^4.1.11"
|
||||
},
|
||||
"optionalDependencies": {
|
||||
"@anthropic-ai/claude-code": "^1.0.88",
|
||||
"@biomejs/cli-linux-x64": "^1.9.4",
|
||||
"ai-sdk-provider-gemini-cli": "^0.1.3"
|
||||
"@biomejs/cli-linux-x64": "^1.9.4"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=18.0.0"
|
||||
@@ -127,12 +128,12 @@
|
||||
"@changesets/changelog-github": "^0.5.1",
|
||||
"@changesets/cli": "^2.28.1",
|
||||
"@manypkg/cli": "^0.25.1",
|
||||
"@tm/ai-sdk-provider-grok-cli": "*",
|
||||
"@tm/cli": "*",
|
||||
"@types/jest": "^29.5.14",
|
||||
"@types/marked-terminal": "^6.1.1",
|
||||
"concurrently": "^9.2.1",
|
||||
"cross-env": "^10.0.0",
|
||||
"dotenv-mono": "^1.5.1",
|
||||
"execa": "^8.0.1",
|
||||
"jest": "^29.7.0",
|
||||
"jest-environment-node": "^29.7.0",
|
||||
@@ -142,7 +143,7 @@
|
||||
"ts-jest": "^29.4.2",
|
||||
"tsdown": "^0.15.2",
|
||||
"tsx": "^4.20.4",
|
||||
"turbo": "^2.5.6",
|
||||
"typescript": "^5.7.3"
|
||||
"turbo": "2.5.6",
|
||||
"typescript": "^5.9.2"
|
||||
}
|
||||
}
|
||||
|
||||
165
packages/ai-sdk-provider-grok-cli/README.md
Normal file
165
packages/ai-sdk-provider-grok-cli/README.md
Normal file
@@ -0,0 +1,165 @@
|
||||
# AI SDK Provider for Grok CLI
|
||||
|
||||
A provider for the [AI SDK](https://sdk.vercel.ai) that integrates with [Grok CLI](https://docs.x.ai/api) for accessing xAI's Grok language models.
|
||||
|
||||
## Features
|
||||
|
||||
- ✅ **AI SDK v5 Compatible** - Full support for the latest AI SDK interfaces
|
||||
- ✅ **Streaming & Non-streaming** - Both generation modes supported
|
||||
- ✅ **Error Handling** - Comprehensive error handling with retry logic
|
||||
- ✅ **Type Safety** - Full TypeScript support with proper type definitions
|
||||
- ✅ **JSON Mode** - Automatic JSON extraction from responses
|
||||
- ✅ **Abort Signals** - Proper cancellation support
|
||||
|
||||
## Installation
|
||||
|
||||
```bash
|
||||
npm install @tm/ai-sdk-provider-grok-cli
|
||||
# or
|
||||
yarn add @tm/ai-sdk-provider-grok-cli
|
||||
```
|
||||
|
||||
## Prerequisites
|
||||
|
||||
1. Install the Grok CLI:
|
||||
|
||||
```bash
|
||||
npm install -g grok-cli
|
||||
# or follow xAI's installation instructions
|
||||
```
|
||||
|
||||
2. Set up authentication:
|
||||
|
||||
```bash
|
||||
export GROK_CLI_API_KEY="your-api-key"
|
||||
# or configure via grok CLI: grok config set api-key your-key
|
||||
```
|
||||
|
||||
## Usage
|
||||
|
||||
### Basic Usage
|
||||
|
||||
```typescript
|
||||
import { grokCli } from '@tm/ai-sdk-provider-grok-cli';
|
||||
import { generateText } from 'ai';
|
||||
|
||||
const result = await generateText({
|
||||
model: grokCli('grok-3-latest'),
|
||||
prompt: 'Write a haiku about TypeScript'
|
||||
});
|
||||
|
||||
console.log(result.text);
|
||||
```
|
||||
|
||||
### Streaming
|
||||
|
||||
```typescript
|
||||
import { grokCli } from '@tm/ai-sdk-provider-grok-cli';
|
||||
import { streamText } from 'ai';
|
||||
|
||||
const { textStream } = await streamText({
|
||||
model: grokCli('grok-4-latest'),
|
||||
prompt: 'Explain quantum computing'
|
||||
});
|
||||
|
||||
for await (const delta of textStream) {
|
||||
process.stdout.write(delta);
|
||||
}
|
||||
```
|
||||
|
||||
### JSON Mode
|
||||
|
||||
```typescript
|
||||
import { grokCli } from '@tm/ai-sdk-provider-grok-cli';
|
||||
import { generateObject } from 'ai';
|
||||
import { z } from 'zod';
|
||||
|
||||
const result = await generateObject({
|
||||
model: grokCli('grok-3-latest'),
|
||||
schema: z.object({
|
||||
name: z.string(),
|
||||
age: z.number(),
|
||||
hobbies: z.array(z.string())
|
||||
}),
|
||||
prompt: 'Generate a person profile'
|
||||
});
|
||||
|
||||
console.log(result.object);
|
||||
```
|
||||
|
||||
## Supported Models
|
||||
|
||||
- `grok-3-latest` - Grok 3 (latest version)
|
||||
- `grok-4-latest` - Grok 4 (latest version)
|
||||
- `grok-4` - Grok 4 (stable)
|
||||
- Custom model strings supported
|
||||
|
||||
## Configuration
|
||||
|
||||
### Provider Settings
|
||||
|
||||
```typescript
|
||||
import { createGrokCli } from '@tm/ai-sdk-provider-grok-cli';
|
||||
|
||||
const grok = createGrokCli({
|
||||
apiKey: 'your-api-key', // Optional if set via env/CLI
|
||||
timeout: 120000, // 2 minutes default
|
||||
workingDirectory: '/path/to/project', // Optional
|
||||
baseURL: 'https://api.x.ai' // Optional
|
||||
});
|
||||
```
|
||||
|
||||
### Model Settings
|
||||
|
||||
```typescript
|
||||
const model = grok('grok-4-latest', {
|
||||
timeout: 300000, // 5 minutes for grok-4
|
||||
// Other CLI-specific settings
|
||||
});
|
||||
```
|
||||
|
||||
## Error Handling
|
||||
|
||||
The provider includes comprehensive error handling:
|
||||
|
||||
```typescript
|
||||
import {
|
||||
isAuthenticationError,
|
||||
isTimeoutError,
|
||||
isInstallationError
|
||||
} from '@tm/ai-sdk-provider-grok-cli';
|
||||
|
||||
try {
|
||||
const result = await generateText({
|
||||
model: grokCli('grok-4-latest'),
|
||||
prompt: 'Hello!'
|
||||
});
|
||||
} catch (error) {
|
||||
if (isAuthenticationError(error)) {
|
||||
console.error('Authentication failed:', error.message);
|
||||
} else if (isTimeoutError(error)) {
|
||||
console.error('Request timed out:', error.message);
|
||||
} else if (isInstallationError(error)) {
|
||||
console.error('Grok CLI not installed or not found in PATH');
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Development
|
||||
|
||||
```bash
|
||||
# Install dependencies
|
||||
npm install
|
||||
|
||||
# Start development mode (keep running during development)
|
||||
npm run dev
|
||||
|
||||
# Type check
|
||||
npm run typecheck
|
||||
|
||||
# Run tests (requires build first)
|
||||
NODE_ENV=production npm run build
|
||||
npm test
|
||||
```
|
||||
|
||||
**Important**: Always run `npm run dev` and keep it running during development. This ensures proper compilation and hot-reloading of TypeScript files.
|
||||
35
packages/ai-sdk-provider-grok-cli/package.json
Normal file
35
packages/ai-sdk-provider-grok-cli/package.json
Normal file
@@ -0,0 +1,35 @@
|
||||
{
|
||||
"name": "@tm/ai-sdk-provider-grok-cli",
|
||||
"private": true,
|
||||
"description": "AI SDK provider for Grok CLI integration",
|
||||
"type": "module",
|
||||
"types": "./src/index.ts",
|
||||
"main": "./dist/index.js",
|
||||
"exports": {
|
||||
".": "./src/index.ts"
|
||||
},
|
||||
"scripts": {
|
||||
"test": "vitest run",
|
||||
"test:watch": "vitest",
|
||||
"test:ui": "vitest --ui",
|
||||
"typecheck": "tsc --noEmit"
|
||||
},
|
||||
"dependencies": {
|
||||
"@ai-sdk/provider": "^2.0.0",
|
||||
"@ai-sdk/provider-utils": "^3.0.10",
|
||||
"jsonc-parser": "^3.3.1"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@types/node": "^22.18.6",
|
||||
"typescript": "^5.9.2",
|
||||
"vitest": "^3.2.4"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=18"
|
||||
},
|
||||
"keywords": ["ai", "grok", "x.ai", "cli", "language-model", "provider"],
|
||||
"files": ["dist/**/*", "README.md"],
|
||||
"publishConfig": {
|
||||
"access": "public"
|
||||
}
|
||||
}
|
||||
188
packages/ai-sdk-provider-grok-cli/src/errors.test.ts
Normal file
188
packages/ai-sdk-provider-grok-cli/src/errors.test.ts
Normal file
@@ -0,0 +1,188 @@
|
||||
/**
|
||||
* Tests for error handling utilities
|
||||
*/
|
||||
|
||||
import { APICallError, LoadAPIKeyError } from '@ai-sdk/provider';
|
||||
import { describe, expect, it } from 'vitest';
|
||||
import {
|
||||
createAPICallError,
|
||||
createAuthenticationError,
|
||||
createInstallationError,
|
||||
createTimeoutError,
|
||||
getErrorMetadata,
|
||||
isAuthenticationError,
|
||||
isInstallationError,
|
||||
isTimeoutError
|
||||
} from './errors.js';
|
||||
|
||||
describe('createAPICallError', () => {
|
||||
it('should create APICallError with metadata', () => {
|
||||
const error = createAPICallError({
|
||||
message: 'Test error',
|
||||
code: 'TEST_ERROR',
|
||||
exitCode: 1,
|
||||
stderr: 'Error output',
|
||||
stdout: 'Success output',
|
||||
promptExcerpt: 'Test prompt',
|
||||
isRetryable: true
|
||||
});
|
||||
|
||||
expect(error).toBeInstanceOf(APICallError);
|
||||
expect(error.message).toBe('Test error');
|
||||
expect(error.isRetryable).toBe(true);
|
||||
expect(error.url).toBe('grok-cli://command');
|
||||
expect(error.data).toEqual({
|
||||
code: 'TEST_ERROR',
|
||||
exitCode: 1,
|
||||
stderr: 'Error output',
|
||||
stdout: 'Success output',
|
||||
promptExcerpt: 'Test prompt'
|
||||
});
|
||||
});
|
||||
|
||||
it('should create APICallError with minimal parameters', () => {
|
||||
const error = createAPICallError({
|
||||
message: 'Simple error'
|
||||
});
|
||||
|
||||
expect(error).toBeInstanceOf(APICallError);
|
||||
expect(error.message).toBe('Simple error');
|
||||
expect(error.isRetryable).toBe(false);
|
||||
});
|
||||
});
|
||||
|
||||
describe('createAuthenticationError', () => {
|
||||
it('should create LoadAPIKeyError with custom message', () => {
|
||||
const error = createAuthenticationError({
|
||||
message: 'Custom auth error'
|
||||
});
|
||||
|
||||
expect(error).toBeInstanceOf(LoadAPIKeyError);
|
||||
expect(error.message).toBe('Custom auth error');
|
||||
});
|
||||
|
||||
it('should create LoadAPIKeyError with default message', () => {
|
||||
const error = createAuthenticationError({});
|
||||
|
||||
expect(error).toBeInstanceOf(LoadAPIKeyError);
|
||||
expect(error.message).toContain('Authentication failed');
|
||||
});
|
||||
});
|
||||
|
||||
describe('createTimeoutError', () => {
|
||||
it('should create APICallError for timeout', () => {
|
||||
const error = createTimeoutError({
|
||||
message: 'Operation timed out',
|
||||
timeoutMs: 5000,
|
||||
promptExcerpt: 'Test prompt'
|
||||
});
|
||||
|
||||
expect(error).toBeInstanceOf(APICallError);
|
||||
expect(error.message).toBe('Operation timed out');
|
||||
expect(error.isRetryable).toBe(true);
|
||||
expect(error.data).toEqual({
|
||||
code: 'TIMEOUT',
|
||||
promptExcerpt: 'Test prompt',
|
||||
timeoutMs: 5000
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
describe('createInstallationError', () => {
|
||||
it('should create APICallError for installation issues', () => {
|
||||
const error = createInstallationError({
|
||||
message: 'CLI not found'
|
||||
});
|
||||
|
||||
expect(error).toBeInstanceOf(APICallError);
|
||||
expect(error.message).toBe('CLI not found');
|
||||
expect(error.isRetryable).toBe(false);
|
||||
expect(error.url).toBe('grok-cli://installation');
|
||||
});
|
||||
|
||||
it('should create APICallError with default message', () => {
|
||||
const error = createInstallationError({});
|
||||
|
||||
expect(error).toBeInstanceOf(APICallError);
|
||||
expect(error.message).toContain('Grok CLI is not installed');
|
||||
});
|
||||
});
|
||||
|
||||
describe('isAuthenticationError', () => {
|
||||
it('should return true for LoadAPIKeyError', () => {
|
||||
const error = new LoadAPIKeyError({ message: 'Auth failed' });
|
||||
expect(isAuthenticationError(error)).toBe(true);
|
||||
});
|
||||
|
||||
it('should return true for APICallError with 401 exit code', () => {
|
||||
const error = new APICallError({
|
||||
message: 'Unauthorized',
|
||||
data: { exitCode: 401 }
|
||||
});
|
||||
expect(isAuthenticationError(error)).toBe(true);
|
||||
});
|
||||
|
||||
it('should return false for other errors', () => {
|
||||
const error = new Error('Generic error');
|
||||
expect(isAuthenticationError(error)).toBe(false);
|
||||
});
|
||||
});
|
||||
|
||||
describe('isTimeoutError', () => {
|
||||
it('should return true for timeout APICallError', () => {
|
||||
const error = new APICallError({
|
||||
message: 'Timeout',
|
||||
data: { code: 'TIMEOUT' }
|
||||
});
|
||||
expect(isTimeoutError(error)).toBe(true);
|
||||
});
|
||||
|
||||
it('should return false for other errors', () => {
|
||||
const error = new APICallError({ message: 'Other error' });
|
||||
expect(isTimeoutError(error)).toBe(false);
|
||||
});
|
||||
});
|
||||
|
||||
describe('isInstallationError', () => {
|
||||
it('should return true for installation APICallError', () => {
|
||||
const error = new APICallError({
|
||||
message: 'Not installed',
|
||||
url: 'grok-cli://installation'
|
||||
});
|
||||
expect(isInstallationError(error)).toBe(true);
|
||||
});
|
||||
|
||||
it('should return false for other errors', () => {
|
||||
const error = new APICallError({ message: 'Other error' });
|
||||
expect(isInstallationError(error)).toBe(false);
|
||||
});
|
||||
});
|
||||
|
||||
describe('getErrorMetadata', () => {
|
||||
it('should return metadata from APICallError', () => {
|
||||
const metadata = {
|
||||
code: 'TEST_ERROR',
|
||||
exitCode: 1,
|
||||
stderr: 'Error output'
|
||||
};
|
||||
const error = new APICallError({
|
||||
message: 'Test error',
|
||||
data: metadata
|
||||
});
|
||||
|
||||
const result = getErrorMetadata(error);
|
||||
expect(result).toEqual(metadata);
|
||||
});
|
||||
|
||||
it('should return undefined for errors without metadata', () => {
|
||||
const error = new Error('Generic error');
|
||||
const result = getErrorMetadata(error);
|
||||
expect(result).toBeUndefined();
|
||||
});
|
||||
|
||||
it('should return undefined for APICallError without data', () => {
|
||||
const error = new APICallError({ message: 'Test error' });
|
||||
const result = getErrorMetadata(error);
|
||||
expect(result).toBeUndefined();
|
||||
});
|
||||
});
|
||||
187
packages/ai-sdk-provider-grok-cli/src/errors.ts
Normal file
@@ -0,0 +1,187 @@
|
||||
/**
|
||||
* Error handling utilities for Grok CLI provider
|
||||
*/
|
||||
|
||||
import { APICallError, LoadAPIKeyError } from '@ai-sdk/provider';
|
||||
import type { GrokCliErrorMetadata } from './types.js';
|
||||
|
||||
/**
|
||||
* Parameters for creating API call errors
|
||||
*/
|
||||
interface CreateAPICallErrorParams {
|
||||
/** Error message */
|
||||
message: string;
|
||||
/** Error code */
|
||||
code?: string;
|
||||
/** Process exit code */
|
||||
exitCode?: number;
|
||||
/** Standard error output */
|
||||
stderr?: string;
|
||||
/** Standard output */
|
||||
stdout?: string;
|
||||
/** Excerpt of the prompt */
|
||||
promptExcerpt?: string;
|
||||
/** Whether the error is retryable */
|
||||
isRetryable?: boolean;
|
||||
}
|
||||
|
||||
/**
|
||||
* Parameters for creating authentication errors
|
||||
*/
|
||||
interface CreateAuthenticationErrorParams {
|
||||
/** Error message */
|
||||
message?: string;
|
||||
}
|
||||
|
||||
/**
|
||||
* Parameters for creating timeout errors
|
||||
*/
|
||||
interface CreateTimeoutErrorParams {
|
||||
/** Error message */
|
||||
message: string;
|
||||
/** Excerpt of the prompt */
|
||||
promptExcerpt?: string;
|
||||
/** Timeout in milliseconds */
|
||||
timeoutMs: number;
|
||||
}
|
||||
|
||||
/**
|
||||
* Parameters for creating installation errors
|
||||
*/
|
||||
interface CreateInstallationErrorParams {
|
||||
/** Error message */
|
||||
message?: string;
|
||||
}
|
||||
|
||||
/**
|
||||
* Create an API call error with Grok CLI specific metadata
|
||||
*/
|
||||
export function createAPICallError({
|
||||
message,
|
||||
code,
|
||||
exitCode,
|
||||
stderr,
|
||||
stdout,
|
||||
promptExcerpt,
|
||||
isRetryable = false
|
||||
}: CreateAPICallErrorParams): APICallError {
|
||||
const metadata: GrokCliErrorMetadata = {
|
||||
code,
|
||||
exitCode,
|
||||
stderr,
|
||||
stdout,
|
||||
promptExcerpt
|
||||
};
|
||||
|
||||
return new APICallError({
|
||||
message,
|
||||
isRetryable,
|
||||
url: 'grok-cli://command',
|
||||
requestBodyValues: promptExcerpt ? { prompt: promptExcerpt } : undefined,
|
||||
data: metadata
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Create an authentication error
|
||||
*/
|
||||
export function createAuthenticationError({
|
||||
message
|
||||
}: CreateAuthenticationErrorParams): LoadAPIKeyError {
|
||||
return new LoadAPIKeyError({
|
||||
message:
|
||||
message ||
|
||||
'Authentication failed. Please ensure Grok CLI is properly configured with API key.'
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a timeout error
|
||||
*/
|
||||
export function createTimeoutError({
|
||||
message,
|
||||
promptExcerpt,
|
||||
timeoutMs
|
||||
}: CreateTimeoutErrorParams): APICallError {
|
||||
const metadata: GrokCliErrorMetadata & { timeoutMs: number } = {
|
||||
code: 'TIMEOUT',
|
||||
promptExcerpt,
|
||||
timeoutMs
|
||||
};
|
||||
|
||||
return new APICallError({
|
||||
message,
|
||||
isRetryable: true,
|
||||
url: 'grok-cli://command',
|
||||
requestBodyValues: promptExcerpt ? { prompt: promptExcerpt } : undefined,
|
||||
data: metadata
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a CLI installation error
|
||||
*/
|
||||
export function createInstallationError({
|
||||
message
|
||||
}: CreateInstallationErrorParams): APICallError {
|
||||
return new APICallError({
|
||||
message:
|
||||
message ||
|
||||
'Grok CLI is not installed or not found in PATH. Please install with: npm install -g @vibe-kit/grok-cli',
|
||||
isRetryable: false,
|
||||
url: 'grok-cli://installation',
|
||||
requestBodyValues: undefined
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if an error is an authentication error
|
||||
*/
|
||||
export function isAuthenticationError(
|
||||
error: unknown
|
||||
): error is LoadAPIKeyError {
|
||||
if (error instanceof LoadAPIKeyError) return true;
|
||||
if (error instanceof APICallError) {
|
||||
const metadata = error.data as GrokCliErrorMetadata | undefined;
|
||||
if (!metadata) return false;
|
||||
return (
|
||||
metadata.exitCode === 401 ||
|
||||
metadata.code === 'AUTHENTICATION_ERROR' ||
|
||||
metadata.code === 'UNAUTHORIZED'
|
||||
);
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if an error is a timeout error
|
||||
*/
|
||||
export function isTimeoutError(error: unknown): error is APICallError {
|
||||
if (
|
||||
error instanceof APICallError &&
|
||||
(error.data as GrokCliErrorMetadata)?.code === 'TIMEOUT'
|
||||
)
|
||||
return true;
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if an error is an installation error
|
||||
*/
|
||||
export function isInstallationError(error: unknown): error is APICallError {
|
||||
if (error instanceof APICallError && error.url === 'grok-cli://installation')
|
||||
return true;
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get error metadata from an error
|
||||
*/
|
||||
export function getErrorMetadata(
|
||||
error: unknown
|
||||
): GrokCliErrorMetadata | undefined {
|
||||
if (error instanceof APICallError && error.data) {
|
||||
return error.data as GrokCliErrorMetadata;
|
||||
}
|
||||
return undefined;
|
||||
}
|
||||
@@ -1,53 +1,51 @@
|
||||
/**
|
||||
* @fileoverview Grok CLI Language Model implementation
|
||||
* Grok CLI Language Model implementation for AI SDK v5
|
||||
*/
|
||||
|
||||
import { spawn } from 'child_process';
|
||||
import { promises as fs } from 'fs';
|
||||
import { homedir } from 'os';
|
||||
import { join } from 'path';
|
||||
import type {
|
||||
LanguageModelV2,
|
||||
LanguageModelV2CallOptions,
|
||||
LanguageModelV2CallWarning
|
||||
} from '@ai-sdk/provider';
|
||||
import { NoSuchModelError } from '@ai-sdk/provider';
|
||||
import { generateId } from '@ai-sdk/provider-utils';
|
||||
import {
|
||||
createPromptFromMessages,
|
||||
convertFromGrokCliResponse,
|
||||
escapeShellArg
|
||||
} from './message-converter.js';
|
||||
import { extractJson } from './json-extractor.js';
|
||||
|
||||
import {
|
||||
createAPICallError,
|
||||
createAuthenticationError,
|
||||
createInstallationError,
|
||||
createTimeoutError
|
||||
} from './errors.js';
|
||||
import { spawn } from 'child_process';
|
||||
import { promises as fs } from 'fs';
|
||||
import { join } from 'path';
|
||||
import { homedir } from 'os';
|
||||
import { extractJson } from './json-extractor.js';
|
||||
import {
|
||||
convertFromGrokCliResponse,
|
||||
createPromptFromMessages,
|
||||
escapeShellArg
|
||||
} from './message-converter.js';
|
||||
import type {
|
||||
GrokCliLanguageModelOptions,
|
||||
GrokCliModelId,
|
||||
GrokCliSettings
|
||||
} from './types.js';
|
||||
|
||||
/**
|
||||
* @typedef {import('./types.js').GrokCliSettings} GrokCliSettings
|
||||
* @typedef {import('./types.js').GrokCliModelId} GrokCliModelId
|
||||
* Grok CLI Language Model implementation for AI SDK v5
|
||||
*/
|
||||
export class GrokCliLanguageModel implements LanguageModelV2 {
|
||||
readonly specificationVersion = 'v2' as const;
|
||||
readonly defaultObjectGenerationMode = 'json' as const;
|
||||
readonly supportsImageUrls = false;
|
||||
readonly supportsStructuredOutputs = false;
|
||||
readonly supportedUrls: Record<string, RegExp[]> = {};
|
||||
|
||||
/**
|
||||
* @typedef {Object} GrokCliLanguageModelOptions
|
||||
* @property {GrokCliModelId} id - Model ID
|
||||
* @property {GrokCliSettings} [settings] - Model settings
|
||||
*/
|
||||
readonly modelId: GrokCliModelId;
|
||||
readonly settings: GrokCliSettings;
|
||||
|
||||
export class GrokCliLanguageModel {
|
||||
specificationVersion = 'v1';
|
||||
defaultObjectGenerationMode = 'json';
|
||||
supportsImageUrls = false;
|
||||
supportsStructuredOutputs = false;
|
||||
|
||||
/** @type {GrokCliModelId} */
|
||||
modelId;
|
||||
|
||||
/** @type {GrokCliSettings} */
|
||||
settings;
|
||||
|
||||
/**
|
||||
* @param {GrokCliLanguageModelOptions} options
|
||||
*/
|
||||
constructor(options) {
|
||||
constructor(options: GrokCliLanguageModelOptions) {
|
||||
this.modelId = options.id;
|
||||
this.settings = options.settings ?? {};
|
||||
|
||||
@@ -64,15 +62,14 @@ export class GrokCliLanguageModel {
|
||||
}
|
||||
}
|
||||
|
||||
get provider() {
|
||||
get provider(): string {
|
||||
return 'grok-cli';
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if Grok CLI is installed and available
|
||||
* @returns {Promise<boolean>}
|
||||
*/
|
||||
async checkGrokCliInstallation() {
|
||||
private async checkGrokCliInstallation(): Promise<boolean> {
|
||||
return new Promise((resolve) => {
|
||||
const child = spawn('grok', ['--version'], {
|
||||
stdio: 'pipe'
|
||||
@@ -85,9 +82,8 @@ export class GrokCliLanguageModel {
|
||||
|
||||
/**
|
||||
* Get API key from settings or environment
|
||||
* @returns {Promise<string|null>}
|
||||
*/
|
||||
async getApiKey() {
|
||||
private async getApiKey(): Promise<string | null> {
|
||||
// Check settings first
|
||||
if (this.settings.apiKey) {
|
||||
return this.settings.apiKey;
|
||||
@@ -111,22 +107,32 @@ export class GrokCliLanguageModel {
|
||||
|
||||
/**
|
||||
* Execute Grok CLI command
|
||||
* @param {Array<string>} args - Command line arguments
|
||||
* @param {Object} options - Execution options
|
||||
* @returns {Promise<{stdout: string, stderr: string, exitCode: number}>}
|
||||
*/
|
||||
async executeGrokCli(args, options = {}) {
|
||||
const timeout = options.timeout || this.settings.timeout || 120000; // 2 minutes default
|
||||
private async executeGrokCli(
|
||||
args: string[],
|
||||
options: { timeout?: number; apiKey?: string } = {}
|
||||
): Promise<{ stdout: string; stderr: string; exitCode: number }> {
|
||||
// Default timeout based on model type
|
||||
let defaultTimeout = 120000; // 2 minutes default
|
||||
if (this.modelId.includes('grok-4')) {
|
||||
defaultTimeout = 600000; // 10 minutes for grok-4 models (they seem to hang during setup)
|
||||
}
|
||||
|
||||
const timeout = options.timeout ?? this.settings.timeout ?? defaultTimeout;
|
||||
|
||||
return new Promise((resolve, reject) => {
|
||||
const child = spawn('grok', args, {
|
||||
stdio: 'pipe',
|
||||
cwd: this.settings.workingDirectory || process.cwd()
|
||||
cwd: this.settings.workingDirectory || process.cwd(),
|
||||
env:
|
||||
options.apiKey === undefined
|
||||
? process.env
|
||||
: { ...process.env, GROK_CLI_API_KEY: options.apiKey }
|
||||
});
|
||||
|
||||
let stdout = '';
|
||||
let stderr = '';
|
||||
let timeoutId;
|
||||
let timeoutId: NodeJS.Timeout | undefined;
|
||||
|
||||
// Set up timeout
|
||||
if (timeout > 0) {
|
||||
@@ -142,24 +148,26 @@ export class GrokCliLanguageModel {
|
||||
}, timeout);
|
||||
}
|
||||
|
||||
child.stdout.on('data', (data) => {
|
||||
stdout += data.toString();
|
||||
child.stdout?.on('data', (data) => {
|
||||
const chunk = data.toString();
|
||||
stdout += chunk;
|
||||
});
|
||||
|
||||
child.stderr.on('data', (data) => {
|
||||
stderr += data.toString();
|
||||
child.stderr?.on('data', (data) => {
|
||||
const chunk = data.toString();
|
||||
stderr += chunk;
|
||||
});
|
||||
|
||||
child.on('error', (error) => {
|
||||
if (timeoutId) clearTimeout(timeoutId);
|
||||
|
||||
if (error.code === 'ENOENT') {
|
||||
if ((error as any).code === 'ENOENT') {
|
||||
reject(createInstallationError({}));
|
||||
} else {
|
||||
reject(
|
||||
createAPICallError({
|
||||
message: `Failed to execute Grok CLI: ${error.message}`,
|
||||
code: error.code,
|
||||
code: (error as any).code,
|
||||
stderr: error.message,
|
||||
isRetryable: false
|
||||
})
|
||||
@@ -180,15 +188,18 @@ export class GrokCliLanguageModel {
|
||||
}
|
||||
|
||||
/**
|
||||
* Generate unsupported parameter warnings
|
||||
* @param {Object} options - Generation options
|
||||
* @returns {Array} Warnings array
|
||||
* Generate comprehensive warnings for unsupported parameters and validation issues
|
||||
*/
|
||||
generateUnsupportedWarnings(options) {
|
||||
const warnings = [];
|
||||
const unsupportedParams = [];
|
||||
private generateAllWarnings(
|
||||
options: LanguageModelV2CallOptions,
|
||||
prompt: string
|
||||
): LanguageModelV2CallWarning[] {
|
||||
const warnings: LanguageModelV2CallWarning[] = [];
|
||||
const unsupportedParams: string[] = [];
|
||||
|
||||
// Grok CLI supports some parameters but not all AI SDK parameters
|
||||
// Check for unsupported parameters
|
||||
if (options.temperature !== undefined)
|
||||
unsupportedParams.push('temperature');
|
||||
if (options.topP !== undefined) unsupportedParams.push('topP');
|
||||
if (options.topK !== undefined) unsupportedParams.push('topK');
|
||||
if (options.presencePenalty !== undefined)
|
||||
@@ -200,24 +211,51 @@ export class GrokCliLanguageModel {
|
||||
if (options.seed !== undefined) unsupportedParams.push('seed');
|
||||
|
||||
if (unsupportedParams.length > 0) {
|
||||
// Add a warning for each unsupported parameter
|
||||
for (const param of unsupportedParams) {
|
||||
warnings.push({
|
||||
type: 'unsupported-setting',
|
||||
setting: param,
|
||||
setting: param as
|
||||
| 'temperature'
|
||||
| 'topP'
|
||||
| 'topK'
|
||||
| 'presencePenalty'
|
||||
| 'frequencyPenalty'
|
||||
| 'stopSequences'
|
||||
| 'seed',
|
||||
details: `Grok CLI does not support the ${param} parameter. It will be ignored.`
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
// Add model validation warnings if needed
|
||||
if (!this.modelId || this.modelId.trim() === '') {
|
||||
warnings.push({
|
||||
type: 'other',
|
||||
message: 'Model ID is empty or invalid'
|
||||
});
|
||||
}
|
||||
|
||||
// Add prompt validation
|
||||
if (!prompt || prompt.trim() === '') {
|
||||
warnings.push({
|
||||
type: 'other',
|
||||
message: 'Prompt is empty'
|
||||
});
|
||||
}
|
||||
|
||||
return warnings;
|
||||
}
|
||||
|
||||
/**
|
||||
* Generate text using Grok CLI
|
||||
* @param {Object} options - Generation options
|
||||
* @returns {Promise<Object>}
|
||||
*/
|
||||
async doGenerate(options) {
|
||||
async doGenerate(options: LanguageModelV2CallOptions) {
|
||||
// Handle abort signal early
|
||||
if (options.abortSignal?.aborted) {
|
||||
throw options.abortSignal.reason || new Error('Request aborted');
|
||||
}
|
||||
|
||||
// Check CLI installation
|
||||
const isInstalled = await this.checkGrokCliInstallation();
|
||||
if (!isInstalled) {
|
||||
@@ -234,7 +272,7 @@ export class GrokCliLanguageModel {
|
||||
}
|
||||
|
||||
const prompt = createPromptFromMessages(options.prompt);
|
||||
const warnings = this.generateUnsupportedWarnings(options);
|
||||
const warnings = this.generateAllWarnings(options, prompt);
|
||||
|
||||
// Build command arguments
|
||||
const args = ['--prompt', escapeShellArg(prompt)];
|
||||
@@ -244,10 +282,11 @@ export class GrokCliLanguageModel {
|
||||
args.push('--model', this.modelId);
|
||||
}
|
||||
|
||||
// Add API key if available
|
||||
if (apiKey) {
|
||||
args.push('--api-key', apiKey);
|
||||
}
|
||||
// Skip API key parameter if it's likely already configured to avoid hanging
|
||||
// The CLI seems to hang when trying to save API keys for grok-4 models
|
||||
// if (apiKey) {
|
||||
// args.push('--api-key', apiKey);
|
||||
// }
|
||||
|
||||
// Add base URL if provided in settings
|
||||
if (this.settings.baseURL) {
|
||||
@@ -260,9 +299,7 @@ export class GrokCliLanguageModel {
|
||||
}
|
||||
|
||||
try {
|
||||
const result = await this.executeGrokCli(args, {
|
||||
timeout: this.settings.timeout
|
||||
});
|
||||
const result = await this.executeGrokCli(args, { apiKey });
|
||||
|
||||
if (result.exitCode !== 0) {
|
||||
// Handle authentication errors
|
||||
@@ -290,19 +327,37 @@ export class GrokCliLanguageModel {
|
||||
let text = response.text || '';
|
||||
|
||||
// Extract JSON if in object-json mode
|
||||
if (options.mode?.type === 'object-json' && text) {
|
||||
const isObjectJson = (
|
||||
o: unknown
|
||||
): o is { mode: { type: 'object-json' } } =>
|
||||
!!o &&
|
||||
typeof o === 'object' &&
|
||||
'mode' in o &&
|
||||
(o as any).mode?.type === 'object-json';
|
||||
if (isObjectJson(options) && text) {
|
||||
text = extractJson(text);
|
||||
}
|
||||
|
||||
return {
|
||||
text: text || undefined,
|
||||
usage: response.usage || { promptTokens: 0, completionTokens: 0 },
|
||||
finishReason: 'stop',
|
||||
content: [
|
||||
{
|
||||
type: 'text' as const,
|
||||
text: text || ''
|
||||
}
|
||||
],
|
||||
usage: response.usage
|
||||
? {
|
||||
inputTokens: response.usage.promptTokens,
|
||||
outputTokens: response.usage.completionTokens,
|
||||
totalTokens: response.usage.totalTokens
|
||||
}
|
||||
: { inputTokens: 0, outputTokens: 0, totalTokens: 0 },
|
||||
finishReason: 'stop' as const,
|
||||
rawCall: {
|
||||
rawPrompt: prompt,
|
||||
rawSettings: args
|
||||
},
|
||||
warnings: warnings.length > 0 ? warnings : undefined,
|
||||
warnings: warnings,
|
||||
response: {
|
||||
id: generateId(),
|
||||
timestamp: new Date(),
|
||||
@@ -314,20 +369,23 @@ export class GrokCliLanguageModel {
|
||||
providerMetadata: {
|
||||
'grok-cli': {
|
||||
exitCode: result.exitCode,
|
||||
stderr: result.stderr || undefined
|
||||
...(result.stderr && { stderr: result.stderr })
|
||||
}
|
||||
}
|
||||
};
|
||||
} catch (error) {
|
||||
// Re-throw our custom errors
|
||||
if (error.name === 'APICallError' || error.name === 'LoadAPIKeyError') {
|
||||
if (
|
||||
(error as any).name === 'APICallError' ||
|
||||
(error as any).name === 'LoadAPIKeyError'
|
||||
) {
|
||||
throw error;
|
||||
}
|
||||
|
||||
// Wrap other errors
|
||||
throw createAPICallError({
|
||||
message: `Grok CLI execution failed: ${error.message}`,
|
||||
code: error.code,
|
||||
message: `Grok CLI execution failed: ${(error as Error).message}`,
|
||||
code: (error as any).code,
|
||||
promptExcerpt: prompt.substring(0, 200),
|
||||
isRetryable: false
|
||||
});
|
||||
@@ -338,15 +396,39 @@ export class GrokCliLanguageModel {
|
||||
* Stream text using Grok CLI
|
||||
* Note: Grok CLI doesn't natively support streaming, so this simulates streaming
|
||||
* by generating the full response and then streaming it in chunks
|
||||
* @param {Object} options - Stream options
|
||||
* @returns {Promise<Object>}
|
||||
*/
|
||||
async doStream(options) {
|
||||
const warnings = this.generateUnsupportedWarnings(options);
|
||||
async doStream(options: LanguageModelV2CallOptions) {
|
||||
const prompt = createPromptFromMessages(options.prompt);
|
||||
const warnings = this.generateAllWarnings(options, prompt);
|
||||
|
||||
const stream = new ReadableStream({
|
||||
start: async (controller) => {
|
||||
let abortListener: (() => void) | undefined;
|
||||
|
||||
try {
|
||||
// Handle abort signal
|
||||
if (options.abortSignal?.aborted) {
|
||||
throw options.abortSignal.reason || new Error('Request aborted');
|
||||
}
|
||||
|
||||
// Set up abort listener
|
||||
if (options.abortSignal) {
|
||||
abortListener = () => {
|
||||
controller.enqueue({
|
||||
type: 'error',
|
||||
error:
|
||||
options.abortSignal?.reason || new Error('Request aborted')
|
||||
});
|
||||
controller.close();
|
||||
};
|
||||
options.abortSignal.addEventListener('abort', abortListener, {
|
||||
once: true
|
||||
});
|
||||
}
|
||||
|
||||
// Emit stream-start with warnings
|
||||
controller.enqueue({ type: 'stream-start', warnings });
|
||||
|
||||
// Generate the full response first
|
||||
const result = await this.doGenerate(options);
|
||||
|
||||
@@ -359,20 +441,48 @@ export class GrokCliLanguageModel {
|
||||
});
|
||||
|
||||
// Simulate streaming by chunking the text
|
||||
const text = result.text || '';
|
||||
const content = result.content || [];
|
||||
const text =
|
||||
content.length > 0 && content[0].type === 'text'
|
||||
? content[0].text
|
||||
: '';
|
||||
const chunkSize = 50; // Characters per chunk
|
||||
let textPartId: string | undefined;
|
||||
|
||||
// Emit text-start if we have content
|
||||
if (text.length > 0) {
|
||||
textPartId = generateId();
|
||||
controller.enqueue({
|
||||
type: 'text-start',
|
||||
id: textPartId
|
||||
});
|
||||
}
|
||||
|
||||
for (let i = 0; i < text.length; i += chunkSize) {
|
||||
// Check for abort during streaming
|
||||
if (options.abortSignal?.aborted) {
|
||||
throw options.abortSignal.reason || new Error('Request aborted');
|
||||
}
|
||||
|
||||
const chunk = text.slice(i, i + chunkSize);
|
||||
controller.enqueue({
|
||||
type: 'text-delta',
|
||||
textDelta: chunk
|
||||
id: textPartId!,
|
||||
delta: chunk
|
||||
});
|
||||
|
||||
// Add small delay to simulate streaming
|
||||
await new Promise((resolve) => setTimeout(resolve, 20));
|
||||
}
|
||||
|
||||
// Close text part if opened
|
||||
if (textPartId) {
|
||||
controller.enqueue({
|
||||
type: 'text-end',
|
||||
id: textPartId
|
||||
});
|
||||
}
|
||||
|
||||
// Emit finish event
|
||||
controller.enqueue({
|
||||
type: 'finish',
|
||||
@@ -388,19 +498,22 @@ export class GrokCliLanguageModel {
|
||||
error
|
||||
});
|
||||
controller.close();
|
||||
} finally {
|
||||
// Clean up abort listener
|
||||
if (options.abortSignal && abortListener) {
|
||||
options.abortSignal.removeEventListener('abort', abortListener);
|
||||
}
|
||||
}
|
||||
},
|
||||
cancel: () => {
|
||||
// Clean up if stream is cancelled
|
||||
}
|
||||
});
|
||||
|
||||
return {
|
||||
stream,
|
||||
rawCall: {
|
||||
rawPrompt: createPromptFromMessages(options.prompt),
|
||||
rawSettings: {}
|
||||
},
|
||||
warnings: warnings.length > 0 ? warnings : undefined,
|
||||
request: {
|
||||
body: createPromptFromMessages(options.prompt)
|
||||
body: prompt
|
||||
}
|
||||
};
|
||||
}
|
||||
121
packages/ai-sdk-provider-grok-cli/src/grok-cli-provider.test.ts
Normal file
@@ -0,0 +1,121 @@
|
||||
/**
|
||||
* Tests for Grok CLI provider
|
||||
*/
|
||||
|
||||
import { NoSuchModelError } from '@ai-sdk/provider';
|
||||
import { beforeEach, describe, expect, it, vi } from 'vitest';
|
||||
import { GrokCliLanguageModel } from './grok-cli-language-model.js';
|
||||
import { createGrokCli, grokCli } from './grok-cli-provider.js';
|
||||
|
||||
// Mock the GrokCliLanguageModel
|
||||
vi.mock('./grok-cli-language-model.js', () => ({
|
||||
GrokCliLanguageModel: vi.fn().mockImplementation((options) => ({
|
||||
modelId: options.id,
|
||||
settings: options.settings,
|
||||
provider: 'grok-cli'
|
||||
}))
|
||||
}));
|
||||
|
||||
describe('createGrokCli', () => {
|
||||
beforeEach(() => {
|
||||
vi.clearAllMocks();
|
||||
});
|
||||
|
||||
it('should create a provider with default settings', () => {
|
||||
const provider = createGrokCli();
|
||||
expect(typeof provider).toBe('function');
|
||||
expect(typeof provider.languageModel).toBe('function');
|
||||
expect(typeof provider.chat).toBe('function');
|
||||
expect(typeof provider.textEmbeddingModel).toBe('function');
|
||||
expect(typeof provider.imageModel).toBe('function');
|
||||
});
|
||||
|
||||
it('should create a provider with custom default settings', () => {
|
||||
const defaultSettings = {
|
||||
timeout: 5000,
|
||||
workingDirectory: '/custom/path'
|
||||
};
|
||||
const provider = createGrokCli({ defaultSettings });
|
||||
|
||||
const model = provider('grok-2-mini');
|
||||
|
||||
expect(GrokCliLanguageModel).toHaveBeenCalledWith({
|
||||
id: 'grok-2-mini',
|
||||
settings: defaultSettings
|
||||
});
|
||||
});
|
||||
|
||||
it('should create language models with merged settings', () => {
|
||||
const defaultSettings = { timeout: 5000 };
|
||||
const provider = createGrokCli({ defaultSettings });
|
||||
|
||||
const modelSettings = { apiKey: 'test-key' };
|
||||
const model = provider('grok-2', modelSettings);
|
||||
|
||||
expect(GrokCliLanguageModel).toHaveBeenCalledWith({
|
||||
id: 'grok-2',
|
||||
settings: { timeout: 5000, apiKey: 'test-key' }
|
||||
});
|
||||
});
|
||||
|
||||
it('should create models via languageModel method', () => {
|
||||
const provider = createGrokCli();
|
||||
const model = provider.languageModel('grok-2-mini', { timeout: 1000 });
|
||||
|
||||
expect(GrokCliLanguageModel).toHaveBeenCalledWith({
|
||||
id: 'grok-2-mini',
|
||||
settings: { timeout: 1000 }
|
||||
});
|
||||
});
|
||||
|
||||
it('should create models via chat method (alias)', () => {
|
||||
const provider = createGrokCli();
|
||||
const model = provider.chat('grok-2');
|
||||
|
||||
expect(GrokCliLanguageModel).toHaveBeenCalledWith({
|
||||
id: 'grok-2',
|
||||
settings: {}
|
||||
});
|
||||
});
|
||||
|
||||
it('should throw error when called with new keyword', () => {
|
||||
const provider = createGrokCli();
|
||||
expect(() => {
|
||||
// @ts-expect-error - intentionally testing invalid usage
|
||||
new provider('grok-2');
|
||||
}).toThrow(
|
||||
'The Grok CLI model function cannot be called with the new keyword.'
|
||||
);
|
||||
});
|
||||
|
||||
it('should throw NoSuchModelError for textEmbeddingModel', () => {
|
||||
const provider = createGrokCli();
|
||||
expect(() => {
|
||||
provider.textEmbeddingModel('test-model');
|
||||
}).toThrow(NoSuchModelError);
|
||||
});
|
||||
|
||||
it('should throw NoSuchModelError for imageModel', () => {
|
||||
const provider = createGrokCli();
|
||||
expect(() => {
|
||||
provider.imageModel('test-model');
|
||||
}).toThrow(NoSuchModelError);
|
||||
});
|
||||
});
|
||||
|
||||
describe('default grokCli provider', () => {
|
||||
it('should be a pre-configured provider instance', () => {
|
||||
expect(typeof grokCli).toBe('function');
|
||||
expect(typeof grokCli.languageModel).toBe('function');
|
||||
expect(typeof grokCli.chat).toBe('function');
|
||||
});
|
||||
|
||||
it('should create models with default configuration', () => {
|
||||
const model = grokCli('grok-2-mini');
|
||||
|
||||
expect(GrokCliLanguageModel).toHaveBeenCalledWith({
|
||||
id: 'grok-2-mini',
|
||||
settings: {}
|
||||
});
|
||||
});
|
||||
});
|
||||
108
packages/ai-sdk-provider-grok-cli/src/grok-cli-provider.ts
Normal file
@@ -0,0 +1,108 @@
|
||||
/**
|
||||
* Grok CLI provider implementation for AI SDK v5
|
||||
*/
|
||||
|
||||
import type { LanguageModelV2, ProviderV2 } from '@ai-sdk/provider';
|
||||
import { NoSuchModelError } from '@ai-sdk/provider';
|
||||
import { GrokCliLanguageModel } from './grok-cli-language-model.js';
|
||||
import type { GrokCliModelId, GrokCliSettings } from './types.js';
|
||||
|
||||
/**
|
||||
* Grok CLI provider interface that extends the AI SDK's ProviderV2
|
||||
*/
|
||||
export interface GrokCliProvider extends ProviderV2 {
|
||||
/**
|
||||
* Creates a language model instance for the specified model ID.
|
||||
* This is a shorthand for calling `languageModel()`.
|
||||
*/
|
||||
(modelId: GrokCliModelId, settings?: GrokCliSettings): LanguageModelV2;
|
||||
|
||||
/**
|
||||
* Creates a language model instance for text generation.
|
||||
*/
|
||||
languageModel(
|
||||
modelId: GrokCliModelId,
|
||||
settings?: GrokCliSettings
|
||||
): LanguageModelV2;
|
||||
|
||||
/**
|
||||
* Alias for `languageModel()` to maintain compatibility with AI SDK patterns.
|
||||
*/
|
||||
chat(modelId: GrokCliModelId, settings?: GrokCliSettings): LanguageModelV2;
|
||||
|
||||
textEmbeddingModel(modelId: string): never;
|
||||
imageModel(modelId: string): never;
|
||||
}
|
||||
|
||||
/**
|
||||
* Configuration options for creating a Grok CLI provider instance
|
||||
*/
|
||||
export interface GrokCliProviderSettings {
|
||||
/**
|
||||
* Default settings to use for all models created by this provider.
|
||||
* Individual model settings will override these defaults.
|
||||
*/
|
||||
defaultSettings?: GrokCliSettings;
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates a Grok CLI provider instance with the specified configuration.
|
||||
* The provider can be used to create language models for interacting with Grok models.
|
||||
*/
|
||||
export function createGrokCli(
|
||||
options: GrokCliProviderSettings = {}
|
||||
): GrokCliProvider {
|
||||
const createModel = (
|
||||
modelId: GrokCliModelId,
|
||||
settings: GrokCliSettings = {}
|
||||
): LanguageModelV2 => {
|
||||
const mergedSettings = {
|
||||
...options.defaultSettings,
|
||||
...settings
|
||||
};
|
||||
|
||||
return new GrokCliLanguageModel({
|
||||
id: modelId,
|
||||
settings: mergedSettings
|
||||
});
|
||||
};
|
||||
|
||||
const provider = function (
|
||||
modelId: GrokCliModelId,
|
||||
settings?: GrokCliSettings
|
||||
) {
|
||||
if (new.target) {
|
||||
throw new Error(
|
||||
'The Grok CLI model function cannot be called with the new keyword.'
|
||||
);
|
||||
}
|
||||
|
||||
return createModel(modelId, settings);
|
||||
};
|
||||
|
||||
provider.languageModel = createModel;
|
||||
provider.chat = createModel; // Alias for languageModel
|
||||
|
||||
// Add textEmbeddingModel method that throws NoSuchModelError
|
||||
provider.textEmbeddingModel = (modelId: string) => {
|
||||
throw new NoSuchModelError({
|
||||
modelId,
|
||||
modelType: 'textEmbeddingModel'
|
||||
});
|
||||
};
|
||||
|
||||
provider.imageModel = (modelId: string) => {
|
||||
throw new NoSuchModelError({
|
||||
modelId,
|
||||
modelType: 'imageModel'
|
||||
});
|
||||
};
|
||||
|
||||
return provider as GrokCliProvider;
|
||||
}
|
||||
|
||||
/**
|
||||
* Default Grok CLI provider instance.
|
||||
* Pre-configured provider for quick usage without custom settings.
|
||||
*/
|
||||
export const grokCli = createGrokCli();
|
||||
64
packages/ai-sdk-provider-grok-cli/src/index.ts
Normal file
@@ -0,0 +1,64 @@
|
||||
/**
|
||||
* Provider exports for creating and configuring Grok CLI instances.
|
||||
*/
|
||||
|
||||
/**
|
||||
* Creates a new Grok CLI provider instance and the default provider instance.
|
||||
*/
|
||||
export { createGrokCli, grokCli } from './grok-cli-provider.js';
|
||||
|
||||
/**
|
||||
* Type definitions for the Grok CLI provider.
|
||||
*/
|
||||
export type {
|
||||
GrokCliProvider,
|
||||
GrokCliProviderSettings
|
||||
} from './grok-cli-provider.js';
|
||||
|
||||
/**
|
||||
* Language model implementation for Grok CLI.
|
||||
* This class implements the AI SDK's LanguageModelV2 interface.
|
||||
*/
|
||||
export { GrokCliLanguageModel } from './grok-cli-language-model.js';
|
||||
|
||||
/**
|
||||
* Type definitions for Grok CLI language models.
|
||||
*/
|
||||
export type {
|
||||
GrokCliModelId,
|
||||
GrokCliLanguageModelOptions,
|
||||
GrokCliSettings,
|
||||
GrokCliMessage,
|
||||
GrokCliResponse,
|
||||
GrokCliErrorMetadata
|
||||
} from './types.js';
|
||||
|
||||
/**
|
||||
* Error handling utilities for Grok CLI.
|
||||
* These functions help create and identify specific error types.
|
||||
*/
|
||||
export {
|
||||
isAuthenticationError,
|
||||
isTimeoutError,
|
||||
isInstallationError,
|
||||
getErrorMetadata,
|
||||
createAPICallError,
|
||||
createAuthenticationError,
|
||||
createTimeoutError,
|
||||
createInstallationError
|
||||
} from './errors.js';
|
||||
|
||||
/**
|
||||
* Message conversion utilities for Grok CLI communication.
|
||||
*/
|
||||
export {
|
||||
convertToGrokCliMessages,
|
||||
convertFromGrokCliResponse,
|
||||
createPromptFromMessages,
|
||||
escapeShellArg
|
||||
} from './message-converter.js';
|
||||
|
||||
/**
|
||||
* JSON extraction utilities for parsing Grok responses.
|
||||
*/
|
||||
export { extractJson } from './json-extractor.js';
|
||||
81
packages/ai-sdk-provider-grok-cli/src/json-extractor.test.ts
Normal file
@@ -0,0 +1,81 @@
|
||||
/**
|
||||
* Tests for JSON extraction utilities
|
||||
*/
|
||||
|
||||
import { describe, expect, it } from 'vitest';
|
||||
import { extractJson } from './json-extractor.js';
|
||||
|
||||
describe('extractJson', () => {
|
||||
it('should extract JSON from markdown code blocks', () => {
|
||||
const text = '```json\n{"name": "test", "value": 42}\n```';
|
||||
const result = extractJson(text);
|
||||
expect(JSON.parse(result)).toEqual({ name: 'test', value: 42 });
|
||||
});
|
||||
|
||||
it('should extract JSON from generic code blocks', () => {
|
||||
const text = '```\n{"name": "test", "value": 42}\n```';
|
||||
const result = extractJson(text);
|
||||
expect(JSON.parse(result)).toEqual({ name: 'test', value: 42 });
|
||||
});
|
||||
|
||||
it('should remove JavaScript variable declarations', () => {
|
||||
const text = 'const result = {"name": "test", "value": 42};';
|
||||
const result = extractJson(text);
|
||||
expect(JSON.parse(result)).toEqual({ name: 'test', value: 42 });
|
||||
});
|
||||
|
||||
it('should handle let variable declarations', () => {
|
||||
const text = 'let data = {"name": "test", "value": 42};';
|
||||
const result = extractJson(text);
|
||||
expect(JSON.parse(result)).toEqual({ name: 'test', value: 42 });
|
||||
});
|
||||
|
||||
it('should handle var variable declarations', () => {
|
||||
const text = 'var config = {"name": "test", "value": 42};';
|
||||
const result = extractJson(text);
|
||||
expect(JSON.parse(result)).toEqual({ name: 'test', value: 42 });
|
||||
});
|
||||
|
||||
it('should extract JSON arrays', () => {
|
||||
const text = '[{"name": "test1"}, {"name": "test2"}]';
|
||||
const result = extractJson(text);
|
||||
expect(JSON.parse(result)).toEqual([{ name: 'test1' }, { name: 'test2' }]);
|
||||
});
|
||||
|
||||
it('should convert JavaScript object literals to JSON', () => {
|
||||
const text = "{name: 'test', value: 42}";
|
||||
const result = extractJson(text);
|
||||
expect(JSON.parse(result)).toEqual({ name: 'test', value: 42 });
|
||||
});
|
||||
|
||||
it('should return valid JSON (canonical formatting)', () => {
|
||||
const text = '{"name": "test", "value": 42}';
|
||||
const result = extractJson(text);
|
||||
expect(JSON.parse(result)).toEqual({ name: 'test', value: 42 });
|
||||
});
|
||||
|
||||
it('should return original text when JSON parsing fails completely', () => {
|
||||
const text = 'This is not JSON at all';
|
||||
const result = extractJson(text);
|
||||
expect(result).toBe('This is not JSON at all');
|
||||
});
|
||||
|
||||
it('should handle complex nested objects', () => {
|
||||
const text =
|
||||
'```json\n{\n "user": {\n "name": "John",\n "age": 30\n },\n "items": [1, 2, 3]\n}\n```';
|
||||
const result = extractJson(text);
|
||||
expect(JSON.parse(result)).toEqual({
|
||||
user: {
|
||||
name: 'John',
|
||||
age: 30
|
||||
},
|
||||
items: [1, 2, 3]
|
||||
});
|
||||
});
|
||||
|
||||
it('should handle mixed quotes in object literals', () => {
|
||||
const text = `{name: "test", value: 'mixed quotes'}`;
|
||||
const result = extractJson(text);
|
||||
expect(JSON.parse(result)).toEqual({ name: 'test', value: 'mixed quotes' });
|
||||
});
|
||||
});
|
||||
132
packages/ai-sdk-provider-grok-cli/src/json-extractor.ts
Normal file
@@ -0,0 +1,132 @@
|
||||
/**
|
||||
* Extract JSON from AI's response using a tolerant parser.
|
||||
*
|
||||
* The function removes common wrappers such as markdown fences or variable
|
||||
* declarations and then attempts to parse the remaining text with
|
||||
* `jsonc-parser`. If valid JSON (or JSONC) can be parsed, it is returned as a
|
||||
* string via `JSON.stringify`. Otherwise the original text is returned.
|
||||
*
|
||||
* @param text - Raw text which may contain JSON
|
||||
* @returns A valid JSON string if extraction succeeds, otherwise the original text
|
||||
*/
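//
// Usage sketch (illustrative): fenced JSON is unwrapped and re-serialized with
// two-space indentation; text with no recoverable JSON is returned unchanged.
//   extractJson('```json\n{"name": "test"}\n```');  // -> '{\n  "name": "test"\n}'
//   extractJson('This is not JSON at all');         // -> 'This is not JSON at all'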
|
||||
import { parse, type ParseError } from 'jsonc-parser';
|
||||
|
||||
export function extractJson(text: string): string {
|
||||
let content = text.trim();
|
||||
|
||||
// Strip ```json or ``` fences
|
||||
const fenceMatch = /```(?:json)?\s*([\s\S]*?)\s*```/i.exec(content);
|
||||
if (fenceMatch) {
|
||||
content = fenceMatch[1];
|
||||
}
|
||||
|
||||
// Strip variable declarations like `const foo =` or `let foo =`
|
||||
const varMatch = /^\s*(?:const|let|var)\s+\w+\s*=\s*([\s\S]*)/i.exec(content);
|
||||
if (varMatch) {
|
||||
content = varMatch[1];
|
||||
// Remove trailing semicolon if present
|
||||
if (content.trim().endsWith(';')) {
|
||||
content = content.trim().slice(0, -1);
|
||||
}
|
||||
}
|
||||
|
||||
// Find the first opening bracket
|
||||
const firstObj = content.indexOf('{');
|
||||
const firstArr = content.indexOf('[');
|
||||
if (firstObj === -1 && firstArr === -1) {
|
||||
return text;
|
||||
}
|
||||
const start =
|
||||
firstArr === -1
|
||||
? firstObj
|
||||
: firstObj === -1
|
||||
? firstArr
|
||||
: Math.min(firstObj, firstArr);
|
||||
content = content.slice(start);
|
||||
|
||||
// Try to parse the entire string with jsonc-parser
|
||||
const tryParse = (value: string): string | undefined => {
|
||||
const errors: ParseError[] = [];
|
||||
try {
|
||||
const result = parse(value, errors, { allowTrailingComma: true });
|
||||
if (errors.length === 0) {
|
||||
return JSON.stringify(result, null, 2);
|
||||
}
|
||||
} catch {
|
||||
// ignore
|
||||
}
|
||||
return undefined;
|
||||
};
|
||||
|
||||
const parsed = tryParse(content);
|
||||
if (parsed !== undefined) {
|
||||
return parsed;
|
||||
}
|
||||
|
||||
// If parsing the full string failed, use a more efficient approach
|
||||
// to find valid JSON boundaries
|
||||
const openChar = content[0];
|
||||
const closeChar = openChar === '{' ? '}' : ']';
|
||||
|
||||
// Find all potential closing positions by tracking nesting depth
|
||||
const closingPositions: number[] = [];
|
||||
let depth = 0;
|
||||
let inString = false;
|
||||
let escapeNext = false;
|
||||
|
||||
for (let i = 0; i < content.length; i++) {
|
||||
const char = content[i];
|
||||
|
||||
if (escapeNext) {
|
||||
escapeNext = false;
|
||||
continue;
|
||||
}
|
||||
|
||||
if (char === '\\') {
|
||||
escapeNext = true;
|
||||
continue;
|
||||
}
|
||||
|
||||
if (char === '"' && !inString) {
|
||||
inString = true;
|
||||
continue;
|
||||
}
|
||||
|
||||
if (char === '"' && inString) {
|
||||
inString = false;
|
||||
continue;
|
||||
}
|
||||
|
||||
// Skip content inside strings
|
||||
if (inString) continue;
|
||||
|
||||
if (char === openChar) {
|
||||
depth++;
|
||||
} else if (char === closeChar) {
|
||||
depth--;
|
||||
if (depth === 0) {
|
||||
closingPositions.push(i + 1);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Try parsing at each valid closing position, starting from the end
|
||||
for (let i = closingPositions.length - 1; i >= 0; i--) {
|
||||
const attempt = tryParse(content.slice(0, closingPositions[i]));
|
||||
if (attempt !== undefined) {
|
||||
return attempt;
|
||||
}
|
||||
}
|
||||
|
||||
// As a final fallback, try the original character-by-character approach
|
||||
// but only for the last 1000 characters to limit performance impact
|
||||
const searchStart = Math.max(0, content.length - 1000);
|
||||
for (let end = content.length - 1; end > searchStart; end--) {
|
||||
const attempt = tryParse(content.slice(0, end));
|
||||
if (attempt !== undefined) {
|
||||
return attempt;
|
||||
}
|
||||
}
|
||||
|
||||
return text;
|
||||
}
|
||||
163
packages/ai-sdk-provider-grok-cli/src/message-converter.test.ts
Normal file
@@ -0,0 +1,163 @@
|
||||
/**
|
||||
* Tests for message conversion utilities
|
||||
*/
|
||||
|
||||
import { describe, expect, it } from 'vitest';
|
||||
import {
|
||||
convertFromGrokCliResponse,
|
||||
convertToGrokCliMessages,
|
||||
createPromptFromMessages,
|
||||
escapeShellArg
|
||||
} from './message-converter.js';
|
||||
|
||||
describe('convertToGrokCliMessages', () => {
|
||||
it('should convert string content messages', () => {
|
||||
const messages = [
|
||||
{ role: 'user', content: 'Hello, world!' },
|
||||
{ role: 'assistant', content: 'Hi there!' }
|
||||
];
|
||||
|
||||
const result = convertToGrokCliMessages(messages);
|
||||
|
||||
expect(result).toEqual([
|
||||
{ role: 'user', content: 'Hello, world!' },
|
||||
{ role: 'assistant', content: 'Hi there!' }
|
||||
]);
|
||||
});
|
||||
|
||||
it('should convert array content messages', () => {
|
||||
const messages = [
|
||||
{
|
||||
role: 'user',
|
||||
content: [
|
||||
{ type: 'text', text: 'Hello' },
|
||||
{ type: 'text', text: 'World' }
|
||||
]
|
||||
}
|
||||
];
|
||||
|
||||
const result = convertToGrokCliMessages(messages);
|
||||
|
||||
expect(result).toEqual([{ role: 'user', content: 'Hello\nWorld' }]);
|
||||
});
|
||||
|
||||
it('should convert object content messages', () => {
|
||||
const messages = [
|
||||
{
|
||||
role: 'user',
|
||||
content: { text: 'Hello from object' }
|
||||
}
|
||||
];
|
||||
|
||||
const result = convertToGrokCliMessages(messages);
|
||||
|
||||
expect(result).toEqual([{ role: 'user', content: 'Hello from object' }]);
|
||||
});
|
||||
});
|
||||
|
||||
describe('convertFromGrokCliResponse', () => {
|
||||
it('should parse JSONL response format', () => {
|
||||
const responseText = `{"role": "assistant", "content": "Hello there!", "usage": {"prompt_tokens": 10, "completion_tokens": 5, "total_tokens": 15}}`;
|
||||
|
||||
const result = convertFromGrokCliResponse(responseText);
|
||||
|
||||
expect(result).toEqual({
|
||||
text: 'Hello there!',
|
||||
usage: {
|
||||
promptTokens: 10,
|
||||
completionTokens: 5,
|
||||
totalTokens: 15
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
it('should handle multiple lines in JSONL format', () => {
|
||||
const responseText = `{"role": "user", "content": "Hello"}
|
||||
{"role": "assistant", "content": "Hi there!", "usage": {"prompt_tokens": 5, "completion_tokens": 3}}`;
|
||||
|
||||
const result = convertFromGrokCliResponse(responseText);
|
||||
|
||||
expect(result).toEqual({
|
||||
text: 'Hi there!',
|
||||
usage: {
|
||||
promptTokens: 5,
|
||||
completionTokens: 3,
|
||||
totalTokens: 0
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
it('should fallback to raw text when parsing fails', () => {
|
||||
const responseText = 'Invalid JSON response';
|
||||
|
||||
const result = convertFromGrokCliResponse(responseText);
|
||||
|
||||
expect(result).toEqual({
|
||||
text: 'Invalid JSON response',
|
||||
usage: undefined
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
describe('createPromptFromMessages', () => {
|
||||
it('should create formatted prompt from messages', () => {
|
||||
const messages = [
|
||||
{ role: 'system', content: 'You are a helpful assistant.' },
|
||||
{ role: 'user', content: 'What is 2+2?' },
|
||||
{ role: 'assistant', content: '2+2 equals 4.' }
|
||||
];
|
||||
|
||||
const result = createPromptFromMessages(messages);
|
||||
|
||||
expect(result).toBe(
|
||||
'System: You are a helpful assistant.\n\nUser: What is 2+2?\n\nAssistant: 2+2 equals 4.'
|
||||
);
|
||||
});
|
||||
|
||||
it('should handle custom role names', () => {
|
||||
const messages = [{ role: 'custom', content: 'Custom message' }];
|
||||
|
||||
const result = createPromptFromMessages(messages);
|
||||
|
||||
expect(result).toBe('custom: Custom message');
|
||||
});
|
||||
|
||||
it('should trim whitespace from message content', () => {
|
||||
const messages = [
|
||||
{ role: 'user', content: ' Hello with spaces ' },
|
||||
{ role: 'assistant', content: '\n\nResponse with newlines\n\n' }
|
||||
];
|
||||
|
||||
const result = createPromptFromMessages(messages);
|
||||
|
||||
expect(result).toBe(
|
||||
'User: Hello with spaces\n\nAssistant: Response with newlines'
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
describe('escapeShellArg', () => {
|
||||
it('should escape single quotes', () => {
|
||||
const arg = "It's a test";
|
||||
const result = escapeShellArg(arg);
|
||||
expect(result).toBe("'It'\\''s a test'");
|
||||
});
|
||||
|
||||
it('should handle strings without special characters', () => {
|
||||
const arg = 'simple string';
|
||||
const result = escapeShellArg(arg);
|
||||
expect(result).toBe("'simple string'");
|
||||
});
|
||||
|
||||
it('should convert non-string values to strings', () => {
|
||||
const arg = 123;
|
||||
const result = escapeShellArg(arg);
|
||||
expect(result).toBe("'123'");
|
||||
});
|
||||
|
||||
it('should handle empty strings', () => {
|
||||
const arg = '';
|
||||
const result = escapeShellArg(arg);
|
||||
expect(result).toBe("''");
|
||||
});
|
||||
});
|
||||
@@ -1,17 +1,28 @@
|
||||
/**
|
||||
* @fileoverview Message format conversion utilities for Grok CLI provider
|
||||
* Message format conversion utilities for Grok CLI provider
|
||||
*/
|
||||
|
||||
import type { GrokCliMessage, GrokCliResponse } from './types.js';
|
||||
|
||||
/**
|
||||
* @typedef {import('./types.js').GrokCliMessage} GrokCliMessage
|
||||
* AI SDK message type (simplified interface)
|
||||
*/
|
||||
interface AISDKMessage {
|
||||
role: string;
|
||||
content:
|
||||
| string
|
||||
| Array<{ type: string; text?: string }>
|
||||
| { text?: string; [key: string]: unknown };
|
||||
}
|
||||
|
||||
/**
|
||||
* Convert AI SDK messages to Grok CLI compatible format
|
||||
* @param {Array<Object>} messages - AI SDK message array
|
||||
* @returns {Array<GrokCliMessage>} Grok CLI compatible messages
|
||||
* @param messages - AI SDK message array
|
||||
* @returns Grok CLI compatible messages
|
||||
*/
|
||||
export function convertToGrokCliMessages(messages) {
|
||||
export function convertToGrokCliMessages(
|
||||
messages: AISDKMessage[]
|
||||
): GrokCliMessage[] {
|
||||
return messages.map((message) => {
|
||||
// Handle different message content types
|
||||
let content = '';
|
||||
@@ -22,7 +33,7 @@ export function convertToGrokCliMessages(messages) {
|
||||
// Handle multi-part content (text and images)
|
||||
content = message.content
|
||||
.filter((part) => part.type === 'text')
|
||||
.map((part) => part.text)
|
||||
.map((part) => part.text || '')
|
||||
.join('\n');
|
||||
} else if (message.content && typeof message.content === 'object') {
|
||||
// Handle object content
|
||||
@@ -38,10 +49,17 @@ export function convertToGrokCliMessages(messages) {
|
||||
|
||||
/**
|
||||
* Convert Grok CLI response to AI SDK format
|
||||
* @param {string} responseText - Raw response text from Grok CLI (JSONL format)
|
||||
* @returns {Object} AI SDK compatible response object
|
||||
* @param responseText - Raw response text from Grok CLI (JSONL format)
|
||||
* @returns AI SDK compatible response object
|
||||
*/
|
||||
export function convertFromGrokCliResponse(responseText) {
|
||||
export function convertFromGrokCliResponse(responseText: string): {
|
||||
text: string;
|
||||
usage?: {
|
||||
promptTokens: number;
|
||||
completionTokens: number;
|
||||
totalTokens: number;
|
||||
};
|
||||
} {
|
||||
try {
|
||||
// Grok CLI outputs JSONL format - each line is a separate JSON message
|
||||
const lines = responseText
|
||||
@@ -50,10 +68,10 @@ export function convertFromGrokCliResponse(responseText) {
|
||||
.filter((line) => line.trim());
|
||||
|
||||
// Parse each line as JSON and find assistant messages
|
||||
const messages = [];
|
||||
const messages: GrokCliResponse[] = [];
|
||||
for (const line of lines) {
|
||||
try {
|
||||
const message = JSON.parse(line);
|
||||
const message = JSON.parse(line) as GrokCliResponse;
|
||||
messages.push(message);
|
||||
} catch (parseError) {
|
||||
// Skip invalid JSON lines
|
||||
@@ -95,10 +113,10 @@ export function convertFromGrokCliResponse(responseText) {
|
||||
|
||||
/**
|
||||
* Create a prompt string for Grok CLI from messages
|
||||
* @param {Array<Object>} messages - AI SDK message array
|
||||
* @returns {string} Formatted prompt string
|
||||
* @param messages - AI SDK message array
|
||||
* @returns Formatted prompt string
|
||||
*/
|
||||
export function createPromptFromMessages(messages) {
|
||||
export function createPromptFromMessages(messages: AISDKMessage[]): string {
|
||||
const grokMessages = convertToGrokCliMessages(messages);
|
||||
|
||||
// Create a conversation-style prompt
|
||||
@@ -122,14 +140,14 @@ export function createPromptFromMessages(messages) {
|
||||
|
||||
/**
|
||||
* Escape shell arguments for safe CLI execution
|
||||
* @param {string} arg - Argument to escape
|
||||
* @returns {string} Shell-escaped argument
|
||||
* @param arg - Argument to escape
|
||||
* @returns Shell-escaped argument
|
||||
*/
|
||||
export function escapeShellArg(arg) {
|
||||
export function escapeShellArg(arg: string | unknown): string {
|
||||
if (typeof arg !== 'string') {
|
||||
arg = String(arg);
|
||||
}
|
||||
|
||||
// Replace single quotes with '\''
|
||||
return "'" + arg.replace(/'/g, "'\\''") + "'";
|
||||
return "'" + (arg as string).replace(/'/g, "'\\''") + "'";
|
||||
}
|
||||
81
packages/ai-sdk-provider-grok-cli/src/types.ts
Normal file
@@ -0,0 +1,81 @@
|
||||
/**
|
||||
* Type definitions for Grok CLI provider
|
||||
*/
|
||||
|
||||
/**
|
||||
* Settings for configuring Grok CLI behavior
|
||||
*/
|
||||
export interface GrokCliSettings {
|
||||
/** API key for Grok CLI */
|
||||
apiKey?: string;
|
||||
/** Base URL for Grok API */
|
||||
baseURL?: string;
|
||||
/** Default model to use */
|
||||
model?: string;
|
||||
/** Timeout in milliseconds */
|
||||
timeout?: number;
|
||||
/** Working directory for CLI commands */
|
||||
workingDirectory?: string;
|
||||
}
|
||||
|
||||
/**
|
||||
* Model identifiers supported by Grok CLI
|
||||
*/
|
||||
export type GrokCliModelId = string;
|
||||
|
||||
/**
|
||||
* Error metadata for Grok CLI operations
|
||||
*/
|
||||
export interface GrokCliErrorMetadata {
|
||||
/** Error code */
|
||||
code?: string;
|
||||
/** Process exit code */
|
||||
exitCode?: number;
|
||||
/** Standard error output */
|
||||
stderr?: string;
|
||||
/** Standard output */
|
||||
stdout?: string;
|
||||
/** Excerpt of the prompt that caused the error */
|
||||
promptExcerpt?: string;
|
||||
/** Timeout value in milliseconds */
|
||||
timeoutMs?: number;
|
||||
}
|
||||
|
||||
/**
|
||||
* Message format for Grok CLI communication
|
||||
*/
|
||||
export interface GrokCliMessage {
|
||||
/** Message role (user, assistant, system) */
|
||||
role: string;
|
||||
/** Message content */
|
||||
content: string;
|
||||
}
|
||||
|
||||
/**
|
||||
* Response format from Grok CLI
|
||||
*/
|
||||
export interface GrokCliResponse {
|
||||
/** Message role */
|
||||
role: string;
|
||||
/** Response content */
|
||||
content: string;
|
||||
/** Token usage information */
|
||||
usage?: {
|
||||
/** Input tokens used */
|
||||
prompt_tokens?: number;
|
||||
/** Output tokens used */
|
||||
completion_tokens?: number;
|
||||
/** Total tokens used */
|
||||
total_tokens?: number;
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Configuration options for Grok CLI language model
|
||||
*/
|
||||
export interface GrokCliLanguageModelOptions {
|
||||
/** Model identifier */
|
||||
id: GrokCliModelId;
|
||||
/** Model settings */
|
||||
settings?: GrokCliSettings;
|
||||
}
|
||||
36
packages/ai-sdk-provider-grok-cli/tsconfig.json
Normal file
@@ -0,0 +1,36 @@
{
	"compilerOptions": {
		"target": "ES2022",
		"module": "ESNext",
		"lib": ["ES2022"],
		"declaration": true,
		"declarationMap": true,
		"sourceMap": true,
		"outDir": "./dist",
		"baseUrl": ".",
		"rootDir": "./src",
		"strict": true,
		"noImplicitAny": true,
		"strictNullChecks": true,
		"strictFunctionTypes": true,
		"strictBindCallApply": true,
		"strictPropertyInitialization": true,
		"noImplicitThis": true,
		"alwaysStrict": true,
		"noUnusedLocals": true,
		"noUnusedParameters": true,
		"noImplicitReturns": true,
		"noFallthroughCasesInSwitch": true,
		"esModuleInterop": true,
		"skipLibCheck": true,
		"forceConsistentCasingInFileNames": true,
		"moduleResolution": "bundler",
		"moduleDetection": "force",
		"types": ["node"],
		"resolveJsonModule": true,
		"isolatedModules": true,
		"allowImportingTsExtensions": false
	},
	"include": ["src/**/*"],
	"exclude": ["node_modules", "dist", "tests", "**/*.test.ts", "**/*.spec.ts"]
}
@@ -20,8 +20,7 @@
 		"typecheck": "tsc --noEmit"
 	},
 	"devDependencies": {
 		"dotenv-mono": "^1.5.1",
-		"typescript": "^5.7.3"
+		"typescript": "^5.9.2"
 	},
 	"dependencies": {
 		"tsup": "^8.5.0"
@@ -43,9 +43,9 @@ export const baseConfig: Partial<UserConfig> = {
 export function mergeConfig(
 	base: Partial<UserConfig>,
 	overrides: Partial<UserConfig>
-): Partial<UserConfig> {
+): UserConfig {
 	return {
 		...base,
 		...overrides
-	};
+	} as UserConfig;
 }
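A hedged usage sketch of the widened return type follows; the `declare` statements stand in for the exports shown above, the local `UserConfig` shape is a stand-in for whatever config type the shared build-config package is actually written against, and the entry/format values are invented.

```typescript
// Stand-ins for the definitions in the diff above (not the real module).
interface UserConfig {
	entry?: string[];
	format?: string[];
	[key: string]: unknown;
}

declare const baseConfig: Partial<UserConfig>;
declare function mergeConfig(
	base: Partial<UserConfig>,
	overrides: Partial<UserConfig>
): UserConfig;

// Shallow spread: keys in `overrides` win wholesale, so nested objects are
// replaced rather than deep-merged.
const config: UserConfig = mergeConfig(baseConfig, {
	entry: ['src/index.ts'],
	format: ['esm']
});
```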
@@ -31,21 +31,13 @@
 	},
 	"dependencies": {
 		"@supabase/supabase-js": "^2.57.4",
-		"zod": "^3.23.8"
+		"zod": "^4.1.11"
 	},
 	"devDependencies": {
 		"@biomejs/biome": "^1.9.4",
 		"@tm/build-config": "*",
 		"@types/node": "^22.10.5",
-		"@vitest/coverage-v8": "^2.0.5",
 		"dotenv-mono": "^1.5.1",
 		"ts-node": "^10.9.2",
 		"tsup": "^8.5.0",
-		"typescript": "^5.7.3",
-		"vitest": "^2.1.8"
 	},
 	"engines": {
 		"node": ">=18.0.0"
+		"@vitest/coverage-v8": "^3.2.4",
+		"typescript": "^5.9.2",
+		"vitest": "^3.2.4"
 	},
 	"files": ["src", "README.md", "CHANGELOG.md"],
 	"keywords": ["task-management", "typescript", "ai", "prd", "parser"],
@@ -33,6 +33,9 @@ export class TaskEntity implements Task {
 	tags?: string[];
 	assignee?: string;
 	complexity?: Task['complexity'];
+	recommendedSubtasks?: number;
+	expansionPrompt?: string;
+	complexityReasoning?: string;

 	constructor(data: Task | (Omit<Task, 'id'> & { id: number | string })) {
 		this.validate(data);
@@ -50,7 +53,7 @@ export class TaskEntity implements Task {
 		// Normalize subtask IDs to strings
 		this.subtasks = (data.subtasks || []).map((subtask) => ({
 			...subtask,
-			id: Number(subtask.id), // Keep subtask IDs as numbers per interface
+			id: String(subtask.id),
 			parentId: String(subtask.parentId)
 		}));

@@ -62,6 +65,9 @@ export class TaskEntity implements Task {
 		this.tags = data.tags;
 		this.assignee = data.assignee;
 		this.complexity = data.complexity;
+		this.recommendedSubtasks = data.recommendedSubtasks;
+		this.expansionPrompt = data.expansionPrompt;
+		this.complexityReasoning = data.complexityReasoning;
 	}

 	/**
@@ -246,7 +252,10 @@ export class TaskEntity implements Task {
 			actualEffort: this.actualEffort,
 			tags: this.tags,
 			assignee: this.assignee,
-			complexity: this.complexity
+			complexity: this.complexity,
+			recommendedSubtasks: this.recommendedSubtasks,
+			expansionPrompt: this.expansionPrompt,
+			complexityReasoning: this.complexityReasoning
 		};
 	}
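As a rough sketch of what the new fields enable, the snippet below constructs an entity carrying the complexity-report data and reads it back; the `declare` stand-in and the abbreviated task shape are hypothetical, since the real Task interface has more required fields than shown here.

```typescript
// Stand-in for TaskEntity as shown in the diff; the import path is omitted on purpose.
declare const TaskEntity: new (data: unknown) => {
	recommendedSubtasks?: number;
	expansionPrompt?: string;
	complexityReasoning?: string;
};

const entity = new TaskEntity({
	id: '1',
	title: 'Set up auth',
	status: 'pending',
	subtasks: [],
	recommendedSubtasks: 4,
	expansionPrompt: 'Break the work down by auth provider',
	complexityReasoning: 'Touches several services'
});

// The complexity-report fields now live on the entity and survive serialization.
console.log(entity.recommendedSubtasks, entity.expansionPrompt);
```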
@@ -51,7 +51,8 @@ export const ERROR_CODES = {
 	INTERNAL_ERROR: 'INTERNAL_ERROR',
 	INVALID_INPUT: 'INVALID_INPUT',
 	NOT_IMPLEMENTED: 'NOT_IMPLEMENTED',
-	UNKNOWN_ERROR: 'UNKNOWN_ERROR'
+	UNKNOWN_ERROR: 'UNKNOWN_ERROR',
+	NOT_FOUND: 'NOT_FOUND'
 } as const;

 export type ErrorCode = (typeof ERROR_CODES)[keyof typeof ERROR_CODES];
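A self-contained sketch of the `as const` pattern with the new NOT_FOUND member in use follows; it re-declares a trimmed-down constant locally rather than importing the package's real file, and the `toErrorCode` helper is invented for illustration.

```typescript
// Trimmed-down local copy of the pattern above (not the package's file).
const ERROR_CODES = {
	UNKNOWN_ERROR: 'UNKNOWN_ERROR',
	NOT_FOUND: 'NOT_FOUND'
} as const;
type ErrorCode = (typeof ERROR_CODES)[keyof typeof ERROR_CODES];

// Narrow an arbitrary string to a known error code, falling back to UNKNOWN_ERROR.
function toErrorCode(value: string): ErrorCode {
	const known = Object.values(ERROR_CODES) as string[];
	return known.includes(value)
		? (value as ErrorCode)
		: ERROR_CODES.UNKNOWN_ERROR;
}

toErrorCode('NOT_FOUND'); // => 'NOT_FOUND'
toErrorCode('TIMEOUT'); // => 'UNKNOWN_ERROR'
```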
@@ -11,7 +11,9 @@ export {
 	type ListTasksResult,
 	type StartTaskOptions,
 	type StartTaskResult,
-	type ConflictCheckResult
+	type ConflictCheckResult,
+	type ExportTasksOptions,
+	type ExportResult
 } from './task-master-core.js';

 // Re-export types
@@ -61,3 +63,23 @@ export { getLogger, createLogger, setGlobalLogger } from './logger/index.js';

 // Re-export executors
 export * from './executors/index.js';
+
+// Re-export reports
+export {
+	ComplexityReportManager,
+	type ComplexityReport,
+	type ComplexityReportMetadata,
+	type ComplexityAnalysis,
+	type TaskComplexityData
+} from './reports/index.js';
+
+// Re-export services
+export {
+	PreflightChecker,
+	TaskLoaderService,
+	type CheckResult,
+	type PreflightResult,
+	type TaskValidationResult,
+	type ValidationErrorType,
+	type DependencyIssue
+} from './services/index.js';
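The effect of the new re-exports, sketched from the consumer side; the relative import targets the barrel file shown above, and only the ComplexityReportManager constructor (a project root string) is exercised since the other constructors are not part of this diff.

```typescript
// Consumers inside the package can pull the report and service APIs from the barrel file.
import {
	ComplexityReportManager,
	PreflightChecker,
	TaskLoaderService,
	type ComplexityReport,
	type ExportTasksOptions
} from './index.js';

const reports = new ComplexityReportManager(process.cwd());
```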
@@ -3,7 +3,27 @@
  * This file defines the contract for all storage implementations
  */

-import type { Task, TaskMetadata } from '../types/index.js';
+import type { Task, TaskMetadata, TaskStatus } from '../types/index.js';
+
+/**
+ * Options for loading tasks from storage
+ */
+export interface LoadTasksOptions {
+	/** Filter tasks by status */
+	status?: TaskStatus;
+	/** Exclude subtasks from loaded tasks (default: false) */
+	excludeSubtasks?: boolean;
+}
+
+/**
+ * Result type for updateTaskStatus operations
+ */
+export interface UpdateStatusResult {
+	success: boolean;
+	oldStatus: TaskStatus;
+	newStatus: TaskStatus;
+	taskId: string;
+}

 /**
  * Interface for storage operations on tasks
@@ -11,11 +31,12 @@ import type { Task, TaskMetadata } from '../types/index.js';
  */
 export interface IStorage {
 	/**
-	 * Load all tasks from storage, optionally filtered by tag
+	 * Load all tasks from storage, optionally filtered by tag and other criteria
 	 * @param tag - Optional tag to filter tasks by
+	 * @param options - Optional filtering options (status, excludeSubtasks)
 	 * @returns Promise that resolves to an array of tasks
 	 */
-	loadTasks(tag?: string): Promise<Task[]>;
+	loadTasks(tag?: string, options?: LoadTasksOptions): Promise<Task[]>;

 	/**
 	 * Load a single task by ID
@@ -54,6 +75,19 @@ export interface IStorage {
 		tag?: string
 	): Promise<void>;

+	/**
+	 * Update task or subtask status by ID
+	 * @param taskId - ID of the task or subtask (e.g., "1" or "1.2")
+	 * @param newStatus - New status to set
+	 * @param tag - Optional tag context for the task
+	 * @returns Promise that resolves to update result with old and new status
+	 */
+	updateTaskStatus(
+		taskId: string,
+		newStatus: TaskStatus,
+		tag?: string
+	): Promise<UpdateStatusResult>;
+
 	/**
 	 * Delete a task by ID
 	 * @param taskId - ID of the task to delete
@@ -182,7 +216,7 @@ export abstract class BaseStorage implements IStorage {
 	}

 	// Abstract methods that must be implemented by concrete classes
-	abstract loadTasks(tag?: string): Promise<Task[]>;
+	abstract loadTasks(tag?: string, options?: LoadTasksOptions): Promise<Task[]>;
 	abstract loadTask(taskId: string, tag?: string): Promise<Task | null>;
 	abstract saveTasks(tasks: Task[], tag?: string): Promise<void>;
 	abstract appendTasks(tasks: Task[], tag?: string): Promise<void>;
@@ -191,6 +225,11 @@ export abstract class BaseStorage implements IStorage {
 		updates: Partial<Task>,
 		tag?: string
 	): Promise<void>;
+	abstract updateTaskStatus(
+		taskId: string,
+		newStatus: TaskStatus,
+		tag?: string
+	): Promise<UpdateStatusResult>;
 	abstract deleteTask(taskId: string, tag?: string): Promise<void>;
 	abstract exists(tag?: string): Promise<boolean>;
 	abstract loadMetadata(tag?: string): Promise<TaskMetadata | null>;
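To make the new storage surface concrete, here is a hedged sketch of a caller written against the interface; the import path and the helper name are illustrative, and it assumes task IDs coerce cleanly to strings as the rest of the diff normalizes them to be.

```typescript
import type { IStorage, LoadTasksOptions } from './storage.interface.js'; // path illustrative

// Works against any IStorage implementation (file- or API-backed).
async function startNextPendingTask(storage: IStorage, tag?: string) {
	const options: LoadTasksOptions = { status: 'pending', excludeSubtasks: true };
	const pending = await storage.loadTasks(tag, options);
	if (pending.length === 0) return null;

	// UpdateStatusResult carries both the old and the new status, useful for logging or undo.
	return storage.updateTaskStatus(String(pending[0].id), 'in-progress', tag);
}
```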
148  packages/tm-core/src/mappers/TaskMapper.test.ts  Normal file
@@ -0,0 +1,148 @@
import { describe, it, expect, vi } from 'vitest';
import { TaskMapper } from './TaskMapper.js';
import type { Tables } from '../types/database.types.js';

type TaskRow = Tables<'tasks'>;

describe('TaskMapper', () => {
	describe('extractMetadataField', () => {
		it('should extract string field from metadata', () => {
			const taskRow: TaskRow = {
				id: '123',
				display_id: '1',
				title: 'Test Task',
				description: 'Test description',
				status: 'todo',
				priority: 'medium',
				parent_task_id: null,
				subtask_position: 0,
				created_at: new Date().toISOString(),
				updated_at: new Date().toISOString(),
				metadata: {
					details: 'Some details',
					testStrategy: 'Test with unit tests'
				},
				complexity: null,
				assignee_id: null,
				estimated_hours: null,
				actual_hours: null,
				due_date: null,
				completed_at: null
			};

			const task = TaskMapper.mapDatabaseTaskToTask(taskRow, [], new Map());

			expect(task.details).toBe('Some details');
			expect(task.testStrategy).toBe('Test with unit tests');
		});

		it('should use default value when metadata field is missing', () => {
			const taskRow: TaskRow = {
				id: '123',
				display_id: '1',
				title: 'Test Task',
				description: 'Test description',
				status: 'todo',
				priority: 'medium',
				parent_task_id: null,
				subtask_position: 0,
				created_at: new Date().toISOString(),
				updated_at: new Date().toISOString(),
				metadata: {},
				complexity: null,
				assignee_id: null,
				estimated_hours: null,
				actual_hours: null,
				due_date: null,
				completed_at: null
			};

			const task = TaskMapper.mapDatabaseTaskToTask(taskRow, [], new Map());

			expect(task.details).toBe('');
			expect(task.testStrategy).toBe('');
		});

		it('should use default value when metadata is null', () => {
			const taskRow: TaskRow = {
				id: '123',
				display_id: '1',
				title: 'Test Task',
				description: 'Test description',
				status: 'todo',
				priority: 'medium',
				parent_task_id: null,
				subtask_position: 0,
				created_at: new Date().toISOString(),
				updated_at: new Date().toISOString(),
				metadata: null,
				complexity: null,
				assignee_id: null,
				estimated_hours: null,
				actual_hours: null,
				due_date: null,
				completed_at: null
			};

			const task = TaskMapper.mapDatabaseTaskToTask(taskRow, [], new Map());

			expect(task.details).toBe('');
			expect(task.testStrategy).toBe('');
		});

		it('should use default value and warn when metadata field has wrong type', () => {
			const consoleWarnSpy = vi
				.spyOn(console, 'warn')
				.mockImplementation(() => {});

			const taskRow: TaskRow = {
				id: '123',
				display_id: '1',
				title: 'Test Task',
				description: 'Test description',
				status: 'todo',
				priority: 'medium',
				parent_task_id: null,
				subtask_position: 0,
				created_at: new Date().toISOString(),
				updated_at: new Date().toISOString(),
				metadata: {
					details: 12345, // Wrong type: number instead of string
					testStrategy: ['test1', 'test2'] // Wrong type: array instead of string
				},
				complexity: null,
				assignee_id: null,
				estimated_hours: null,
				actual_hours: null,
				due_date: null,
				completed_at: null
			};

			const task = TaskMapper.mapDatabaseTaskToTask(taskRow, [], new Map());

			// Should use empty string defaults when type doesn't match
			expect(task.details).toBe('');
			expect(task.testStrategy).toBe('');

			// Should have logged warnings
			expect(consoleWarnSpy).toHaveBeenCalledWith(
				expect.stringContaining('Type mismatch in metadata field "details"')
			);
			expect(consoleWarnSpy).toHaveBeenCalledWith(
				expect.stringContaining(
					'Type mismatch in metadata field "testStrategy"'
				)
			);

			consoleWarnSpy.mockRestore();
		});
	});

	describe('mapStatus', () => {
		it('should map database status to internal status', () => {
			expect(TaskMapper.mapStatus('todo')).toBe('pending');
			expect(TaskMapper.mapStatus('in_progress')).toBe('in-progress');
			expect(TaskMapper.mapStatus('done')).toBe('done');
		});
	});
});
@@ -2,22 +2,32 @@ import { Task, Subtask } from '../types/index.js';
 import { Database, Tables } from '../types/database.types.js';

 type TaskRow = Tables<'tasks'>;
-type DependencyRow = Tables<'task_dependencies'>;
+
+// Legacy type for backward compatibility
+type DependencyRow = Tables<'task_dependencies'> & {
+	depends_on_task?: { display_id: string } | null;
+	depends_on_task_id?: string;
+};

 export class TaskMapper {
 	/**
 	 * Maps database tasks to internal Task format
+	 * @param dbTasks - Array of tasks from database
+	 * @param dependencies - Either a Map of task_id to display_ids or legacy array format
 	 */
 	static mapDatabaseTasksToTasks(
 		dbTasks: TaskRow[],
-		dbDependencies: DependencyRow[]
+		dependencies: Map<string, string[]> | DependencyRow[]
 	): Task[] {
 		if (!dbTasks || dbTasks.length === 0) {
 			return [];
 		}

-		// Group dependencies by task_id
-		const dependenciesByTaskId = this.groupDependenciesByTaskId(dbDependencies);
+		// Handle both Map and array formats for backward compatibility
+		const dependenciesByTaskId =
+			dependencies instanceof Map
+				? dependencies
+				: this.groupDependenciesByTaskId(dependencies);

 		// Separate parent tasks and subtasks
 		const parentTasks = dbTasks.filter((t) => !t.parent_task_id);
@@ -43,21 +53,23 @@ export class TaskMapper {
 	): Task {
 		// Map subtasks
 		const subtasks: Subtask[] = dbSubtasks.map((subtask, index) => ({
-			id: index + 1, // Use numeric ID for subtasks
+			id: subtask.display_id || String(index + 1), // Use display_id if available (API storage), fallback to numeric (file storage)
 			parentId: dbTask.id,
 			title: subtask.title,
 			description: subtask.description || '',
 			status: this.mapStatus(subtask.status),
 			priority: this.mapPriority(subtask.priority),
 			dependencies: dependenciesByTaskId.get(subtask.id) || [],
-			details: (subtask.metadata as any)?.details || '',
-			testStrategy: (subtask.metadata as any)?.testStrategy || '',
+			details: this.extractMetadataField(subtask.metadata, 'details', ''),
+			testStrategy: this.extractMetadataField(
+				subtask.metadata,
+				'testStrategy',
+				''
+			),
 			createdAt: subtask.created_at,
 			updatedAt: subtask.updated_at,
 			assignee: subtask.assignee_id || undefined,
-			complexity: subtask.complexity
-				? this.mapComplexityToInternal(subtask.complexity)
-				: undefined
+			complexity: subtask.complexity ?? undefined
 		}));

 		return {
@@ -67,22 +79,25 @@ export class TaskMapper {
 			status: this.mapStatus(dbTask.status),
 			priority: this.mapPriority(dbTask.priority),
 			dependencies: dependenciesByTaskId.get(dbTask.id) || [],
-			details: (dbTask.metadata as any)?.details || '',
-			testStrategy: (dbTask.metadata as any)?.testStrategy || '',
+			details: this.extractMetadataField(dbTask.metadata, 'details', ''),
+			testStrategy: this.extractMetadataField(
+				dbTask.metadata,
+				'testStrategy',
+				''
+			),
 			subtasks,
 			createdAt: dbTask.created_at,
 			updatedAt: dbTask.updated_at,
 			assignee: dbTask.assignee_id || undefined,
-			complexity: dbTask.complexity
-				? this.mapComplexityToInternal(dbTask.complexity)
-				: undefined,
+			complexity: dbTask.complexity ?? undefined,
 			effort: dbTask.estimated_hours || undefined,
 			actualEffort: dbTask.actual_hours || undefined
 		};
 	}

 	/**
-	 * Groups dependencies by task ID
+	 * Groups dependencies by task ID (legacy method for backward compatibility)
+	 * @deprecated Use DependencyFetcher.fetchDependenciesWithDisplayIds instead
 	 */
 	private static groupDependenciesByTaskId(
 		dependencies: DependencyRow[]
@@ -92,7 +107,14 @@ export class TaskMapper {
 		if (dependencies) {
 			for (const dep of dependencies) {
 				const deps = dependenciesByTaskId.get(dep.task_id) || [];
-				deps.push(dep.depends_on_task_id);
+				// Handle both old format (UUID string) and new format (object with display_id)
+				const dependencyId =
+					typeof dep.depends_on_task === 'object'
+						? dep.depends_on_task?.display_id
+						: dep.depends_on_task_id;
+				if (dependencyId) {
+					deps.push(dependencyId);
+				}
 				dependenciesByTaskId.set(dep.task_id, deps);
 			}
 		}
@@ -157,14 +179,38 @@ export class TaskMapper {
 	}

 	/**
-	 * Maps numeric complexity to descriptive complexity
+	 * Safely extracts a field from metadata JSON with runtime type validation
+	 * @param metadata The metadata object (could be null or any type)
+	 * @param field The field to extract
+	 * @param defaultValue Default value if field doesn't exist
+	 * @returns The extracted value if it matches the expected type, otherwise defaultValue
 	 */
-	private static mapComplexityToInternal(
-		complexity: number
-	): Task['complexity'] {
-		if (complexity <= 2) return 'simple';
-		if (complexity <= 5) return 'moderate';
-		if (complexity <= 8) return 'complex';
-		return 'very-complex';
+	private static extractMetadataField<T>(
+		metadata: unknown,
+		field: string,
+		defaultValue: T
+	): T {
+		if (!metadata || typeof metadata !== 'object') {
+			return defaultValue;
+		}
+
+		const value = (metadata as Record<string, unknown>)[field];
+
+		if (value === undefined) {
+			return defaultValue;
+		}
+
+		// Runtime type validation: ensure value matches the type of defaultValue
+		const expectedType = typeof defaultValue;
+		const actualType = typeof value;
+
+		if (expectedType !== actualType) {
+			console.warn(
+				`Type mismatch in metadata field "${field}": expected ${expectedType}, got ${actualType}. Using default value.`
+			);
+			return defaultValue;
+		}
+
+		return value as T;
 	}
 }
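A short sketch of the new call shape from the caller's side; the import paths mirror the test file above, the `dbTasks` rows are assumed to have been fetched elsewhere, and the Map contents are invented for illustration.

```typescript
import { TaskMapper } from './TaskMapper.js';
import type { Tables } from '../types/database.types.js';

declare const dbTasks: Tables<'tasks'>[]; // rows fetched elsewhere

// Dependencies can now be passed as a prebuilt Map of task UUID -> dependent display_ids
// (keys and values here are made up).
const dependenciesByTaskId = new Map<string, string[]>([
	['a1b2c3', ['1', '2']]
]);

const tasks = TaskMapper.mapDatabaseTasksToTasks(dbTasks, dependenciesByTaskId);

// The legacy array form is still accepted, so existing callers keep working:
// TaskMapper.mapDatabaseTasksToTasks(dbTasks, legacyDependencyRows);
```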
185  packages/tm-core/src/reports/complexity-report-manager.ts  Normal file
@@ -0,0 +1,185 @@
/**
 * @fileoverview ComplexityReportManager - Handles loading and managing complexity analysis reports
 * Follows the same pattern as ConfigManager and AuthManager
 */

import { promises as fs } from 'fs';
import path from 'path';
import type {
	ComplexityReport,
	ComplexityAnalysis,
	TaskComplexityData
} from './types.js';
import { getLogger } from '../logger/index.js';

const logger = getLogger('ComplexityReportManager');

/**
 * Manages complexity analysis reports
 * Handles loading, caching, and providing complexity data for tasks
 */
export class ComplexityReportManager {
	private projectRoot: string;
	private reportCache: Map<string, ComplexityReport> = new Map();

	constructor(projectRoot: string) {
		this.projectRoot = projectRoot;
	}

	/**
	 * Get the path to the complexity report file for a given tag
	 */
	private getReportPath(tag?: string): string {
		const reportsDir = path.join(this.projectRoot, '.taskmaster', 'reports');
		const tagSuffix = tag && tag !== 'master' ? `_${tag}` : '';
		return path.join(reportsDir, `task-complexity-report${tagSuffix}.json`);
	}

	/**
	 * Load complexity report for a given tag
	 * Results are cached to avoid repeated file reads
	 */
	async loadReport(tag?: string): Promise<ComplexityReport | null> {
		const resolvedTag = tag || 'master';
		const cacheKey = resolvedTag;

		// Check cache first
		if (this.reportCache.has(cacheKey)) {
			return this.reportCache.get(cacheKey)!;
		}

		const reportPath = this.getReportPath(tag);

		try {
			// Check if file exists
			await fs.access(reportPath);

			// Read and parse the report
			const content = await fs.readFile(reportPath, 'utf-8');
			const report = JSON.parse(content) as ComplexityReport;

			// Validate basic structure
			if (!report.meta || !Array.isArray(report.complexityAnalysis)) {
				logger.warn(
					`Invalid complexity report structure at ${reportPath}, ignoring`
				);
				return null;
			}

			// Cache the report
			this.reportCache.set(cacheKey, report);

			logger.debug(
				`Loaded complexity report for tag '${resolvedTag}' with ${report.complexityAnalysis.length} analyses`
			);

			return report;
		} catch (error: any) {
			if (error.code === 'ENOENT') {
				// File doesn't exist - this is normal, not all projects have complexity reports
				logger.debug(`No complexity report found for tag '${resolvedTag}'`);
				return null;
			}

			// Other errors (parsing, permissions, etc.)
			logger.warn(
				`Failed to load complexity report for tag '${resolvedTag}': ${error.message}`
			);
			return null;
		}
	}

	/**
	 * Get complexity data for a specific task ID
	 */
	async getComplexityForTask(
		taskId: string | number,
		tag?: string
	): Promise<TaskComplexityData | null> {
		const report = await this.loadReport(tag);
		if (!report) {
			return null;
		}

		// Find the analysis for this task
		const analysis = report.complexityAnalysis.find(
			(a) => String(a.taskId) === String(taskId)
		);

		if (!analysis) {
			return null;
		}

		// Convert to TaskComplexityData format
		return {
			complexityScore: analysis.complexityScore,
			recommendedSubtasks: analysis.recommendedSubtasks,
			expansionPrompt: analysis.expansionPrompt,
			complexityReasoning: analysis.complexityReasoning
		};
	}

	/**
	 * Get complexity data for multiple tasks at once
	 * More efficient than calling getComplexityForTask multiple times
	 */
	async getComplexityForTasks(
		taskIds: (string | number)[],
		tag?: string
	): Promise<Map<string, TaskComplexityData>> {
		const result = new Map<string, TaskComplexityData>();
		const report = await this.loadReport(tag);

		if (!report) {
			return result;
		}

		// Create a map for fast lookups
		const analysisMap = new Map<string, ComplexityAnalysis>();
		report.complexityAnalysis.forEach((analysis) => {
			analysisMap.set(String(analysis.taskId), analysis);
		});

		// Map each task ID to its complexity data
		taskIds.forEach((taskId) => {
			const analysis = analysisMap.get(String(taskId));
			if (analysis) {
				result.set(String(taskId), {
					complexityScore: analysis.complexityScore,
					recommendedSubtasks: analysis.recommendedSubtasks,
					expansionPrompt: analysis.expansionPrompt,
					complexityReasoning: analysis.complexityReasoning
				});
			}
		});

		return result;
	}

	/**
	 * Clear the report cache
	 * @param tag - Specific tag to clear, or undefined to clear all cached reports
	 * Useful when reports are regenerated or modified externally
	 */
	clearCache(tag?: string): void {
		if (tag) {
			this.reportCache.delete(tag);
		} else {
			// Clear all cached reports
			this.reportCache.clear();
		}
	}

	/**
	 * Check if a complexity report exists for a tag
	 */
	async hasReport(tag?: string): Promise<boolean> {
		const reportPath = this.getReportPath(tag);
		try {
			await fs.access(reportPath);
			return true;
		} catch {
			return false;
		}
	}
}
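A hedged usage sketch of the manager as defined above; the project root is a placeholder path, and the task IDs are invented. It reads `.taskmaster/reports/task-complexity-report.json` (or the `_<tag>` variant for non-master tags) and caches the parsed report.

```typescript
import { ComplexityReportManager } from './complexity-report-manager.js';

async function example() {
	const manager = new ComplexityReportManager('/path/to/project'); // placeholder root

	// Returns null when no report exists or the task has no analysis entry.
	const single = await manager.getComplexityForTask('42');

	// Batch lookup: a Map keyed by String(taskId), skipping tasks without analyses.
	const many = await manager.getComplexityForTasks(['1', '2', 3]);

	// After regenerating the report on disk, drop the cached copy.
	manager.clearCache();

	return { single, many };
}
```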
11  packages/tm-core/src/reports/index.ts  Normal file
@@ -0,0 +1,11 @@
/**
 * @fileoverview Reports module exports
 */

export { ComplexityReportManager } from './complexity-report-manager.js';
export type {
	ComplexityReport,
	ComplexityReportMetadata,
	ComplexityAnalysis,
	TaskComplexityData
} from './types.js';
Some files were not shown because too many files have changed in this diff.