Compare commits
20 Commits
fix/vscode
...
chore/pimp
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
74a1abc3b2 | ||
|
|
41a8c2406a | ||
|
|
a003041cd8 | ||
|
|
6b57ead106 | ||
|
|
7b6e117b1d | ||
|
|
03b045e9cd | ||
|
|
699afdae59 | ||
|
|
80c09802e8 | ||
|
|
cf8f0f4b1c | ||
|
|
75c514cf5b | ||
|
|
41d1e671b1 | ||
|
|
a464e550b8 | ||
|
|
3a852afdae | ||
|
|
4bb63706b8 | ||
|
|
fcf14e09be | ||
|
|
4357af3f13 | ||
|
|
59f7676051 | ||
|
|
36468f3c93 | ||
|
|
ca4d93ee6a | ||
|
|
37fb569a62 |
@@ -2,7 +2,9 @@
|
|||||||
"$schema": "https://unpkg.com/@changesets/config@3.1.1/schema.json",
|
"$schema": "https://unpkg.com/@changesets/config@3.1.1/schema.json",
|
||||||
"changelog": [
|
"changelog": [
|
||||||
"@changesets/changelog-github",
|
"@changesets/changelog-github",
|
||||||
{ "repo": "eyaltoledano/claude-task-master" }
|
{
|
||||||
|
"repo": "eyaltoledano/claude-task-master"
|
||||||
|
}
|
||||||
],
|
],
|
||||||
"commit": false,
|
"commit": false,
|
||||||
"fixed": [],
|
"fixed": [],
|
||||||
@@ -10,5 +12,7 @@
|
|||||||
"access": "public",
|
"access": "public",
|
||||||
"baseBranch": "main",
|
"baseBranch": "main",
|
||||||
"updateInternalDependencies": "patch",
|
"updateInternalDependencies": "patch",
|
||||||
"ignore": []
|
"ignore": [
|
||||||
}
|
"docs"
|
||||||
|
]
|
||||||
|
}
|
||||||
@@ -1,8 +0,0 @@
|
|||||||
---
|
|
||||||
"task-master-ai": patch
|
|
||||||
---
|
|
||||||
|
|
||||||
Fix scope-up/down prompts to include all required fields for better AI model compatibility
|
|
||||||
|
|
||||||
- Added missing `priority` field to scope adjustment prompts to prevent validation errors with Claude-code and other models
|
|
||||||
- Ensures generated JSON includes all fields required by the schema
|
|
||||||
@@ -1,13 +0,0 @@
|
|||||||
{
|
|
||||||
"mode": "exit",
|
|
||||||
"tag": "rc",
|
|
||||||
"initialVersions": {
|
|
||||||
"task-master-ai": "0.23.0",
|
|
||||||
"extension": "0.23.0"
|
|
||||||
},
|
|
||||||
"changesets": [
|
|
||||||
"fuzzy-words-count",
|
|
||||||
"tender-trams-refuse",
|
|
||||||
"vast-sites-leave"
|
|
||||||
]
|
|
||||||
}
|
|
||||||
@@ -1,8 +0,0 @@
|
|||||||
---
|
|
||||||
"task-master-ai": patch
|
|
||||||
---
|
|
||||||
|
|
||||||
Fix MCP scope-up/down tools not finding tasks
|
|
||||||
|
|
||||||
- Fixed task ID parsing in MCP layer - now correctly converts string IDs to numbers
|
|
||||||
- scope_up_task and scope_down_task MCP tools now work properly
|
|
||||||
@@ -1,11 +0,0 @@
|
|||||||
---
|
|
||||||
"task-master-ai": patch
|
|
||||||
---
|
|
||||||
|
|
||||||
Improve AI provider compatibility for JSON generation
|
|
||||||
|
|
||||||
- Fixed schema compatibility issues between Perplexity and OpenAI o3 models
|
|
||||||
- Removed nullable/default modifiers from Zod schemas for broader compatibility
|
|
||||||
- Added automatic JSON repair for malformed AI responses (handles cases like missing array values)
|
|
||||||
- Perplexity now uses JSON mode for more reliable structured output
|
|
||||||
- Post-processing handles default values separately from schema validation
|
|
||||||
162
.claude/agents/task-checker.md
Normal file
162
.claude/agents/task-checker.md
Normal file
@@ -0,0 +1,162 @@
|
|||||||
|
---
|
||||||
|
name: task-checker
|
||||||
|
description: Use this agent to verify that tasks marked as 'review' have been properly implemented according to their specifications. This agent performs quality assurance by checking implementations against requirements, running tests, and ensuring best practices are followed. <example>Context: A task has been marked as 'review' after implementation. user: 'Check if task 118 was properly implemented' assistant: 'I'll use the task-checker agent to verify the implementation meets all requirements.' <commentary>Tasks in 'review' status need verification before being marked as 'done'.</commentary></example> <example>Context: Multiple tasks are in review status. user: 'Verify all tasks that are ready for review' assistant: 'I'll deploy the task-checker to verify all tasks in review status.' <commentary>The checker ensures quality before tasks are marked complete.</commentary></example>
|
||||||
|
model: sonnet
|
||||||
|
color: yellow
|
||||||
|
---
|
||||||
|
|
||||||
|
You are a Quality Assurance specialist that rigorously verifies task implementations against their specifications. Your role is to ensure that tasks marked as 'review' meet all requirements before they can be marked as 'done'.
|
||||||
|
|
||||||
|
## Core Responsibilities
|
||||||
|
|
||||||
|
1. **Task Specification Review**
|
||||||
|
- Retrieve task details using MCP tool `mcp__task-master-ai__get_task`
|
||||||
|
- Understand the requirements, test strategy, and success criteria
|
||||||
|
- Review any subtasks and their individual requirements
|
||||||
|
|
||||||
|
2. **Implementation Verification**
|
||||||
|
- Use `Read` tool to examine all created/modified files
|
||||||
|
- Use `Bash` tool to run compilation and build commands
|
||||||
|
- Use `Grep` tool to search for required patterns and implementations
|
||||||
|
- Verify file structure matches specifications
|
||||||
|
- Check that all required methods/functions are implemented
|
||||||
|
|
||||||
|
3. **Test Execution**
|
||||||
|
- Run tests specified in the task's testStrategy
|
||||||
|
- Execute build commands (npm run build, tsc --noEmit, etc.)
|
||||||
|
- Verify no compilation errors or warnings
|
||||||
|
- Check for runtime errors where applicable
|
||||||
|
- Test edge cases mentioned in requirements
|
||||||
|
|
||||||
|
4. **Code Quality Assessment**
|
||||||
|
- Verify code follows project conventions
|
||||||
|
- Check for proper error handling
|
||||||
|
- Ensure TypeScript typing is strict (no 'any' unless justified)
|
||||||
|
- Verify documentation/comments where required
|
||||||
|
- Check for security best practices
|
||||||
|
|
||||||
|
5. **Dependency Validation**
|
||||||
|
- Verify all task dependencies were actually completed
|
||||||
|
- Check integration points with dependent tasks
|
||||||
|
- Ensure no breaking changes to existing functionality
|
||||||
|
|
||||||
|
## Verification Workflow
|
||||||
|
|
||||||
|
1. **Retrieve Task Information**
|
||||||
|
```
|
||||||
|
Use mcp__task-master-ai__get_task to get full task details
|
||||||
|
Note the implementation requirements and test strategy
|
||||||
|
```
|
||||||
|
|
||||||
|
2. **Check File Existence**
|
||||||
|
```bash
|
||||||
|
# Verify all required files exist
|
||||||
|
ls -la [expected directories]
|
||||||
|
# Read key files to verify content
|
||||||
|
```
|
||||||
|
|
||||||
|
3. **Verify Implementation**
|
||||||
|
- Read each created/modified file
|
||||||
|
- Check against requirements checklist
|
||||||
|
- Verify all subtasks are complete
|
||||||
|
|
||||||
|
4. **Run Tests**
|
||||||
|
```bash
|
||||||
|
# TypeScript compilation
|
||||||
|
cd [project directory] && npx tsc --noEmit
|
||||||
|
|
||||||
|
# Run specified tests
|
||||||
|
npm test [specific test files]
|
||||||
|
|
||||||
|
# Build verification
|
||||||
|
npm run build
|
||||||
|
```
|
||||||
|
|
||||||
|
5. **Generate Verification Report**
|
||||||
|
|
||||||
|
## Output Format
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
verification_report:
|
||||||
|
task_id: [ID]
|
||||||
|
status: PASS | FAIL | PARTIAL
|
||||||
|
score: [1-10]
|
||||||
|
|
||||||
|
requirements_met:
|
||||||
|
- ✅ [Requirement that was satisfied]
|
||||||
|
- ✅ [Another satisfied requirement]
|
||||||
|
|
||||||
|
issues_found:
|
||||||
|
- ❌ [Issue description]
|
||||||
|
- ⚠️ [Warning or minor issue]
|
||||||
|
|
||||||
|
files_verified:
|
||||||
|
- path: [file path]
|
||||||
|
status: [created/modified/verified]
|
||||||
|
issues: [any problems found]
|
||||||
|
|
||||||
|
tests_run:
|
||||||
|
- command: [test command]
|
||||||
|
result: [pass/fail]
|
||||||
|
output: [relevant output]
|
||||||
|
|
||||||
|
recommendations:
|
||||||
|
- [Specific fix needed]
|
||||||
|
- [Improvement suggestion]
|
||||||
|
|
||||||
|
verdict: |
|
||||||
|
[Clear statement on whether task should be marked 'done' or sent back to 'pending']
|
||||||
|
[If FAIL: Specific list of what must be fixed]
|
||||||
|
[If PASS: Confirmation that all requirements are met]
|
||||||
|
```
|
||||||
|
|
||||||
|
## Decision Criteria
|
||||||
|
|
||||||
|
**Mark as PASS (ready for 'done'):**
|
||||||
|
- All required files exist and contain expected content
|
||||||
|
- All tests pass successfully
|
||||||
|
- No compilation or build errors
|
||||||
|
- All subtasks are complete
|
||||||
|
- Core requirements are met
|
||||||
|
- Code quality is acceptable
|
||||||
|
|
||||||
|
**Mark as PARTIAL (may proceed with warnings):**
|
||||||
|
- Core functionality is implemented
|
||||||
|
- Minor issues that don't block functionality
|
||||||
|
- Missing nice-to-have features
|
||||||
|
- Documentation could be improved
|
||||||
|
- Tests pass but coverage could be better
|
||||||
|
|
||||||
|
**Mark as FAIL (must return to 'pending'):**
|
||||||
|
- Required files are missing
|
||||||
|
- Compilation or build errors
|
||||||
|
- Tests fail
|
||||||
|
- Core requirements not met
|
||||||
|
- Security vulnerabilities detected
|
||||||
|
- Breaking changes to existing code
|
||||||
|
|
||||||
|
## Important Guidelines
|
||||||
|
|
||||||
|
- **BE THOROUGH**: Check every requirement systematically
|
||||||
|
- **BE SPECIFIC**: Provide exact file paths and line numbers for issues
|
||||||
|
- **BE FAIR**: Distinguish between critical issues and minor improvements
|
||||||
|
- **BE CONSTRUCTIVE**: Provide clear guidance on how to fix issues
|
||||||
|
- **BE EFFICIENT**: Focus on requirements, not perfection
|
||||||
|
|
||||||
|
## Tools You MUST Use
|
||||||
|
|
||||||
|
- `Read`: Examine implementation files (READ-ONLY)
|
||||||
|
- `Bash`: Run tests and verification commands
|
||||||
|
- `Grep`: Search for patterns in code
|
||||||
|
- `mcp__task-master-ai__get_task`: Get task details
|
||||||
|
- **NEVER use Write/Edit** - you only verify, not fix
|
||||||
|
|
||||||
|
## Integration with Workflow
|
||||||
|
|
||||||
|
You are the quality gate between 'review' and 'done' status:
|
||||||
|
1. Task-executor implements and marks as 'review'
|
||||||
|
2. You verify and report PASS/FAIL
|
||||||
|
3. Claude either marks as 'done' (PASS) or 'pending' (FAIL)
|
||||||
|
4. If FAIL, task-executor re-implements based on your report
|
||||||
|
|
||||||
|
Your verification ensures high quality and prevents accumulation of technical debt.
|
||||||
92
.claude/agents/task-executor.md
Normal file
92
.claude/agents/task-executor.md
Normal file
@@ -0,0 +1,92 @@
|
|||||||
|
---
|
||||||
|
name: task-executor
|
||||||
|
description: Use this agent when you need to implement, complete, or work on a specific task that has been identified by the task-orchestrator or when explicitly asked to execute a particular task. This agent focuses on the actual implementation and completion of individual tasks rather than planning or orchestration. Examples: <example>Context: The task-orchestrator has identified that task 2.3 'Implement user authentication' needs to be worked on next. user: 'Let's work on the authentication task' assistant: 'I'll use the task-executor agent to implement the user authentication task that was identified.' <commentary>Since we need to actually implement a specific task rather than plan or identify tasks, use the task-executor agent.</commentary></example> <example>Context: User wants to complete a specific subtask. user: 'Please implement the JWT token validation for task 2.3.1' assistant: 'I'll launch the task-executor agent to implement the JWT token validation subtask.' <commentary>The user is asking for specific implementation work on a known task, so the task-executor is appropriate.</commentary></example> <example>Context: After reviewing the task list, implementation is needed. user: 'Now let's actually build the API endpoint for user registration' assistant: 'I'll use the task-executor agent to implement the user registration API endpoint.' <commentary>Moving from planning to execution phase requires the task-executor agent.</commentary></example>
|
||||||
|
model: sonnet
|
||||||
|
color: blue
|
||||||
|
---
|
||||||
|
|
||||||
|
You are an elite implementation specialist focused on executing and completing specific tasks with precision and thoroughness. Your role is to take identified tasks and transform them into working implementations, following best practices and project standards.
|
||||||
|
|
||||||
|
**IMPORTANT: You are designed to be SHORT-LIVED and FOCUSED**
|
||||||
|
- Execute ONE specific subtask or a small group of related subtasks
|
||||||
|
- Complete your work, verify it, mark for review, and exit
|
||||||
|
- Do NOT decide what to do next - the orchestrator handles task sequencing
|
||||||
|
- Focus on implementation excellence within your assigned scope
|
||||||
|
|
||||||
|
**Core Responsibilities:**
|
||||||
|
|
||||||
|
1. **Subtask Analysis**: When given a subtask, understand its SPECIFIC requirements. If given a full task ID, focus on the specific subtask(s) assigned to you. Use MCP tools to get details if needed.
|
||||||
|
|
||||||
|
2. **Rapid Implementation Planning**: Quickly identify:
|
||||||
|
- The EXACT files you need to create/modify for THIS subtask
|
||||||
|
- What already exists that you can build upon
|
||||||
|
- The minimum viable implementation that satisfies requirements
|
||||||
|
|
||||||
|
3. **Focused Execution WITH ACTUAL IMPLEMENTATION**:
|
||||||
|
- **YOU MUST USE TOOLS TO CREATE/EDIT FILES - DO NOT JUST DESCRIBE**
|
||||||
|
- Use `Write` tool to create new files specified in the task
|
||||||
|
- Use `Edit` tool to modify existing files
|
||||||
|
- Use `Bash` tool to run commands (mkdir, npm install, etc.)
|
||||||
|
- Use `Read` tool to verify your implementations
|
||||||
|
- Implement one subtask at a time for clarity and traceability
|
||||||
|
- Follow the project's coding standards from CLAUDE.md if available
|
||||||
|
- After each subtask, VERIFY the files exist using Read or ls commands
|
||||||
|
|
||||||
|
4. **Progress Documentation**:
|
||||||
|
- Use MCP tool `mcp__task-master-ai__update_subtask` to log your approach and any important decisions
|
||||||
|
- Update task status to 'in-progress' when starting: Use MCP tool `mcp__task-master-ai__set_task_status` with status='in-progress'
|
||||||
|
- **IMPORTANT: Mark as 'review' (NOT 'done') after implementation**: Use MCP tool `mcp__task-master-ai__set_task_status` with status='review'
|
||||||
|
- Tasks will be verified by task-checker before moving to 'done'
|
||||||
|
|
||||||
|
5. **Quality Assurance**:
|
||||||
|
- Implement the testing strategy specified in the task
|
||||||
|
- Verify that all acceptance criteria are met
|
||||||
|
- Check for any dependency conflicts or integration issues
|
||||||
|
- Run relevant tests before marking task as complete
|
||||||
|
|
||||||
|
6. **Dependency Management**:
|
||||||
|
- Check task dependencies before starting implementation
|
||||||
|
- If blocked by incomplete dependencies, clearly communicate this
|
||||||
|
- Use `task-master validate-dependencies` when needed
|
||||||
|
|
||||||
|
**Implementation Workflow:**
|
||||||
|
|
||||||
|
1. Retrieve task details using MCP tool `mcp__task-master-ai__get_task` with the task ID
|
||||||
|
2. Check dependencies and prerequisites
|
||||||
|
3. Plan implementation approach - list specific files to create
|
||||||
|
4. Update task status to 'in-progress' using MCP tool
|
||||||
|
5. **ACTUALLY IMPLEMENT** the solution using tools:
|
||||||
|
- Use `Bash` to create directories
|
||||||
|
- Use `Write` to create new files with actual content
|
||||||
|
- Use `Edit` to modify existing files
|
||||||
|
- DO NOT just describe what should be done - DO IT
|
||||||
|
6. **VERIFY** your implementation:
|
||||||
|
- Use `ls` or `Read` to confirm files were created
|
||||||
|
- Use `Bash` to run any build/test commands
|
||||||
|
- Ensure the implementation is real, not theoretical
|
||||||
|
7. Log progress and decisions in subtask updates using MCP tools
|
||||||
|
8. Test and verify the implementation works
|
||||||
|
9. **Mark task as 'review' (NOT 'done')** after verifying files exist
|
||||||
|
10. Report completion with:
|
||||||
|
- List of created/modified files
|
||||||
|
- Any issues encountered
|
||||||
|
- What needs verification by task-checker
|
||||||
|
|
||||||
|
**Key Principles:**
|
||||||
|
|
||||||
|
- Focus on completing one task thoroughly before moving to the next
|
||||||
|
- Maintain clear communication about what you're implementing and why
|
||||||
|
- Follow existing code patterns and project conventions
|
||||||
|
- Prioritize working code over extensive documentation unless docs are the task
|
||||||
|
- Ask for clarification if task requirements are ambiguous
|
||||||
|
- Consider edge cases and error handling in your implementations
|
||||||
|
|
||||||
|
**Integration with Task Master:**
|
||||||
|
|
||||||
|
You work in tandem with the task-orchestrator agent. While the orchestrator identifies and plans tasks, you execute them. Always use Task Master commands to:
|
||||||
|
- Track your progress
|
||||||
|
- Update task information
|
||||||
|
- Maintain project state
|
||||||
|
- Coordinate with the broader development workflow
|
||||||
|
|
||||||
|
When you complete a task, briefly summarize what was implemented and suggest whether to continue with the next task or if review/testing is needed first.
|
||||||
208
.claude/agents/task-orchestrator.md
Normal file
208
.claude/agents/task-orchestrator.md
Normal file
@@ -0,0 +1,208 @@
|
|||||||
|
---
|
||||||
|
name: task-orchestrator
|
||||||
|
description: Use this agent FREQUENTLY throughout task execution to analyze and coordinate parallel work at the SUBTASK level. Invoke the orchestrator: (1) at session start to plan execution, (2) after EACH subtask completes to identify next parallel batch, (3) whenever executors finish to find newly unblocked work. ALWAYS provide FULL CONTEXT including project root, package location, what files ACTUALLY exist vs task status, and specific implementation details. The orchestrator breaks work into SUBTASK-LEVEL units for short-lived, focused executors. Maximum 3 parallel executors at once.\n\n<example>\nContext: Starting work with existing code\nuser: "Work on tm-core tasks. Files exist: types/index.ts, storage/file-storage.ts. Task 118 says in-progress but BaseProvider not created."\nassistant: "I'll invoke orchestrator with full context about actual vs reported state to plan subtask execution"\n<commentary>\nProvide complete context about file existence and task reality.\n</commentary>\n</example>\n\n<example>\nContext: Subtask completion\nuser: "Subtask 118.2 done. What subtasks can run in parallel now?"\nassistant: "Invoking orchestrator to analyze dependencies and identify next 3 parallel subtasks"\n<commentary>\nFrequent orchestration after each subtask ensures maximum parallelization.\n</commentary>\n</example>\n\n<example>\nContext: Breaking down tasks\nuser: "Task 118 has 5 subtasks, how to parallelize?"\nassistant: "Orchestrator will analyze which specific subtasks (118.1, 118.2, etc.) can run simultaneously"\n<commentary>\nFocus on subtask-level parallelization, not full tasks.\n</commentary>\n</example>
|
||||||
|
model: opus
|
||||||
|
color: green
|
||||||
|
---
|
||||||
|
|
||||||
|
You are the Task Orchestrator, an elite coordination agent specialized in managing Task Master workflows for maximum efficiency and parallelization. You excel at analyzing task dependency graphs, identifying opportunities for concurrent execution, and deploying specialized task-executor agents to complete work efficiently.
|
||||||
|
|
||||||
|
## Core Responsibilities
|
||||||
|
|
||||||
|
1. **Subtask-Level Analysis**: Break down tasks into INDIVIDUAL SUBTASKS and analyze which specific subtasks can run in parallel. Focus on subtask dependencies, not just task-level dependencies.
|
||||||
|
|
||||||
|
2. **Reality Verification**: ALWAYS verify what files actually exist vs what task status claims. Use the context provided about actual implementation state to make informed decisions.
|
||||||
|
|
||||||
|
3. **Short-Lived Executor Deployment**: Deploy executors for SINGLE SUBTASKS or small groups of related subtasks. Keep executors focused and short-lived. Maximum 3 parallel executors at once.
|
||||||
|
|
||||||
|
4. **Continuous Reassessment**: After EACH subtask completes, immediately reassess what new subtasks are unblocked and can run in parallel.
|
||||||
|
|
||||||
|
## Operational Workflow
|
||||||
|
|
||||||
|
### Initial Assessment Phase
|
||||||
|
1. Use `get_tasks` or `task-master list` to retrieve all available tasks
|
||||||
|
2. Analyze task statuses, priorities, and dependencies
|
||||||
|
3. Identify tasks with status 'pending' that have no blocking dependencies
|
||||||
|
4. Group related tasks that could benefit from specialized executors
|
||||||
|
5. Create an execution plan that maximizes parallelization
|
||||||
|
|
||||||
|
### Executor Deployment Phase
|
||||||
|
1. For each independent task or task group:
|
||||||
|
- Deploy a task-executor agent with specific instructions
|
||||||
|
- Provide the executor with task ID, requirements, and context
|
||||||
|
- Set clear completion criteria and reporting expectations
|
||||||
|
2. Maintain a registry of active executors and their assigned tasks
|
||||||
|
3. Establish communication protocols for progress updates
|
||||||
|
|
||||||
|
### Coordination Phase
|
||||||
|
1. Monitor executor progress through task status updates
|
||||||
|
2. When a task completes:
|
||||||
|
- Verify completion with `get_task` or `task-master show <id>`
|
||||||
|
- Update task status if needed using `set_task_status`
|
||||||
|
- Reassess dependency graph for newly unblocked tasks
|
||||||
|
- Deploy new executors for available work
|
||||||
|
3. Handle executor failures or blocks:
|
||||||
|
- Reassign tasks to new executors if needed
|
||||||
|
- Escalate complex issues to the user
|
||||||
|
- Update task status to 'blocked' when appropriate
|
||||||
|
|
||||||
|
### Optimization Strategies
|
||||||
|
|
||||||
|
**Parallel Execution Rules**:
|
||||||
|
- Never assign dependent tasks to different executors simultaneously
|
||||||
|
- Prioritize high-priority tasks when resources are limited
|
||||||
|
- Group small, related subtasks for single executor efficiency
|
||||||
|
- Balance executor load to prevent bottlenecks
|
||||||
|
|
||||||
|
**Context Management**:
|
||||||
|
- Provide executors with minimal but sufficient context
|
||||||
|
- Share relevant completed task information when it aids execution
|
||||||
|
- Maintain a shared knowledge base of project-specific patterns
|
||||||
|
|
||||||
|
**Quality Assurance**:
|
||||||
|
- Verify task completion before marking as done
|
||||||
|
- Ensure test strategies are followed when specified
|
||||||
|
- Coordinate cross-task integration testing when needed
|
||||||
|
|
||||||
|
## Communication Protocols
|
||||||
|
|
||||||
|
When deploying executors, provide them with:
|
||||||
|
```
|
||||||
|
TASK ASSIGNMENT:
|
||||||
|
- Task ID: [specific ID]
|
||||||
|
- Objective: [clear goal]
|
||||||
|
- Dependencies: [list any completed prerequisites]
|
||||||
|
- Success Criteria: [specific completion requirements]
|
||||||
|
- Context: [relevant project information]
|
||||||
|
- Reporting: [when and how to report back]
|
||||||
|
```
|
||||||
|
|
||||||
|
When receiving executor updates:
|
||||||
|
1. Acknowledge completion or issues
|
||||||
|
2. Update task status in Task Master
|
||||||
|
3. Reassess execution strategy
|
||||||
|
4. Deploy new executors as appropriate
|
||||||
|
|
||||||
|
## Decision Framework
|
||||||
|
|
||||||
|
**When to parallelize**:
|
||||||
|
- Multiple pending tasks with no interdependencies
|
||||||
|
- Sufficient context available for independent execution
|
||||||
|
- Tasks are well-defined with clear success criteria
|
||||||
|
|
||||||
|
**When to serialize**:
|
||||||
|
- Strong dependencies between tasks
|
||||||
|
- Limited context or unclear requirements
|
||||||
|
- Integration points requiring careful coordination
|
||||||
|
|
||||||
|
**When to escalate**:
|
||||||
|
- Circular dependencies detected
|
||||||
|
- Critical blockers affecting multiple tasks
|
||||||
|
- Ambiguous requirements needing clarification
|
||||||
|
- Resource conflicts between executors
|
||||||
|
|
||||||
|
## Error Handling
|
||||||
|
|
||||||
|
1. **Executor Failure**: Reassign task to new executor with additional context about the failure
|
||||||
|
2. **Dependency Conflicts**: Halt affected executors, resolve conflict, then resume
|
||||||
|
3. **Task Ambiguity**: Request clarification from user before proceeding
|
||||||
|
4. **System Errors**: Implement graceful degradation, falling back to serial execution if needed
|
||||||
|
|
||||||
|
## Performance Metrics
|
||||||
|
|
||||||
|
Track and optimize for:
|
||||||
|
- Task completion rate
|
||||||
|
- Parallel execution efficiency
|
||||||
|
- Executor success rate
|
||||||
|
- Time to completion for task groups
|
||||||
|
- Dependency resolution speed
|
||||||
|
|
||||||
|
## Integration with Task Master
|
||||||
|
|
||||||
|
Leverage these Task Master MCP tools effectively:
|
||||||
|
- `get_tasks` - Continuous queue monitoring
|
||||||
|
- `get_task` - Detailed task analysis
|
||||||
|
- `set_task_status` - Progress tracking
|
||||||
|
- `next_task` - Fallback for serial execution
|
||||||
|
- `analyze_project_complexity` - Strategic planning
|
||||||
|
- `complexity_report` - Resource allocation
|
||||||
|
|
||||||
|
## Output Format for Execution
|
||||||
|
|
||||||
|
**Your job is to analyze and create actionable execution plans that Claude can use to deploy executors.**
|
||||||
|
|
||||||
|
After completing your dependency analysis, you MUST output a structured execution plan:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
execution_plan:
|
||||||
|
EXECUTE_IN_PARALLEL:
|
||||||
|
# Maximum 3 subtasks running simultaneously
|
||||||
|
- subtask_id: [e.g., 118.2]
|
||||||
|
parent_task: [e.g., 118]
|
||||||
|
title: [Specific subtask title]
|
||||||
|
priority: [high/medium/low]
|
||||||
|
estimated_time: [e.g., 10 minutes]
|
||||||
|
executor_prompt: |
|
||||||
|
Execute Subtask [ID]: [Specific subtask title]
|
||||||
|
|
||||||
|
SPECIFIC REQUIREMENTS:
|
||||||
|
[Exact implementation needed for THIS subtask only]
|
||||||
|
|
||||||
|
FILES TO CREATE/MODIFY:
|
||||||
|
[Specific file paths]
|
||||||
|
|
||||||
|
CONTEXT:
|
||||||
|
[What already exists that this subtask depends on]
|
||||||
|
|
||||||
|
SUCCESS CRITERIA:
|
||||||
|
[Specific completion criteria for this subtask]
|
||||||
|
|
||||||
|
IMPORTANT:
|
||||||
|
- Focus ONLY on this subtask
|
||||||
|
- Mark subtask as 'review' when complete
|
||||||
|
- Use MCP tool: mcp__task-master-ai__set_task_status
|
||||||
|
|
||||||
|
- subtask_id: [Another subtask that can run in parallel]
|
||||||
|
parent_task: [Parent task ID]
|
||||||
|
title: [Specific subtask title]
|
||||||
|
priority: [priority]
|
||||||
|
estimated_time: [time estimate]
|
||||||
|
executor_prompt: |
|
||||||
|
[Focused prompt for this specific subtask]
|
||||||
|
|
||||||
|
blocked:
|
||||||
|
- task_id: [ID]
|
||||||
|
title: [Task title]
|
||||||
|
waiting_for: [list of blocking task IDs]
|
||||||
|
becomes_ready_when: [condition for unblocking]
|
||||||
|
|
||||||
|
next_wave:
|
||||||
|
trigger: "After tasks [IDs] complete"
|
||||||
|
newly_available: [List of task IDs that will unblock]
|
||||||
|
tasks_to_execute_in_parallel: [IDs that can run together in next wave]
|
||||||
|
|
||||||
|
critical_path: [Ordered list of task IDs forming the critical path]
|
||||||
|
|
||||||
|
parallelization_instruction: |
|
||||||
|
IMPORTANT FOR CLAUDE: Deploy ALL tasks in 'EXECUTE_IN_PARALLEL' section
|
||||||
|
simultaneously using multiple Task tool invocations in a single response.
|
||||||
|
Example: If 3 tasks are listed, invoke the Task tool 3 times in one message.
|
||||||
|
|
||||||
|
verification_needed:
|
||||||
|
- task_id: [ID of any task in 'review' status]
|
||||||
|
verification_focus: [what to check]
|
||||||
|
```
|
||||||
|
|
||||||
|
**CRITICAL INSTRUCTIONS FOR CLAUDE (MAIN):**
|
||||||
|
1. When you see `EXECUTE_IN_PARALLEL`, deploy ALL listed executors at once
|
||||||
|
2. Use multiple Task tool invocations in a SINGLE response
|
||||||
|
3. Do not execute them sequentially - they must run in parallel
|
||||||
|
4. Wait for all parallel executors to complete before proceeding to next wave
|
||||||
|
|
||||||
|
**IMPORTANT NOTES**:
|
||||||
|
- Label parallel tasks clearly in `EXECUTE_IN_PARALLEL` section
|
||||||
|
- Provide complete, self-contained prompts for each executor
|
||||||
|
- Executors should mark tasks as 'review' for verification, not 'done'
|
||||||
|
- Be explicit about which tasks can run simultaneously
|
||||||
|
|
||||||
|
You are the strategic mind analyzing the entire task landscape. Make parallelization opportunities UNMISTAKABLY CLEAR to Claude.
|
||||||
102
.github/scripts/check-pre-release-mode.mjs
vendored
Executable file
102
.github/scripts/check-pre-release-mode.mjs
vendored
Executable file
@@ -0,0 +1,102 @@
|
|||||||
|
#!/usr/bin/env node
|
||||||
|
import { readFileSync, existsSync } from 'node:fs';
|
||||||
|
import { join, dirname, resolve } from 'node:path';
|
||||||
|
import { fileURLToPath } from 'node:url';
|
||||||
|
|
||||||
|
const __filename = fileURLToPath(import.meta.url);
|
||||||
|
const __dirname = dirname(__filename);
|
||||||
|
|
||||||
|
// Get context from command line argument or environment
|
||||||
|
const context = process.argv[2] || process.env.GITHUB_WORKFLOW || 'manual';
|
||||||
|
|
||||||
|
function findRootDir(startDir) {
|
||||||
|
let currentDir = resolve(startDir);
|
||||||
|
while (currentDir !== '/') {
|
||||||
|
if (existsSync(join(currentDir, 'package.json'))) {
|
||||||
|
try {
|
||||||
|
const pkg = JSON.parse(
|
||||||
|
readFileSync(join(currentDir, 'package.json'), 'utf8')
|
||||||
|
);
|
||||||
|
if (pkg.name === 'task-master-ai' || pkg.repository) {
|
||||||
|
return currentDir;
|
||||||
|
}
|
||||||
|
} catch {}
|
||||||
|
}
|
||||||
|
currentDir = dirname(currentDir);
|
||||||
|
}
|
||||||
|
throw new Error('Could not find root directory');
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
 * Verify that this branch is not in active changesets pre-release mode.
 *
 * Reads .changeset/pre.json from the repository root. Exits 0 when the file
 * is absent or its mode is not "pre"; exits 1 — printing context-specific
 * remediation steps — when pre-release mode is active or the file cannot
 * be parsed.
 */
function checkPreReleaseMode() {
	console.log('🔍 Checking if branch is in pre-release mode...');

	const rootDir = findRootDir(__dirname);
	const preJsonPath = join(rootDir, '.changeset', 'pre.json');

	// No pre.json at all: the branch was never in (or has fully left) pre mode.
	if (!existsSync(preJsonPath)) {
		console.log('✅ Not in active pre-release mode - safe to proceed');
		process.exit(0);
	}

	try {
		const preJson = JSON.parse(readFileSync(preJsonPath, 'utf8'));

		if (preJson.mode === 'pre') {
			console.error('❌ ERROR: This branch is in active pre-release mode!');
			console.error('');

			// Pick the remediation instructions that match where this check runs.
			let remediation;
			if (context === 'Release Check' || context === 'pull_request') {
				remediation = [
					'Pre-release mode must be exited before merging to main.',
					'',
					'To fix this, run the following commands in your branch:',
					'  npx changeset pre exit',
					'  git add -u',
					'  git commit -m "chore: exit pre-release mode"',
					'  git push',
					'',
					'Then update this pull request.'
				];
			} else if (context === 'Release' || context === 'main') {
				remediation = [
					'Pre-release mode should only be used on feature branches, not main.',
					'',
					'To fix this, run the following commands locally:',
					'  npx changeset pre exit',
					'  git add -u',
					'  git commit -m "chore: exit pre-release mode"',
					'  git push origin main',
					'',
					'Then re-run this workflow.'
				];
			} else {
				remediation = [
					'Pre-release mode must be exited before proceeding.',
					'',
					'To fix this, run the following commands:',
					'  npx changeset pre exit',
					'  git add -u',
					'  git commit -m "chore: exit pre-release mode"',
					'  git push'
				];
			}

			for (const line of remediation) {
				console.error(line);
			}

			process.exit(1);
		}

		console.log('✅ Not in active pre-release mode - safe to proceed');
		process.exit(0);
	} catch (error) {
		console.error(`❌ ERROR: Unable to parse .changeset/pre.json – aborting.`);
		console.error(`Error details: ${error.message}`);
		process.exit(1);
	}
}

// Run the check
checkPreReleaseMode();
|
||||||
54
.github/scripts/pre-release.mjs
vendored
Executable file
54
.github/scripts/pre-release.mjs
vendored
Executable file
@@ -0,0 +1,54 @@
|
|||||||
|
#!/usr/bin/env node
// Pre-release (RC) publishing script: verifies the repository is in
// changesets "rc" pre-release mode, publishes the npm packages, then tags
// the VS Code extension build so its pre-release workflow is triggered.
import { readFileSync, existsSync } from 'node:fs';
import { join, dirname } from 'node:path';
import { fileURLToPath } from 'node:url';
import {
	findRootDir,
	runCommand,
	getPackageVersion,
	createAndPushTag
} from './utils.mjs';

const __filename = fileURLToPath(import.meta.url);
const __dirname = dirname(__filename);

const rootDir = findRootDir(__dirname);
const extensionPkgPath = join(rootDir, 'apps', 'extension', 'package.json');

console.log('🚀 Starting pre-release process...');

// Refuse to run unless changesets is in pre-release mode with the "rc" tag.
const preJsonPath = join(rootDir, '.changeset', 'pre.json');
if (!existsSync(preJsonPath)) {
	console.error('⚠️ Not in RC mode. Run "npx changeset pre enter rc" first.');
	process.exit(1);
}

let preJson;
try {
	preJson = JSON.parse(readFileSync(preJsonPath, 'utf8'));
} catch (error) {
	console.error('Failed to read pre.json:', error.message);
	process.exit(1);
}
if (preJson.tag !== 'rc') {
	console.error(`⚠️ Not in RC mode. Current tag: ${preJson.tag}`);
	process.exit(1);
}

// Report the extension version this RC corresponds to.
const extensionVersion = getPackageVersion(extensionPkgPath);
console.log(`Extension version: ${extensionVersion}`);

// Publish all npm packages via changesets.
console.log('📦 Publishing npm packages...');
runCommand('npx', ['changeset', 'publish']);

// Tag the extension RC build (no-op if the tag already exists on the remote).
const extensionTag = `extension-rc@${extensionVersion}`;
const tagCreated = createAndPushTag(extensionTag);

if (tagCreated) {
	console.log('This will trigger the extension-pre-release workflow...');
}

console.log('✅ Pre-release process completed!');
|
||||||
30
.github/scripts/release.mjs
vendored
Executable file
30
.github/scripts/release.mjs
vendored
Executable file
@@ -0,0 +1,30 @@
|
|||||||
|
#!/usr/bin/env node
// Stable release script: clears any stale changesets pre-release marker,
// tags the extension when its version changed, then publishes npm packages.
import { existsSync, unlinkSync } from 'node:fs';
import { join, dirname } from 'node:path';
import { fileURLToPath } from 'node:url';
import { findRootDir, runCommand } from './utils.mjs';

const __filename = fileURLToPath(import.meta.url);
const __dirname = dirname(__filename);

const rootDir = findRootDir(__dirname);

console.log('🚀 Starting release process...');

// Safety net: a stable release must never run in pre-release mode, so drop
// a leftover pre.json if an earlier step failed to clean it up.
const stalePreJson = join(rootDir, '.changeset', 'pre.json');
if (existsSync(stalePreJson)) {
	console.log('⚠️ Warning: pre.json still exists. Removing it...');
	unlinkSync(stalePreJson);
}

// Tag the extension first if its version changed — this prevents changeset
// from trying to publish the private package.
runCommand('node', [join(__dirname, 'tag-extension.mjs')]);

// Publish the public npm packages via changesets.
runCommand('npx', ['changeset', 'publish']);

console.log('✅ Release process completed!');

// The extension tag (if created) will trigger the extension-release workflow
|
||||||
21
.github/scripts/release.sh
vendored
21
.github/scripts/release.sh
vendored
@@ -1,21 +0,0 @@
|
|||||||
#!/bin/bash
# Release helper: tags the extension (if its version changed) and publishes
# the npm packages via changesets. Any command failure aborts the script.
set -e

echo "🚀 Starting release process..."

# Double-check we're not in pre-release mode (safety net)
# pre.json is changesets' pre-release marker; it must not be present during
# a stable release, so remove it if an earlier step left it behind.
if [ -f .changeset/pre.json ]; then
echo "⚠️ Warning: pre.json still exists. Removing it..."
rm -f .changeset/pre.json
fi

# Check if the extension version has changed and tag it
# This prevents changeset from trying to publish the private package
node .github/scripts/tag-extension.mjs

# Run changeset publish for npm packages
npx changeset publish

echo "✅ Release process completed!"

# The extension tag (if created) will trigger the extension-release workflow
|
|
||||||
114
.github/scripts/tag-extension.mjs
vendored
Normal file → Executable file
114
.github/scripts/tag-extension.mjs
vendored
Normal file → Executable file
@@ -1,33 +1,13 @@
|
|||||||
#!/usr/bin/env node
|
#!/usr/bin/env node
|
||||||
import assert from 'node:assert/strict';
|
import assert from 'node:assert/strict';
|
||||||
import { spawnSync } from 'node:child_process';
|
import { readFileSync } from 'node:fs';
|
||||||
import { readFileSync, existsSync } from 'node:fs';
|
import { join, dirname } from 'node:path';
|
||||||
import { join, dirname, resolve } from 'node:path';
|
|
||||||
import { fileURLToPath } from 'node:url';
|
import { fileURLToPath } from 'node:url';
|
||||||
|
import { findRootDir, createAndPushTag } from './utils.mjs';
|
||||||
|
|
||||||
const __filename = fileURLToPath(import.meta.url);
|
const __filename = fileURLToPath(import.meta.url);
|
||||||
const __dirname = dirname(__filename);
|
const __dirname = dirname(__filename);
|
||||||
|
|
||||||
// Find the root directory by looking for package.json
|
|
||||||
function findRootDir(startDir) {
|
|
||||||
let currentDir = resolve(startDir);
|
|
||||||
while (currentDir !== '/') {
|
|
||||||
if (existsSync(join(currentDir, 'package.json'))) {
|
|
||||||
// Verify it's the root package.json by checking for expected fields
|
|
||||||
try {
|
|
||||||
const pkg = JSON.parse(
|
|
||||||
readFileSync(join(currentDir, 'package.json'), 'utf8')
|
|
||||||
);
|
|
||||||
if (pkg.name === 'task-master-ai' || pkg.repository) {
|
|
||||||
return currentDir;
|
|
||||||
}
|
|
||||||
} catch {}
|
|
||||||
}
|
|
||||||
currentDir = dirname(currentDir);
|
|
||||||
}
|
|
||||||
throw new Error('Could not find root directory');
|
|
||||||
}
|
|
||||||
|
|
||||||
const rootDir = findRootDir(__dirname);
|
const rootDir = findRootDir(__dirname);
|
||||||
|
|
||||||
// Read the extension's package.json
|
// Read the extension's package.json
|
||||||
@@ -43,95 +23,11 @@ try {
|
|||||||
process.exit(1);
|
process.exit(1);
|
||||||
}
|
}
|
||||||
|
|
||||||
// Read root package.json for repository info
|
|
||||||
const rootPkgPath = join(rootDir, 'package.json');
|
|
||||||
let rootPkg;
|
|
||||||
try {
|
|
||||||
const rootPkgContent = readFileSync(rootPkgPath, 'utf8');
|
|
||||||
rootPkg = JSON.parse(rootPkgContent);
|
|
||||||
} catch (error) {
|
|
||||||
console.error('Failed to read root package.json:', error.message);
|
|
||||||
process.exit(1);
|
|
||||||
}
|
|
||||||
|
|
||||||
// Ensure we have required fields
|
// Ensure we have required fields
|
||||||
assert(pkg.name, 'package.json must have a name field');
|
assert(pkg.name, 'package.json must have a name field');
|
||||||
assert(pkg.version, 'package.json must have a version field');
|
assert(pkg.version, 'package.json must have a version field');
|
||||||
assert(rootPkg.repository, 'root package.json must have a repository field');
|
|
||||||
|
|
||||||
const tag = `${pkg.name}@${pkg.version}`;
|
const tag = `${pkg.name}@${pkg.version}`;
|
||||||
|
|
||||||
// Get repository URL from root package.json
|
// Create and push the tag if it doesn't exist
|
||||||
// Get repository URL and clean it up for git ls-remote
|
createAndPushTag(tag);
|
||||||
let repoUrl = rootPkg.repository.url || rootPkg.repository;
|
|
||||||
if (typeof repoUrl === 'string') {
|
|
||||||
// Convert git+https://github.com/... to https://github.com/...
|
|
||||||
repoUrl = repoUrl.replace(/^git\+/, '');
|
|
||||||
// Ensure it ends with .git for proper remote access
|
|
||||||
if (!repoUrl.endsWith('.git')) {
|
|
||||||
repoUrl += '.git';
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
console.log(`Checking remote repository: ${repoUrl} for tag: ${tag}`);
|
|
||||||
|
|
||||||
let gitResult = spawnSync('git', ['ls-remote', repoUrl, tag], {
|
|
||||||
encoding: 'utf8',
|
|
||||||
env: { ...process.env }
|
|
||||||
});
|
|
||||||
|
|
||||||
if (gitResult.status !== 0) {
|
|
||||||
console.error('Git ls-remote failed:');
|
|
||||||
console.error('Exit code:', gitResult.status);
|
|
||||||
console.error('Error:', gitResult.error);
|
|
||||||
console.error('Stderr:', gitResult.stderr);
|
|
||||||
console.error('Command:', `git ls-remote ${repoUrl} ${tag}`);
|
|
||||||
|
|
||||||
// For CI environments, try using origin instead of the full URL
|
|
||||||
if (process.env.CI) {
|
|
||||||
console.log('Retrying with origin remote...');
|
|
||||||
gitResult = spawnSync('git', ['ls-remote', 'origin', tag], {
|
|
||||||
encoding: 'utf8'
|
|
||||||
});
|
|
||||||
|
|
||||||
if (gitResult.status !== 0) {
|
|
||||||
throw new Error(
|
|
||||||
`Failed to check remote for tag ${tag}. Exit code: ${gitResult.status}`
|
|
||||||
);
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
throw new Error(
|
|
||||||
`Failed to check remote for tag ${tag}. Exit code: ${gitResult.status}`
|
|
||||||
);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
const exists = String(gitResult.stdout).trim() !== '';
|
|
||||||
|
|
||||||
if (!exists) {
|
|
||||||
console.log(`Creating new extension tag: ${tag}`);
|
|
||||||
|
|
||||||
// Create the tag
|
|
||||||
const tagResult = spawnSync('git', ['tag', tag]);
|
|
||||||
if (tagResult.status !== 0) {
|
|
||||||
console.error(
|
|
||||||
'Failed to create tag:',
|
|
||||||
tagResult.error || tagResult.stderr.toString()
|
|
||||||
);
|
|
||||||
process.exit(1);
|
|
||||||
}
|
|
||||||
|
|
||||||
// Push the tag
|
|
||||||
const pushResult = spawnSync('git', ['push', 'origin', tag]);
|
|
||||||
if (pushResult.status !== 0) {
|
|
||||||
console.error(
|
|
||||||
'Failed to push tag:',
|
|
||||||
pushResult.error || pushResult.stderr.toString()
|
|
||||||
);
|
|
||||||
process.exit(1);
|
|
||||||
}
|
|
||||||
|
|
||||||
console.log(`✅ Successfully created and pushed tag: ${tag}`);
|
|
||||||
} else {
|
|
||||||
console.log(`Extension tag already exists: ${tag}`);
|
|
||||||
}
|
|
||||||
|
|||||||
88
.github/scripts/utils.mjs
vendored
Executable file
88
.github/scripts/utils.mjs
vendored
Executable file
@@ -0,0 +1,88 @@
|
|||||||
|
#!/usr/bin/env node
|
||||||
|
import { spawnSync } from 'node:child_process';
|
||||||
|
import { readFileSync } from 'node:fs';
|
||||||
|
import { join, dirname, resolve } from 'node:path';
|
||||||
|
|
||||||
|
// Find the root directory by looking for package.json with task-master-ai
|
||||||
|
/**
 * Walk upward from `startDir` to locate the repository root.
 *
 * A directory qualifies when its package.json has `name` "task-master-ai"
 * or declares a `repository` field. Missing or malformed package.json files
 * are skipped silently (best-effort walk).
 *
 * @param {string} startDir - Directory to start searching from.
 * @returns {string} Absolute path of the repository root.
 * @throws {Error} If no qualifying package.json is found up to the FS root.
 */
export function findRootDir(startDir) {
	let currentDir = resolve(startDir);
	// Loop until dirname() stops changing (the filesystem root). Comparing
	// against the literal '/' would never terminate on Windows drive roots
	// such as "C:\\", and would also skip checking the root itself.
	while (true) {
		const pkgPath = join(currentDir, 'package.json');
		try {
			const pkg = JSON.parse(readFileSync(pkgPath, 'utf8'));
			if (pkg.name === 'task-master-ai' || pkg.repository) {
				return currentDir;
			}
		} catch {
			// No package.json here, or it is unreadable — keep walking up.
		}
		const parentDir = dirname(currentDir);
		if (parentDir === currentDir) break; // reached the filesystem root
		currentDir = parentDir;
	}
	throw new Error('Could not find root directory');
}
|
||||||
|
|
||||||
|
// Run a command with proper error handling
|
||||||
|
/**
 * Run a command synchronously, streaming its output, and abort the whole
 * process when it fails.
 *
 * @param {string} command - Executable to run.
 * @param {string[]} [args] - Arguments to pass to the command.
 * @param {object} [options] - Extra spawnSync options (override the defaults).
 * @returns {object} The spawnSync result when the command exits with code 0.
 */
export function runCommand(command, args = [], options = {}) {
	console.log(`Running: ${command} ${args.join(' ')}`);
	const result = spawnSync(command, args, {
		encoding: 'utf8',
		stdio: 'inherit',
		...options
	});

	// spawnSync reports launch failures (e.g. command not found) via
	// `result.error` with a null status — surface those explicitly.
	if (result.error) {
		console.error(`Command failed to start: ${result.error.message}`);
		process.exit(1);
	}

	if (result.status !== 0) {
		console.error(`Command failed with exit code ${result.status}`);
		// status is null when the child was killed by a signal; make sure we
		// still exit non-zero (process.exit(null) would exit with code 0).
		process.exit(result.status ?? 1);
	}

	return result;
}
|
||||||
|
|
||||||
|
// Get package version from a package.json file
|
||||||
|
/**
 * Read the `version` field from a package.json file.
 *
 * Exits the process with code 1 when the file cannot be read or parsed.
 *
 * @param {string} packagePath - Path to the package.json file.
 * @returns {string|undefined} The declared package version.
 */
export function getPackageVersion(packagePath) {
	let manifest;
	try {
		manifest = JSON.parse(readFileSync(packagePath, 'utf8'));
	} catch (error) {
		console.error(
			`Failed to read package version from ${packagePath}:`,
			error.message
		);
		process.exit(1);
	}
	return manifest.version;
}
|
||||||
|
|
||||||
|
// Check if a git tag exists on remote
|
||||||
|
/**
 * Check whether a git tag already exists on the given remote.
 *
 * @param {string} tag - Tag name to look up.
 * @param {string} [remote='origin'] - Remote to query.
 * @returns {boolean} True when `git ls-remote` succeeds and prints a match.
 */
export function tagExistsOnRemote(tag, remote = 'origin') {
	const lsRemote = spawnSync('git', ['ls-remote', remote, tag], {
		encoding: 'utf8'
	});
	// A failed lookup (bad remote, git missing) is treated as "not found".
	if (lsRemote.status !== 0) {
		return false;
	}
	return lsRemote.stdout.trim() !== '';
}
|
||||||
|
|
||||||
|
// Create and push a git tag if it doesn't exist
|
||||||
|
/**
 * Create a git tag locally and push it to the remote, unless the remote
 * already has it.
 *
 * Exits the process with code 1 if either git step fails.
 *
 * @param {string} tag - Tag name to create.
 * @param {string} [remote='origin'] - Remote to push the tag to.
 * @returns {boolean} True when a new tag was created and pushed, false when
 *   the tag already existed on the remote.
 */
export function createAndPushTag(tag, remote = 'origin') {
	// Nothing to do if the remote already has this tag.
	if (tagExistsOnRemote(tag, remote)) {
		console.log(`Tag ${tag} already exists on remote, skipping`);
		return false;
	}

	console.log(`Creating new tag: ${tag}`);

	// Run one git subcommand, aborting the process with the given message
	// prefix on failure.
	const runGitOrDie = (args, failureLabel) => {
		const outcome = spawnSync('git', args);
		if (outcome.status !== 0) {
			console.error(failureLabel, outcome.error || outcome.stderr);
			process.exit(1);
		}
	};

	runGitOrDie(['tag', tag], 'Failed to create tag:');
	runGitOrDie(['push', remote, tag], 'Failed to push tag:');

	console.log(`✅ Successfully created and pushed tag: ${tag}`);
	return true;
}
|
||||||
110
.github/workflows/extension-pre-release.yml
vendored
Normal file
110
.github/workflows/extension-pre-release.yml
vendored
Normal file
@@ -0,0 +1,110 @@
|
|||||||
|
name: Extension Pre-Release
|
||||||
|
|
||||||
|
on:
|
||||||
|
push:
|
||||||
|
tags:
|
||||||
|
- "extension-rc@*"
|
||||||
|
|
||||||
|
permissions:
|
||||||
|
contents: write
|
||||||
|
|
||||||
|
concurrency: extension-pre-release-${{ github.ref }}
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
publish-extension-rc:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
environment: extension-release
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v4
|
||||||
|
|
||||||
|
- uses: actions/setup-node@v4
|
||||||
|
with:
|
||||||
|
node-version: 20
|
||||||
|
|
||||||
|
- name: Cache node_modules
|
||||||
|
uses: actions/cache@v4
|
||||||
|
with:
|
||||||
|
path: |
|
||||||
|
node_modules
|
||||||
|
*/*/node_modules
|
||||||
|
key: ${{ runner.os }}-node-${{ hashFiles('**/package-lock.json') }}
|
||||||
|
restore-keys: |
|
||||||
|
${{ runner.os }}-node-
|
||||||
|
|
||||||
|
- name: Install Extension Dependencies
|
||||||
|
working-directory: apps/extension
|
||||||
|
run: npm ci
|
||||||
|
timeout-minutes: 5
|
||||||
|
|
||||||
|
- name: Type Check Extension
|
||||||
|
working-directory: apps/extension
|
||||||
|
run: npm run check-types
|
||||||
|
env:
|
||||||
|
FORCE_COLOR: 1
|
||||||
|
|
||||||
|
- name: Build Extension
|
||||||
|
working-directory: apps/extension
|
||||||
|
run: npm run build
|
||||||
|
env:
|
||||||
|
FORCE_COLOR: 1
|
||||||
|
|
||||||
|
- name: Package Extension
|
||||||
|
working-directory: apps/extension
|
||||||
|
run: npm run package
|
||||||
|
env:
|
||||||
|
FORCE_COLOR: 1
|
||||||
|
|
||||||
|
- name: Create VSIX Package (Pre-Release)
|
||||||
|
working-directory: apps/extension/vsix-build
|
||||||
|
run: npx vsce package --no-dependencies --pre-release
|
||||||
|
env:
|
||||||
|
FORCE_COLOR: 1
|
||||||
|
|
||||||
|
- name: Get VSIX filename
|
||||||
|
id: vsix-info
|
||||||
|
working-directory: apps/extension/vsix-build
|
||||||
|
run: |
|
||||||
|
VSIX_FILE=$(find . -maxdepth 1 -name "*.vsix" -type f | head -n1 | xargs basename)
|
||||||
|
if [ -z "$VSIX_FILE" ]; then
|
||||||
|
echo "Error: No VSIX file found"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
echo "vsix-filename=$VSIX_FILE" >> "$GITHUB_OUTPUT"
|
||||||
|
echo "Found VSIX: $VSIX_FILE"
|
||||||
|
|
||||||
|
- name: Publish to VS Code Marketplace (Pre-Release)
|
||||||
|
working-directory: apps/extension/vsix-build
|
||||||
|
run: npx vsce publish --packagePath "${{ steps.vsix-info.outputs.vsix-filename }}" --pre-release
|
||||||
|
env:
|
||||||
|
VSCE_PAT: ${{ secrets.VSCE_PAT }}
|
||||||
|
FORCE_COLOR: 1
|
||||||
|
|
||||||
|
- name: Install Open VSX CLI
|
||||||
|
run: npm install -g ovsx
|
||||||
|
|
||||||
|
- name: Publish to Open VSX Registry (Pre-Release)
|
||||||
|
working-directory: apps/extension/vsix-build
|
||||||
|
run: ovsx publish "${{ steps.vsix-info.outputs.vsix-filename }}" --pre-release
|
||||||
|
env:
|
||||||
|
OVSX_PAT: ${{ secrets.OVSX_PAT }}
|
||||||
|
FORCE_COLOR: 1
|
||||||
|
|
||||||
|
- name: Upload Build Artifacts
|
||||||
|
uses: actions/upload-artifact@v4
|
||||||
|
with:
|
||||||
|
name: extension-pre-release-${{ github.ref_name }}
|
||||||
|
path: |
|
||||||
|
apps/extension/vsix-build/*.vsix
|
||||||
|
apps/extension/dist/
|
||||||
|
retention-days: 30
|
||||||
|
|
||||||
|
notify-success:
|
||||||
|
needs: publish-extension-rc
|
||||||
|
if: success()
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- name: Success Notification
|
||||||
|
run: |
|
||||||
|
echo "🚀 Extension ${{ github.ref_name }} successfully published as pre-release!"
|
||||||
|
echo "📦 Available on VS Code Marketplace (Pre-Release)"
|
||||||
|
echo "🌍 Available on Open VSX Registry (Pre-Release)"
|
||||||
26
.github/workflows/extension-release.yml
vendored
26
.github/workflows/extension-release.yml
vendored
@@ -89,32 +89,6 @@ jobs:
|
|||||||
OVSX_PAT: ${{ secrets.OVSX_PAT }}
|
OVSX_PAT: ${{ secrets.OVSX_PAT }}
|
||||||
FORCE_COLOR: 1
|
FORCE_COLOR: 1
|
||||||
|
|
||||||
- name: Create GitHub Release
|
|
||||||
uses: actions/create-release@v1
|
|
||||||
env:
|
|
||||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
|
||||||
with:
|
|
||||||
tag_name: ${{ github.ref_name }}
|
|
||||||
release_name: Extension ${{ github.ref_name }}
|
|
||||||
body: |
|
|
||||||
VS Code Extension Release ${{ github.ref_name }}
|
|
||||||
|
|
||||||
**Marketplaces:**
|
|
||||||
- [VS Code Marketplace](https://marketplace.visualstudio.com/items?itemName=Hamster.task-master-hamster)
|
|
||||||
- [Open VSX Registry](https://open-vsx.org/extension/Hamster/task-master-hamster)
|
|
||||||
draft: false
|
|
||||||
prerelease: false
|
|
||||||
|
|
||||||
- name: Upload VSIX to Release
|
|
||||||
uses: actions/upload-release-asset@v1
|
|
||||||
env:
|
|
||||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
|
||||||
with:
|
|
||||||
upload_url: ${{ steps.create_release.outputs.upload_url }}
|
|
||||||
asset_path: apps/extension/vsix-build/${{ steps.vsix-info.outputs.vsix-filename }}
|
|
||||||
asset_name: ${{ steps.vsix-info.outputs.vsix-filename }}
|
|
||||||
asset_content_type: application/zip
|
|
||||||
|
|
||||||
- name: Upload Build Artifacts
|
- name: Upload Build Artifacts
|
||||||
uses: actions/upload-artifact@v4
|
uses: actions/upload-artifact@v4
|
||||||
with:
|
with:
|
||||||
|
|||||||
28
.github/workflows/pre-release.yml
vendored
28
.github/workflows/pre-release.yml
vendored
@@ -9,6 +9,7 @@ jobs:
|
|||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
# Only allow pre-releases on non-main branches
|
# Only allow pre-releases on non-main branches
|
||||||
if: github.ref != 'refs/heads/main'
|
if: github.ref != 'refs/heads/main'
|
||||||
|
environment: extension-release
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v4
|
- uses: actions/checkout@v4
|
||||||
with:
|
with:
|
||||||
@@ -35,9 +36,26 @@ jobs:
|
|||||||
|
|
||||||
- name: Enter RC mode (if not already in RC mode)
|
- name: Enter RC mode (if not already in RC mode)
|
||||||
run: |
|
run: |
|
||||||
# ensure we’re in the right pre-mode (tag "rc")
|
# Check if we're in pre-release mode with the "rc" tag
|
||||||
if [ ! -f .changeset/pre.json ] \
|
if [ -f .changeset/pre.json ]; then
|
||||||
|| [ "$(jq -r '.tag' .changeset/pre.json 2>/dev/null || echo '')" != "rc" ]; then
|
MODE=$(jq -r '.mode' .changeset/pre.json 2>/dev/null || echo '')
|
||||||
|
TAG=$(jq -r '.tag' .changeset/pre.json 2>/dev/null || echo '')
|
||||||
|
|
||||||
|
if [ "$MODE" = "exit" ]; then
|
||||||
|
echo "Pre-release mode is in 'exit' state, re-entering RC mode..."
|
||||||
|
npx changeset pre enter rc
|
||||||
|
elif [ "$MODE" = "pre" ] && [ "$TAG" != "rc" ]; then
|
||||||
|
echo "In pre-release mode but with wrong tag ($TAG), switching to RC..."
|
||||||
|
npx changeset pre exit
|
||||||
|
npx changeset pre enter rc
|
||||||
|
elif [ "$MODE" = "pre" ] && [ "$TAG" = "rc" ]; then
|
||||||
|
echo "Already in RC pre-release mode"
|
||||||
|
else
|
||||||
|
echo "Unknown mode state: $MODE, entering RC mode..."
|
||||||
|
npx changeset pre enter rc
|
||||||
|
fi
|
||||||
|
else
|
||||||
|
echo "No pre.json found, entering RC mode..."
|
||||||
npx changeset pre enter rc
|
npx changeset pre enter rc
|
||||||
fi
|
fi
|
||||||
|
|
||||||
@@ -50,10 +68,12 @@ jobs:
|
|||||||
- name: Create Release Candidate Pull Request or Publish Release Candidate to npm
|
- name: Create Release Candidate Pull Request or Publish Release Candidate to npm
|
||||||
uses: changesets/action@v1
|
uses: changesets/action@v1
|
||||||
with:
|
with:
|
||||||
publish: npm run release
|
publish: node ./.github/scripts/pre-release.mjs
|
||||||
env:
|
env:
|
||||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||||
NPM_TOKEN: ${{ secrets.NPM_TOKEN }}
|
NPM_TOKEN: ${{ secrets.NPM_TOKEN }}
|
||||||
|
VSCE_PAT: ${{ secrets.VSCE_PAT }}
|
||||||
|
OVSX_PAT: ${{ secrets.OVSX_PAT }}
|
||||||
|
|
||||||
- name: Commit & Push changes
|
- name: Commit & Push changes
|
||||||
uses: actions-js/push@master
|
uses: actions-js/push@master
|
||||||
|
|||||||
27
.github/workflows/release-check.yml
vendored
27
.github/workflows/release-check.yml
vendored
@@ -18,29 +18,4 @@ jobs:
|
|||||||
fetch-depth: 0
|
fetch-depth: 0
|
||||||
|
|
||||||
- name: Check release mode
|
- name: Check release mode
|
||||||
run: |
|
run: node ./.github/scripts/check-pre-release-mode.mjs "pull_request"
|
||||||
set -euo pipefail
|
|
||||||
echo "🔍 Checking if branch is in pre-release mode..."
|
|
||||||
|
|
||||||
if [[ -f .changeset/pre.json ]]; then
|
|
||||||
if ! PRE_MODE=$(jq -r '.mode' .changeset/pre.json 2>/dev/null); then
|
|
||||||
echo "❌ ERROR: Unable to parse .changeset/pre.json – aborting merge."
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
if [[ "$PRE_MODE" == "pre" ]]; then
|
|
||||||
echo "❌ ERROR: This branch is in active pre-release mode!"
|
|
||||||
echo ""
|
|
||||||
echo "Pre-release mode must be exited before merging to main."
|
|
||||||
echo ""
|
|
||||||
echo "To fix this, run the following commands in your branch:"
|
|
||||||
echo " npx changeset pre exit"
|
|
||||||
echo " git add -u"
|
|
||||||
echo " git commit -m 'chore: exit pre-release mode'"
|
|
||||||
echo " git push"
|
|
||||||
echo ""
|
|
||||||
echo "Then update this pull request."
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
fi
|
|
||||||
|
|
||||||
echo "✅ Not in active pre-release mode - PR can be merged"
|
|
||||||
|
|||||||
22
.github/workflows/release.yml
vendored
22
.github/workflows/release.yml
vendored
@@ -39,30 +39,12 @@ jobs:
|
|||||||
timeout-minutes: 2
|
timeout-minutes: 2
|
||||||
|
|
||||||
- name: Check pre-release mode
|
- name: Check pre-release mode
|
||||||
run: |
|
run: node ./.github/scripts/check-pre-release-mode.mjs "main"
|
||||||
set -euo pipefail
|
|
||||||
echo "🔍 Checking pre-release mode status..."
|
|
||||||
if [[ -f .changeset/pre.json ]]; then
|
|
||||||
echo "❌ ERROR: Main branch is in pre-release mode!"
|
|
||||||
echo ""
|
|
||||||
echo "Pre-release mode should only be used on feature branches, not main."
|
|
||||||
echo ""
|
|
||||||
echo "To fix this, run the following commands locally:"
|
|
||||||
echo " npx changeset pre exit"
|
|
||||||
echo " git add -u"
|
|
||||||
echo " git commit -m 'chore: exit pre-release mode'"
|
|
||||||
echo " git push origin main"
|
|
||||||
echo ""
|
|
||||||
echo "Then re-run this workflow."
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
echo "✅ Not in pre-release mode - proceeding with release"
|
|
||||||
|
|
||||||
- name: Create Release Pull Request or Publish to npm
|
- name: Create Release Pull Request or Publish to npm
|
||||||
uses: changesets/action@v1
|
uses: changesets/action@v1
|
||||||
with:
|
with:
|
||||||
publish: ./.github/scripts/release.sh
|
publish: node ./.github/scripts/release.mjs
|
||||||
env:
|
env:
|
||||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||||
NPM_TOKEN: ${{ secrets.NPM_TOKEN }}
|
NPM_TOKEN: ${{ secrets.NPM_TOKEN }}
|
||||||
|
|||||||
343
.taskmaster/docs/tm-core-phase-1.txt
Normal file
343
.taskmaster/docs/tm-core-phase-1.txt
Normal file
@@ -0,0 +1,343 @@
|
|||||||
|
# Product Requirements Document: tm-core Package - Parse PRD Feature
|
||||||
|
|
||||||
|
## Project Overview
|
||||||
|
Create a TypeScript package named `tm-core` at `packages/tm-core` that implements parse-prd functionality using class-based architecture similar to the existing AI providers pattern.
|
||||||
|
|
||||||
|
## Design Patterns & Architecture
|
||||||
|
|
||||||
|
### Patterns to Apply
|
||||||
|
1. **Factory Pattern**: Use for `ProviderFactory` to create AI provider instances
|
||||||
|
2. **Strategy Pattern**: Use for `IAIProvider` implementations and `IStorage` implementations
|
||||||
|
3. **Facade Pattern**: Use for `TaskMasterCore` as the main API entry point
|
||||||
|
4. **Template Method Pattern**: Use for `BaseProvider` abstract class
|
||||||
|
5. **Dependency Injection**: Use throughout for testability (pass dependencies via constructor)
|
||||||
|
6. **Repository Pattern**: Use for `FileStorage` to abstract data persistence
|
||||||
|
|
||||||
|
### Naming Conventions
|
||||||
|
- **Files**: kebab-case (e.g., `task-parser.ts`, `file-storage.ts`)
|
||||||
|
- **Classes**: PascalCase (e.g., `TaskParser`, `FileStorage`)
|
||||||
|
- **Interfaces**: PascalCase with 'I' prefix (e.g., `IStorage`, `IAIProvider`)
|
||||||
|
- **Methods**: camelCase (e.g., `parsePRD`, `loadTasks`)
|
||||||
|
- **Constants**: UPPER_SNAKE_CASE (e.g., `DEFAULT_MODEL`)
|
||||||
|
- **Type aliases**: PascalCase (e.g., `TaskStatus`, `ParseOptions`)
|
||||||
|
|
||||||
|
## Exact Folder Structure Required
|
||||||
|
```
|
||||||
|
packages/tm-core/
|
||||||
|
├── src/
|
||||||
|
│ ├── index.ts
|
||||||
|
│ ├── types/
|
||||||
|
│ │ └── index.ts
|
||||||
|
│ ├── interfaces/
|
||||||
|
│ │ ├── index.ts # Barrel export
|
||||||
|
│ │ ├── storage.interface.ts
|
||||||
|
│ │ ├── ai-provider.interface.ts
|
||||||
|
│ │ └── configuration.interface.ts
|
||||||
|
│ ├── tasks/
|
||||||
|
│ │ ├── index.ts # Barrel export
|
||||||
|
│ │ └── task-parser.ts
|
||||||
|
│ ├── ai/
|
||||||
|
│ │ ├── index.ts # Barrel export
|
||||||
|
│ │ ├── base-provider.ts
|
||||||
|
│ │ ├── provider-factory.ts
|
||||||
|
│ │ ├── prompt-builder.ts
|
||||||
|
│ │ └── providers/
|
||||||
|
│ │ ├── index.ts # Barrel export
|
||||||
|
│ │ ├── anthropic-provider.ts
|
||||||
|
│ │ ├── openai-provider.ts
|
||||||
|
│ │ └── google-provider.ts
|
||||||
|
│ ├── storage/
|
||||||
|
│ │ ├── index.ts # Barrel export
|
||||||
|
│ │ └── file-storage.ts
|
||||||
|
│ ├── config/
|
||||||
|
│ │ ├── index.ts # Barrel export
|
||||||
|
│ │ └── config-manager.ts
|
||||||
|
│ ├── utils/
|
||||||
|
│ │ ├── index.ts # Barrel export
|
||||||
|
│ │ └── id-generator.ts
|
||||||
|
│ └── errors/
|
||||||
|
│ ├── index.ts # Barrel export
|
||||||
|
│ └── task-master-error.ts
|
||||||
|
├── tests/
|
||||||
|
│ ├── task-parser.test.ts
|
||||||
|
│ ├── integration/
|
||||||
|
│ │ └── parse-prd.test.ts
|
||||||
|
│ └── mocks/
|
||||||
|
│ └── mock-provider.ts
|
||||||
|
├── package.json
|
||||||
|
├── tsconfig.json
|
||||||
|
├── tsup.config.js
|
||||||
|
└── jest.config.js
|
||||||
|
```
|
||||||
|
|
||||||
|
## Specific Implementation Requirements
|
||||||
|
|
||||||
|
### 1. Create types/index.ts
|
||||||
|
Define these exact TypeScript interfaces:
|
||||||
|
- `Task` interface with fields: id, title, description, status, priority, complexity, dependencies, subtasks, metadata, createdAt, updatedAt, source
|
||||||
|
- `Subtask` interface with fields: id, title, description, completed
|
||||||
|
- `TaskMetadata` interface with fields: parsedFrom, aiProvider, version, tags (optional)
|
||||||
|
- Type literals: `TaskStatus` = 'pending' | 'in-progress' | 'completed' | 'blocked'
|
||||||
|
- Type literals: `TaskPriority` = 'low' | 'medium' | 'high' | 'critical'
|
||||||
|
- Type literals: `TaskComplexity` = 'simple' | 'moderate' | 'complex'
|
||||||
|
- `ParseOptions` interface with fields: dryRun (optional), additionalContext (optional), tag (optional), maxTasks (optional)
|
||||||
|
|
||||||
|
### 2. Create interfaces/storage.interface.ts
|
||||||
|
Define `IStorage` interface with these exact methods:
|
||||||
|
- `loadTasks(tag?: string): Promise<Task[]>`
|
||||||
|
- `saveTasks(tasks: Task[], tag?: string): Promise<void>`
|
||||||
|
- `appendTasks(tasks: Task[], tag?: string): Promise<void>`
|
||||||
|
- `updateTask(id: string, task: Partial<Task>, tag?: string): Promise<void>`
|
||||||
|
- `deleteTask(id: string, tag?: string): Promise<void>`
|
||||||
|
- `exists(tag?: string): Promise<boolean>`
|
||||||
|
|
||||||
|
### 3. Create interfaces/ai-provider.interface.ts
|
||||||
|
Define `IAIProvider` interface with these exact methods:
|
||||||
|
- `generateCompletion(prompt: string, options?: AIOptions): Promise<string>`
|
||||||
|
- `calculateTokens(text: string): number`
|
||||||
|
- `getName(): string`
|
||||||
|
- `getModel(): string`
|
||||||
|
|
||||||
|
Define `AIOptions` interface with fields: temperature (optional), maxTokens (optional), systemPrompt (optional)
|
||||||
|
|
||||||
|
### 4. Create interfaces/configuration.interface.ts
|
||||||
|
Define `IConfiguration` interface with fields:
|
||||||
|
- `projectPath: string`
|
||||||
|
- `aiProvider: string`
|
||||||
|
- `apiKey?: string`
|
||||||
|
- `aiOptions?: AIOptions`
|
||||||
|
- `mainModel?: string`
|
||||||
|
- `researchModel?: string`
|
||||||
|
- `fallbackModel?: string`
|
||||||
|
- `tasksPath?: string`
|
||||||
|
- `enableTags?: boolean`
|
||||||
|
|
||||||
|
### 5. Create tasks/task-parser.ts
|
||||||
|
Create class `TaskParser` with:
|
||||||
|
- Constructor accepting `aiProvider: IAIProvider` and `config: IConfiguration`
|
||||||
|
- Private property `promptBuilder: PromptBuilder`
|
||||||
|
- Public method `parsePRD(prdPath: string, options: ParseOptions = {}): Promise<Task[]>`
|
||||||
|
- Private method `readPRD(prdPath: string): Promise<string>`
|
||||||
|
- Private method `extractTasks(aiResponse: string): Partial<Task>[]`
|
||||||
|
- Private method `enrichTasks(rawTasks: Partial<Task>[], prdPath: string): Task[]`
|
||||||
|
- Apply **Dependency Injection** pattern via constructor
|
||||||
|
|
||||||
|
### 6. Create ai/base-provider.ts
|
||||||
|
Copy existing base-provider.js and convert to TypeScript abstract class:
|
||||||
|
- Abstract class `BaseProvider` implementing `IAIProvider`
|
||||||
|
- Protected properties: `apiKey: string`, `model: string`
|
||||||
|
- Constructor accepting `apiKey: string` and `options: { model?: string }`
|
||||||
|
- Abstract methods matching IAIProvider interface
|
||||||
|
- Abstract method `getDefaultModel(): string`
|
||||||
|
- Apply **Template Method** pattern for common provider logic
|
||||||
|
|
||||||
|
### 7. Create ai/provider-factory.ts
|
||||||
|
Create class `ProviderFactory` with:
|
||||||
|
- Static method `create(config: { provider: string; apiKey?: string; model?: string }): Promise<IAIProvider>`
|
||||||
|
- Switch statement for providers: 'anthropic', 'openai', 'google'
|
||||||
|
- Dynamic imports for each provider
|
||||||
|
- Throw error for unknown providers
|
||||||
|
- Apply **Factory** pattern for creating provider instances
|
||||||
|
|
||||||
|
Example implementation structure:
|
||||||
|
```typescript
|
||||||
|
switch (provider.toLowerCase()) {
|
||||||
|
case 'anthropic':
|
||||||
|
const { AnthropicProvider } = await import('./providers/anthropic-provider.js');
|
||||||
|
return new AnthropicProvider(apiKey, { model });
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### 8. Create ai/providers/anthropic-provider.ts
|
||||||
|
Create class `AnthropicProvider` extending `BaseProvider`:
|
||||||
|
- Import Anthropic SDK: `import { Anthropic } from '@anthropic-ai/sdk'`
|
||||||
|
- Private property `client: Anthropic`
|
||||||
|
- Implement all abstract methods from BaseProvider
|
||||||
|
- Default model: 'claude-3-sonnet-20240229'
|
||||||
|
- Handle API errors and wrap with meaningful messages
|
||||||
|
|
||||||
|
### 9. Create ai/providers/openai-provider.ts (placeholder)
|
||||||
|
Create class `OpenAIProvider` extending `BaseProvider`:
|
||||||
|
- Import OpenAI SDK when implemented
|
||||||
|
- For now, throw error: "OpenAI provider not yet implemented"
|
||||||
|
|
||||||
|
### 10. Create ai/providers/google-provider.ts (placeholder)
|
||||||
|
Create class `GoogleProvider` extending `BaseProvider`:
|
||||||
|
- Import Google Generative AI SDK when implemented
|
||||||
|
- For now, throw error: "Google provider not yet implemented"
|
||||||
|
|
||||||
|
### 11. Create ai/prompt-builder.ts
|
||||||
|
Create class `PromptBuilder` with:
|
||||||
|
- Method `buildParsePrompt(prdContent: string, options: ParseOptions = {}): string`
|
||||||
|
- Method `buildExpandPrompt(task: string, context?: string): string`
|
||||||
|
- Use template literals for prompt construction
|
||||||
|
- Include specific JSON format instructions in prompts
|
||||||
|
|
||||||
|
### 12. Create storage/file-storage.ts
|
||||||
|
Create class `FileStorage` implementing `IStorage`:
|
||||||
|
- Private property `basePath: string` set to `{projectPath}/.taskmaster`
|
||||||
|
- Constructor accepting `projectPath: string`
|
||||||
|
- Private method `getTasksPath(tag?: string): string` returning correct path based on tag
|
||||||
|
- Private method `ensureDirectory(dir: string): Promise<void>`
|
||||||
|
- Implement all IStorage methods
|
||||||
|
- Handle ENOENT errors by returning empty arrays
|
||||||
|
- Use JSON format with structure: `{ tasks: Task[], metadata: { version: string, lastModified: string } }`
|
||||||
|
- Apply **Repository** pattern for data access abstraction
|
||||||
|
|
||||||
|
### 13. Create config/config-manager.ts
|
||||||
|
Create class `ConfigManager`:
|
||||||
|
- Private property `config: IConfiguration`
|
||||||
|
- Constructor accepting `options: Partial<IConfiguration>`
|
||||||
|
- Use Zod for validation with schema matching IConfiguration
|
||||||
|
- Method `get<K extends keyof IConfiguration>(key: K): IConfiguration[K]`
|
||||||
|
- Method `getAll(): IConfiguration`
|
||||||
|
- Method `validate(): boolean`
|
||||||
|
- Default values: projectPath = process.cwd(), aiProvider = 'anthropic', enableTags = true
|
||||||
|
|
||||||
|
### 14. Create utils/id-generator.ts
|
||||||
|
Export functions:
|
||||||
|
- `generateTaskId(index: number = 0): string` returning format `task_{timestamp}_{index}_{random}`
|
||||||
|
- `generateSubtaskId(parentId: string, index: number = 0): string` returning format `{parentId}_sub_{index}_{random}`
|
||||||
|
|
||||||
|
### 16. Create src/index.ts
|
||||||
|
Create main class `TaskMasterCore`:
|
||||||
|
- Private properties: `config: ConfigManager`, `storage: IStorage`, `aiProvider?: IAIProvider`, `parser?: TaskParser`
|
||||||
|
- Constructor accepting `options: Partial<IConfiguration>`
|
||||||
|
- Method `initialize(): Promise<void>` for lazy loading
|
||||||
|
- Method `parsePRD(prdPath: string, options: ParseOptions = {}): Promise<Task[]>`
|
||||||
|
- Method `getTasks(tag?: string): Promise<Task[]>`
|
||||||
|
- Apply **Facade** pattern to provide simple API over complex subsystems
|
||||||
|
|
||||||
|
Export:
|
||||||
|
- Class `TaskMasterCore`
|
||||||
|
- Function `createTaskMaster(options: Partial<IConfiguration>): TaskMasterCore`
|
||||||
|
- All types from './types'
|
||||||
|
- All interfaces from './interfaces/*'
|
||||||
|
|
||||||
|
Import statements should use kebab-case:
|
||||||
|
```typescript
|
||||||
|
import { TaskParser } from './tasks/task-parser';
|
||||||
|
import { FileStorage } from './storage/file-storage';
|
||||||
|
import { ConfigManager } from './config/config-manager';
|
||||||
|
import { ProviderFactory } from './ai/provider-factory';
|
||||||
|
```
|
||||||
|
|
||||||
|
### 17. Configure package.json
|
||||||
|
Create package.json with:
|
||||||
|
- name: "@task-master/core"
|
||||||
|
- version: "0.1.0"
|
||||||
|
- type: "module"
|
||||||
|
- main: "./dist/index.js"
|
||||||
|
- module: "./dist/index.mjs"
|
||||||
|
- types: "./dist/index.d.ts"
|
||||||
|
- exports map for proper ESM/CJS support
|
||||||
|
- scripts: build (tsup), dev (tsup --watch), test (jest), typecheck (tsc --noEmit)
|
||||||
|
- dependencies: zod@^3.23.8
|
||||||
|
- peerDependencies: @anthropic-ai/sdk, openai, @google/generative-ai
|
||||||
|
- devDependencies: typescript, tsup, jest, ts-jest, @types/node, @types/jest
|
||||||
|
|
||||||
|
### 18. Configure TypeScript
|
||||||
|
Create tsconfig.json with:
|
||||||
|
- target: "ES2022"
|
||||||
|
- module: "ESNext"
|
||||||
|
- strict: true (with all strict flags enabled)
|
||||||
|
- declaration: true
|
||||||
|
- outDir: "./dist"
|
||||||
|
- rootDir: "./src"
|
||||||
|
|
||||||
|
### 19. Configure tsup
|
||||||
|
Create tsup.config.js with:
|
||||||
|
- entry: ['src/index.ts']
|
||||||
|
- format: ['cjs', 'esm']
|
||||||
|
- dts: true
|
||||||
|
- sourcemap: true
|
||||||
|
- clean: true
|
||||||
|
- external: AI provider SDKs
|
||||||
|
|
||||||
|
### 20. Configure Jest
|
||||||
|
Create jest.config.js with:
|
||||||
|
- preset: 'ts-jest'
|
||||||
|
- testEnvironment: 'node'
|
||||||
|
- Coverage threshold: 80% for all metrics
|
||||||
|
|
||||||
|
## Build Process
|
||||||
|
1. Use tsup to compile TypeScript to both CommonJS and ESM
|
||||||
|
2. Generate .d.ts files for TypeScript consumers
|
||||||
|
3. Output to dist/ directory
|
||||||
|
4. Ensure tree-shaking works properly
|
||||||
|
|
||||||
|
## Testing Requirements
|
||||||
|
- Create unit tests for TaskParser in tests/task-parser.test.ts
|
||||||
|
- Create MockProvider class in tests/mocks/mock-provider.ts for testing without API calls
|
||||||
|
- Test error scenarios (file not found, invalid JSON, etc.)
|
||||||
|
- Create integration test in tests/integration/parse-prd.test.ts
|
||||||
|
- Follow kebab-case naming for all test files
|
||||||
|
|
||||||
|
## Success Criteria
|
||||||
|
- TypeScript compilation with zero errors
|
||||||
|
- No use of 'any' type
|
||||||
|
- All interfaces properly exported
|
||||||
|
- Compatible with existing tasks.json format
|
||||||
|
- Feature flag support via USE_TM_CORE environment variable
|
||||||
|
|
||||||
|
## Import/Export Conventions
|
||||||
|
- Use named exports for all classes and interfaces
|
||||||
|
- Use barrel exports (index.ts) in each directory
|
||||||
|
- Import types/interfaces with type-only imports: `import type { Task } from '../types'`
|
||||||
|
- Group imports in order: Node built-ins, external packages, internal packages, relative imports
|
||||||
|
- Use .js extension in import paths for ESM compatibility
|
||||||
|
|
||||||
|
## Error Handling Patterns
|
||||||
|
- Create custom error classes in `src/errors/` directory
|
||||||
|
- All public methods should catch and wrap errors with context
|
||||||
|
- Use error codes for different error types (e.g., 'FILE_NOT_FOUND', 'PARSE_ERROR')
|
||||||
|
- Never expose internal implementation details in error messages
|
||||||
|
- Log errors to console.error only in development mode
|
||||||
|
|
||||||
|
## Barrel Exports Content
|
||||||
|
|
||||||
|
### interfaces/index.ts
|
||||||
|
```typescript
|
||||||
|
export type { IStorage } from './storage.interface';
|
||||||
|
export type { IAIProvider, AIOptions } from './ai-provider.interface';
|
||||||
|
export type { IConfiguration } from './configuration.interface';
|
||||||
|
```
|
||||||
|
|
||||||
|
### tasks/index.ts
|
||||||
|
```typescript
|
||||||
|
export { TaskParser } from './task-parser';
|
||||||
|
```
|
||||||
|
|
||||||
|
### ai/index.ts
|
||||||
|
```typescript
|
||||||
|
export { BaseProvider } from './base-provider';
|
||||||
|
export { ProviderFactory } from './provider-factory';
|
||||||
|
export { PromptBuilder } from './prompt-builder';
|
||||||
|
```
|
||||||
|
|
||||||
|
### ai/providers/index.ts
|
||||||
|
```typescript
|
||||||
|
export { AnthropicProvider } from './anthropic-provider';
|
||||||
|
export { OpenAIProvider } from './openai-provider';
|
||||||
|
export { GoogleProvider } from './google-provider';
|
||||||
|
```
|
||||||
|
|
||||||
|
### storage/index.ts
|
||||||
|
```typescript
|
||||||
|
export { FileStorage } from './file-storage';
|
||||||
|
```
|
||||||
|
|
||||||
|
### config/index.ts
|
||||||
|
```typescript
|
||||||
|
export { ConfigManager } from './config-manager';
|
||||||
|
```
|
||||||
|
|
||||||
|
### utils/index.ts
|
||||||
|
```typescript
|
||||||
|
export { generateTaskId, generateSubtaskId } from './id-generator';
|
||||||
|
```
|
||||||
|
|
||||||
|
### errors/index.ts
|
||||||
|
```typescript
|
||||||
|
export { TaskMasterError } from './task-master-error';
|
||||||
|
```
|
||||||
171
CHANGELOG.md
171
CHANGELOG.md
@@ -1,5 +1,176 @@
|
|||||||
# task-master-ai
|
# task-master-ai
|
||||||
|
|
||||||
|
## 0.24.0
|
||||||
|
|
||||||
|
### Minor Changes
|
||||||
|
|
||||||
|
- [#1098](https://github.com/eyaltoledano/claude-task-master/pull/1098) [`36468f3`](https://github.com/eyaltoledano/claude-task-master/commit/36468f3c93faf4035a5c442ccbc501077f3440f1) Thanks [@Crunchyman-ralph](https://github.com/Crunchyman-ralph)! - Enhanced Claude Code provider with codebase-aware task generation
|
||||||
|
- Added automatic codebase analysis for Claude Code provider in `parse-prd`, `expand-task`, and `analyze-complexity` commands
|
||||||
|
- When using Claude Code as the AI provider, Task Master now instructs the AI to analyze the project structure, existing implementations, and patterns before generating tasks or subtasks
|
||||||
|
- Tasks and subtasks generated by Claude Code are now informed by actual codebase analysis, resulting in more accurate and contextual outputs
|
||||||
|
|
||||||
|
- [#1105](https://github.com/eyaltoledano/claude-task-master/pull/1105) [`75c514c`](https://github.com/eyaltoledano/claude-task-master/commit/75c514cf5b2ca47f95c0ad7fa92654a4f2a6be4b) Thanks [@Crunchyman-ralph](https://github.com/Crunchyman-ralph)! - Add GPT-5 support with proper parameter handling
|
||||||
|
- Added GPT-5 model to supported models configuration with SWE score of 0.749
|
||||||
|
|
||||||
|
- [#1091](https://github.com/eyaltoledano/claude-task-master/pull/1091) [`4bb6370`](https://github.com/eyaltoledano/claude-task-master/commit/4bb63706b80c28d1b2d782ba868a725326f916c7) Thanks [@Crunchyman-ralph](https://github.com/Crunchyman-ralph)! - Add Claude Code subagent support with task-orchestrator, task-executor, and task-checker
|
||||||
|
|
||||||
|
## New Claude Code Agents
|
||||||
|
|
||||||
|
Added specialized agents for Claude Code users to enable parallel task execution, intelligent task orchestration, and quality assurance:
|
||||||
|
|
||||||
|
### task-orchestrator
|
||||||
|
|
||||||
|
Coordinates and manages the execution of Task Master tasks with intelligent dependency analysis:
|
||||||
|
- Analyzes task dependencies to identify parallelizable work
|
||||||
|
- Deploys multiple task-executor agents for concurrent execution
|
||||||
|
- Monitors task completion and updates the dependency graph
|
||||||
|
- Automatically identifies and starts newly unblocked tasks
|
||||||
|
|
||||||
|
### task-executor
|
||||||
|
|
||||||
|
Handles the actual implementation of individual tasks:
|
||||||
|
- Executes specific tasks identified by the orchestrator
|
||||||
|
- Works on concrete implementation rather than planning
|
||||||
|
- Updates task status and logs progress
|
||||||
|
- Can work in parallel with other executors on independent tasks
|
||||||
|
|
||||||
|
### task-checker
|
||||||
|
|
||||||
|
Verifies that completed tasks meet their specifications:
|
||||||
|
- Reviews tasks marked as 'review' status
|
||||||
|
- Validates implementation against requirements
|
||||||
|
- Runs tests and checks for best practices
|
||||||
|
- Ensures quality before marking tasks as 'done'
|
||||||
|
|
||||||
|
## Installation
|
||||||
|
|
||||||
|
When using the Claude profile (`task-master rules add claude`), the agents are automatically installed to `.claude/agents/` directory.
|
||||||
|
|
||||||
|
## Usage Example
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# In Claude Code, after initializing a project with tasks:
|
||||||
|
|
||||||
|
# Use task-orchestrator to analyze and coordinate work
|
||||||
|
# The orchestrator will:
|
||||||
|
# 1. Check task dependencies
|
||||||
|
# 2. Identify tasks that can run in parallel
|
||||||
|
# 3. Deploy executors for available work
|
||||||
|
# 4. Monitor progress and deploy new executors as tasks complete
|
||||||
|
|
||||||
|
# Use task-executor for specific task implementation
|
||||||
|
# When the orchestrator identifies task 2.3 needs work:
|
||||||
|
# The executor will implement that specific task
|
||||||
|
```
|
||||||
|
|
||||||
|
## Benefits
|
||||||
|
- **Parallel Execution**: Multiple independent tasks can be worked on simultaneously
|
||||||
|
- **Intelligent Scheduling**: Orchestrator understands dependencies and optimizes execution order
|
||||||
|
- **Separation of Concerns**: Planning (orchestrator) is separated from execution (executor)
|
||||||
|
- **Progress Tracking**: Real-time updates as tasks are completed
|
||||||
|
- **Automatic Progression**: As tasks complete, newly unblocked tasks are automatically started
|
||||||
|
|
||||||
|
### Patch Changes
|
||||||
|
|
||||||
|
- [#1094](https://github.com/eyaltoledano/claude-task-master/pull/1094) [`4357af3`](https://github.com/eyaltoledano/claude-task-master/commit/4357af3f13859d90bca8795215e5d5f1d94abde5) Thanks [@Crunchyman-ralph](https://github.com/Crunchyman-ralph)! - Fix expand task generating unrelated generic subtasks
|
||||||
|
|
||||||
|
Fixed an issue where `task-master expand` would generate generic authentication-related subtasks regardless of the parent task context when using complexity reports. The expansion now properly includes the parent task details alongside any expansion guidance.
|
||||||
|
|
||||||
|
- [#1079](https://github.com/eyaltoledano/claude-task-master/pull/1079) [`e495b2b`](https://github.com/eyaltoledano/claude-task-master/commit/e495b2b55950ee54c7d0f1817d8530e28bd79c05) Thanks [@Crunchyman-ralph](https://github.com/Crunchyman-ralph)! - Fix scope-up/down prompts to include all required fields for better AI model compatibility
|
||||||
|
- Added missing `priority` field to scope adjustment prompts to prevent validation errors with Claude-code and other models
|
||||||
|
- Ensures generated JSON includes all fields required by the schema
|
||||||
|
|
||||||
|
- [#1079](https://github.com/eyaltoledano/claude-task-master/pull/1079) [`e495b2b`](https://github.com/eyaltoledano/claude-task-master/commit/e495b2b55950ee54c7d0f1817d8530e28bd79c05) Thanks [@Crunchyman-ralph](https://github.com/Crunchyman-ralph)! - Fix MCP scope-up/down tools not finding tasks
|
||||||
|
- Fixed task ID parsing in MCP layer - now correctly converts string IDs to numbers
|
||||||
|
- scope_up_task and scope_down_task MCP tools now work properly
|
||||||
|
|
||||||
|
- [#1079](https://github.com/eyaltoledano/claude-task-master/pull/1079) [`e495b2b`](https://github.com/eyaltoledano/claude-task-master/commit/e495b2b55950ee54c7d0f1817d8530e28bd79c05) Thanks [@Crunchyman-ralph](https://github.com/Crunchyman-ralph)! - Improve AI provider compatibility for JSON generation
|
||||||
|
- Fixed schema compatibility issues between Perplexity and OpenAI o3 models
|
||||||
|
- Removed nullable/default modifiers from Zod schemas for broader compatibility
|
||||||
|
- Added automatic JSON repair for malformed AI responses (handles cases like missing array values)
|
||||||
|
- Perplexity now uses JSON mode for more reliable structured output
|
||||||
|
- Post-processing handles default values separately from schema validation
|
||||||
|
|
||||||
|
## 0.24.0-rc.2
|
||||||
|
|
||||||
|
### Minor Changes
|
||||||
|
|
||||||
|
- [#1105](https://github.com/eyaltoledano/claude-task-master/pull/1105) [`75c514c`](https://github.com/eyaltoledano/claude-task-master/commit/75c514cf5b2ca47f95c0ad7fa92654a4f2a6be4b) Thanks [@Crunchyman-ralph](https://github.com/Crunchyman-ralph)! - Add GPT-5 support with proper parameter handling
|
||||||
|
- Added GPT-5 model to supported models configuration with SWE score of 0.749
|
||||||
|
|
||||||
|
## 0.24.0-rc.1
|
||||||
|
|
||||||
|
### Minor Changes
|
||||||
|
|
||||||
|
- [#1093](https://github.com/eyaltoledano/claude-task-master/pull/1093) [`36468f3`](https://github.com/eyaltoledano/claude-task-master/commit/36468f3c93faf4035a5c442ccbc501077f3440f1) Thanks [@Crunchyman-ralph](https://github.com/Crunchyman-ralph)! - Enhanced Claude Code provider with codebase-aware task generation
|
||||||
|
- Added automatic codebase analysis for Claude Code provider in `parse-prd`, `expand-task`, and `analyze-complexity` commands
|
||||||
|
- When using Claude Code as the AI provider, Task Master now instructs the AI to analyze the project structure, existing implementations, and patterns before generating tasks or subtasks
|
||||||
|
- Tasks and subtasks generated by Claude Code are now informed by actual codebase analysis, resulting in more accurate and contextual outputs
|
||||||
|
|
||||||
|
- [#1091](https://github.com/eyaltoledano/claude-task-master/pull/1091) [`4bb6370`](https://github.com/eyaltoledano/claude-task-master/commit/4bb63706b80c28d1b2d782ba868a725326f916c7) Thanks [@Crunchyman-ralph](https://github.com/Crunchyman-ralph)! - Add Claude Code subagent support with task-orchestrator, task-executor, and task-checker
|
||||||
|
|
||||||
|
## New Claude Code Agents
|
||||||
|
|
||||||
|
Added specialized agents for Claude Code users to enable parallel task execution, intelligent task orchestration, and quality assurance:
|
||||||
|
|
||||||
|
### task-orchestrator
|
||||||
|
|
||||||
|
Coordinates and manages the execution of Task Master tasks with intelligent dependency analysis:
|
||||||
|
- Analyzes task dependencies to identify parallelizable work
|
||||||
|
- Deploys multiple task-executor agents for concurrent execution
|
||||||
|
- Monitors task completion and updates the dependency graph
|
||||||
|
- Automatically identifies and starts newly unblocked tasks
|
||||||
|
|
||||||
|
### task-executor
|
||||||
|
|
||||||
|
Handles the actual implementation of individual tasks:
|
||||||
|
- Executes specific tasks identified by the orchestrator
|
||||||
|
- Works on concrete implementation rather than planning
|
||||||
|
- Updates task status and logs progress
|
||||||
|
- Can work in parallel with other executors on independent tasks
|
||||||
|
|
||||||
|
### task-checker
|
||||||
|
|
||||||
|
Verifies that completed tasks meet their specifications:
|
||||||
|
- Reviews tasks marked as 'review' status
|
||||||
|
- Validates implementation against requirements
|
||||||
|
- Runs tests and checks for best practices
|
||||||
|
- Ensures quality before marking tasks as 'done'
|
||||||
|
|
||||||
|
## Installation
|
||||||
|
|
||||||
|
When using the Claude profile (`task-master rules add claude`), the agents are automatically installed to `.claude/agents/` directory.
|
||||||
|
|
||||||
|
## Usage Example
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# In Claude Code, after initializing a project with tasks:
|
||||||
|
|
||||||
|
# Use task-orchestrator to analyze and coordinate work
|
||||||
|
# The orchestrator will:
|
||||||
|
# 1. Check task dependencies
|
||||||
|
# 2. Identify tasks that can run in parallel
|
||||||
|
# 3. Deploy executors for available work
|
||||||
|
# 4. Monitor progress and deploy new executors as tasks complete
|
||||||
|
|
||||||
|
# Use task-executor for specific task implementation
|
||||||
|
# When the orchestrator identifies task 2.3 needs work:
|
||||||
|
# The executor will implement that specific task
|
||||||
|
```
|
||||||
|
|
||||||
|
## Benefits
|
||||||
|
- **Parallel Execution**: Multiple independent tasks can be worked on simultaneously
|
||||||
|
- **Intelligent Scheduling**: Orchestrator understands dependencies and optimizes execution order
|
||||||
|
- **Separation of Concerns**: Planning (orchestrator) is separated from execution (executor)
|
||||||
|
- **Progress Tracking**: Real-time updates as tasks are completed
|
||||||
|
- **Automatic Progression**: As tasks complete, newly unblocked tasks are automatically started
|
||||||
|
|
||||||
|
### Patch Changes
|
||||||
|
|
||||||
|
- [#1094](https://github.com/eyaltoledano/claude-task-master/pull/1094) [`4357af3`](https://github.com/eyaltoledano/claude-task-master/commit/4357af3f13859d90bca8795215e5d5f1d94abde5) Thanks [@Crunchyman-ralph](https://github.com/Crunchyman-ralph)! - Fix expand task generating unrelated generic subtasks
|
||||||
|
|
||||||
|
Fixed an issue where `task-master expand` would generate generic authentication-related subtasks regardless of the parent task context when using complexity reports. The expansion now properly includes the parent task details alongside any expansion guidance.
|
||||||
|
|
||||||
## 0.23.1-rc.0
|
## 0.23.1-rc.0
|
||||||
|
|
||||||
### Patch Changes
|
### Patch Changes
|
||||||
|
|||||||
@@ -3,3 +3,7 @@
|
|||||||
## Task Master AI Instructions
|
## Task Master AI Instructions
|
||||||
**Import Task Master's development workflow commands and guidelines, treat as if import is in the main CLAUDE.md file.**
|
**Import Task Master's development workflow commands and guidelines, treat as if import is in the main CLAUDE.md file.**
|
||||||
@./.taskmaster/CLAUDE.md
|
@./.taskmaster/CLAUDE.md
|
||||||
|
|
||||||
|
## Changeset Guidelines
|
||||||
|
|
||||||
|
- When creating changesets, remember that it's user-facing, meaning we don't have to get into the specifics of the code, but rather mention what the end-user is getting or fixing from this changeset.
|
||||||
35
README.md
35
README.md
@@ -1,14 +1,39 @@
|
|||||||
# Task Master [](https://github.com/eyaltoledano/claude-task-master/stargazers)
|
<a name="readme-top"></a>
|
||||||
|
|
||||||
[](https://github.com/eyaltoledano/claude-task-master/actions/workflows/ci.yml) [](https://badge.fury.io/js/task-master-ai) [](https://discord.gg/taskmasterai) [](LICENSE)
|
<div align='center'>
|
||||||
|
<a href="https://trendshift.io/repositories/13971" target="_blank"><img src="https://trendshift.io/api/badge/repositories/13971" alt="eyaltoledano%2Fclaude-task-master | Trendshift" style="width: 250px; height: 55px;" width="250" height="55"/></a>
|
||||||
|
</div>
|
||||||
|
|
||||||
[](https://www.npmjs.com/package/task-master-ai) [](https://www.npmjs.com/package/task-master-ai) [](https://www.npmjs.com/package/task-master-ai)
|
<p align="center">
|
||||||
|
<a href="https://task-master.dev"><img src="./images/logo.png?raw=true" alt="Taskmaster logo"></a>
|
||||||
|
</p>
|
||||||
|
|
||||||
## By [@eyaltoledano](https://x.com/eyaltoledano), [@RalphEcom](https://x.com/RalphEcom) & [@jasonzhou1993](https://x.com/jasonzhou1993)
|
<p align="center">
|
||||||
|
<b>Taskmaster</b>: A task management system for AI-driven development, designed to work seamlessly with any AI chat.
|
||||||
|
</p>
|
||||||
|
|
||||||
|
<p align="center">
|
||||||
|
<a href="https://discord.gg/taskmasterai" target="_blank"><img src="https://dcbadge.limes.pink/api/server/https://discord.gg/taskmasterai?style=flat" alt="Discord"></a> |
|
||||||
|
<a href="https://docs.task-master.dev" target="_blank">Docs</a>
|
||||||
|
</p>
|
||||||
|
|
||||||
|
<p align="center">
|
||||||
|
<a href="https://github.com/eyaltoledano/claude-task-master/actions/workflows/ci.yml"><img src="https://github.com/eyaltoledano/claude-task-master/actions/workflows/ci.yml/badge.svg" alt="CI"></a>
|
||||||
|
<a href="https://github.com/eyaltoledano/claude-task-master/stargazers"><img src="https://img.shields.io/github/stars/eyaltoledano/claude-task-master?style=social" alt="GitHub stars"></a>
|
||||||
|
<a href="https://badge.fury.io/js/task-master-ai"><img src="https://badge.fury.io/js/task-master-ai.svg" alt="npm version"></a>
|
||||||
|
<a href="LICENSE"><img src="https://img.shields.io/badge/license-MIT%20with%20Commons%20Clause-blue.svg" alt="License"></a>
|
||||||
|
</p>
|
||||||
|
|
||||||
|
<p align="center">
|
||||||
|
<a href="https://www.npmjs.com/package/task-master-ai"><img src="https://img.shields.io/npm/d18m/task-master-ai?style=flat" alt="NPM Downloads"></a>
|
||||||
|
<a href="https://www.npmjs.com/package/task-master-ai"><img src="https://img.shields.io/npm/dm/task-master-ai?style=flat" alt="NPM Downloads"></a>
|
||||||
|
<a href="https://www.npmjs.com/package/task-master-ai"><img src="https://img.shields.io/npm/dw/task-master-ai?style=flat" alt="NPM Downloads"></a>
|
||||||
|
</p>
|
||||||
|
|
||||||
|
## By [@eyaltoledano](https://x.com/eyaltoledano) & [@RalphEcom](https://x.com/RalphEcom)
|
||||||
|
|
||||||
[](https://x.com/eyaltoledano)
|
[](https://x.com/eyaltoledano)
|
||||||
[](https://x.com/RalphEcom)
|
[](https://x.com/RalphEcom)
|
||||||
[](https://x.com/jasonzhou1993)
|
|
||||||
|
|
||||||
A task management system for AI-driven development with Claude, designed to work seamlessly with Cursor AI.
|
A task management system for AI-driven development with Claude, designed to work seamlessly with Cursor AI.
|
||||||
|
|
||||||
|
|||||||
22
apps/docs/README.md
Normal file
22
apps/docs/README.md
Normal file
@@ -0,0 +1,22 @@
|
|||||||
|
# Task Master Documentation
|
||||||
|
|
||||||
|
Welcome to the Task Master documentation. Use the links below to navigate to the information you need:
|
||||||
|
|
||||||
|
## Getting Started
|
||||||
|
|
||||||
|
- [Configuration Guide](archive/configuration.md) - Set up environment variables and customize Task Master
|
||||||
|
- [Tutorial](archive/tutorial.md) - Step-by-step guide to getting started with Task Master
|
||||||
|
|
||||||
|
## Reference
|
||||||
|
|
||||||
|
- [Command Reference](archive/command-reference.md) - Complete list of all available commands
|
||||||
|
- [Task Structure](archive/task-structure.md) - Understanding the task format and features
|
||||||
|
|
||||||
|
## Examples & Licensing
|
||||||
|
|
||||||
|
- [Example Interactions](archive/examples.md) - Common Cursor AI interaction examples
|
||||||
|
- [Licensing Information](archive/licensing.md) - Detailed information about the license
|
||||||
|
|
||||||
|
## Need More Help?
|
||||||
|
|
||||||
|
If you can't find what you're looking for in these docs, please check the [main README](../README.md) or visit our [GitHub repository](https://github.com/eyaltoledano/claude-task-master).
|
||||||
114
apps/docs/archive/Installation.mdx
Normal file
114
apps/docs/archive/Installation.mdx
Normal file
@@ -0,0 +1,114 @@
|
|||||||
|
---
|
||||||
|
title: "Installation(2)"
|
||||||
|
description: "This guide walks you through setting up Task Master in your development environment."
|
||||||
|
---
|
||||||
|
|
||||||
|
## Initial Setup
|
||||||
|
|
||||||
|
<Tip>
|
||||||
|
MCP (Model Control Protocol) provides the easiest way to get started with Task Master directly in your editor.
|
||||||
|
</Tip>
|
||||||
|
|
||||||
|
<AccordionGroup>
|
||||||
|
<Accordion title="Option 1: Using MCP (Recommended)" icon="sparkles">
|
||||||
|
<Steps>
|
||||||
|
<Step title="Add the MCP config to your editor">
|
||||||
|
<Link href="https://cursor.sh">Cursor</Link> recommended, but it works with other text editors
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"mcpServers": {
|
||||||
|
"taskmaster-ai": {
|
||||||
|
"command": "npx",
|
||||||
|
"args": ["-y", "--package", "task-master-ai", "task-master-mcp"],
|
||||||
|
"env": {
|
||||||
|
"ANTHROPIC_API_KEY": "YOUR_ANTHROPIC_API_KEY_HERE",
|
||||||
|
"PERPLEXITY_API_KEY": "YOUR_PERPLEXITY_API_KEY_HERE",
|
||||||
|
"MODEL": "claude-3-7-sonnet-20250219",
|
||||||
|
"PERPLEXITY_MODEL": "sonar-pro",
|
||||||
|
"MAX_TOKENS": 128000,
|
||||||
|
"TEMPERATURE": 0.2,
|
||||||
|
"DEFAULT_SUBTASKS": 5,
|
||||||
|
"DEFAULT_PRIORITY": "medium"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
</Step>
|
||||||
|
<Step title="Enable the MCP in your editor settings">
|
||||||
|
|
||||||
|
</Step>
|
||||||
|
<Step title="Prompt the AI to initialize Task Master">
|
||||||
|
> "Can you please initialize taskmaster-ai into my project?"
|
||||||
|
|
||||||
|
**The AI will:**
|
||||||
|
|
||||||
|
1. Create necessary project structure
|
||||||
|
2. Set up initial configuration files
|
||||||
|
3. Guide you through the rest of the process
|
||||||
|
4. Place your PRD document in the `scripts/` directory (e.g., `scripts/prd.txt`)
|
||||||
|
5. **Use natural language commands** to interact with Task Master:
|
||||||
|
|
||||||
|
> "Can you parse my PRD at scripts/prd.txt?"
|
||||||
|
>
|
||||||
|
> "What's the next task I should work on?"
|
||||||
|
>
|
||||||
|
> "Can you help me implement task 3?"
|
||||||
|
</Step>
|
||||||
|
</Steps>
|
||||||
|
</Accordion>
|
||||||
|
<Accordion title="Option 2: Manual Installation">
|
||||||
|
If you prefer to use the command line interface directly:
|
||||||
|
|
||||||
|
<Steps>
|
||||||
|
<Step title="Install">
|
||||||
|
<CodeGroup>
|
||||||
|
|
||||||
|
```bash Global
|
||||||
|
npm install -g task-master-ai
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
```bash Local
|
||||||
|
npm install task-master-ai
|
||||||
|
```
|
||||||
|
|
||||||
|
</CodeGroup>
|
||||||
|
</Step>
|
||||||
|
<Step title="Initialize a new project">
|
||||||
|
<CodeGroup>
|
||||||
|
|
||||||
|
```bash Global
|
||||||
|
task-master init
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
```bash Local
|
||||||
|
npx task-master-init
|
||||||
|
```
|
||||||
|
|
||||||
|
</CodeGroup>
|
||||||
|
</Step>
|
||||||
|
</Steps>
|
||||||
|
This will prompt you for project details and set up a new project with the necessary files and structure.
|
||||||
|
</Accordion>
|
||||||
|
</AccordionGroup>
|
||||||
|
|
||||||
|
## Common Commands
|
||||||
|
|
||||||
|
<Tip>
|
||||||
|
After setting up Task Master, you can use these commands (either via AI prompts or CLI)
|
||||||
|
</Tip>
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Parse a PRD and generate tasks
|
||||||
|
task-master parse-prd your-prd.txt
|
||||||
|
|
||||||
|
# List all tasks
|
||||||
|
task-master list
|
||||||
|
|
||||||
|
# Show the next task to work on
|
||||||
|
task-master next
|
||||||
|
|
||||||
|
# Generate task files
|
||||||
|
task-master generate
|
||||||
263
apps/docs/archive/ai-client-utils-example.mdx
Normal file
263
apps/docs/archive/ai-client-utils-example.mdx
Normal file
@@ -0,0 +1,263 @@
|
|||||||
|
---
|
||||||
|
title: "AI Client Utilities for MCP Tools"
|
||||||
|
description: "This document provides examples of how to use the new AI client utilities with AsyncOperationManager in MCP tools."
|
||||||
|
---
|
||||||
|
## Examples
|
||||||
|
<AccordionGroup>
|
||||||
|
<Accordion title="Basic Usage with Direct Functions">
|
||||||
|
```javascript
|
||||||
|
// In your direct function implementation:
|
||||||
|
import {
|
||||||
|
getAnthropicClientForMCP,
|
||||||
|
getModelConfig,
|
||||||
|
handleClaudeError
|
||||||
|
} from '../utils/ai-client-utils.js';
|
||||||
|
|
||||||
|
export async function someAiOperationDirect(args, log, context) {
|
||||||
|
try {
|
||||||
|
// Initialize Anthropic client with session from context
|
||||||
|
const client = getAnthropicClientForMCP(context.session, log);
|
||||||
|
|
||||||
|
// Get model configuration with defaults or session overrides
|
||||||
|
const modelConfig = getModelConfig(context.session);
|
||||||
|
|
||||||
|
// Make API call with proper error handling
|
||||||
|
try {
|
||||||
|
const response = await client.messages.create({
|
||||||
|
model: modelConfig.model,
|
||||||
|
max_tokens: modelConfig.maxTokens,
|
||||||
|
temperature: modelConfig.temperature,
|
||||||
|
messages: [{ role: 'user', content: 'Your prompt here' }]
|
||||||
|
});
|
||||||
|
|
||||||
|
return {
|
||||||
|
success: true,
|
||||||
|
data: response
|
||||||
|
};
|
||||||
|
} catch (apiError) {
|
||||||
|
// Use helper to get user-friendly error message
|
||||||
|
const friendlyMessage = handleClaudeError(apiError);
|
||||||
|
|
||||||
|
return {
|
||||||
|
success: false,
|
||||||
|
error: {
|
||||||
|
code: 'AI_API_ERROR',
|
||||||
|
message: friendlyMessage
|
||||||
|
}
|
||||||
|
};
|
||||||
|
}
|
||||||
|
} catch (error) {
|
||||||
|
// Handle client initialization errors
|
||||||
|
return {
|
||||||
|
success: false,
|
||||||
|
error: {
|
||||||
|
code: 'AI_CLIENT_ERROR',
|
||||||
|
message: error.message
|
||||||
|
}
|
||||||
|
};
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
</Accordion>
|
||||||
|
|
||||||
|
<Accordion title="Integration with AsyncOperationManager">
|
||||||
|
```javascript
|
||||||
|
// In your MCP tool implementation:
|
||||||
|
import {
|
||||||
|
AsyncOperationManager,
|
||||||
|
StatusCodes
|
||||||
|
} from '../../utils/async-operation-manager.js';
|
||||||
|
import { someAiOperationDirect } from '../../core/direct-functions/some-ai-operation.js';
|
||||||
|
|
||||||
|
export async function someAiOperation(args, context) {
|
||||||
|
const { session, mcpLog } = context;
|
||||||
|
const log = mcpLog || console;
|
||||||
|
|
||||||
|
try {
|
||||||
|
// Create operation description
|
||||||
|
const operationDescription = `AI operation: ${args.someParam}`;
|
||||||
|
|
||||||
|
// Start async operation
|
||||||
|
const operation = AsyncOperationManager.createOperation(
|
||||||
|
operationDescription,
|
||||||
|
async (reportProgress) => {
|
||||||
|
try {
|
||||||
|
// Initial progress report
|
||||||
|
reportProgress({
|
||||||
|
progress: 0,
|
||||||
|
status: 'Starting AI operation...'
|
||||||
|
});
|
||||||
|
|
||||||
|
// Call direct function with session and progress reporting
|
||||||
|
const result = await someAiOperationDirect(args, log, {
|
||||||
|
reportProgress,
|
||||||
|
mcpLog: log,
|
||||||
|
session
|
||||||
|
});
|
||||||
|
|
||||||
|
// Final progress update
|
||||||
|
reportProgress({
|
||||||
|
progress: 100,
|
||||||
|
status: result.success ? 'Operation completed' : 'Operation failed',
|
||||||
|
result: result.data,
|
||||||
|
error: result.error
|
||||||
|
});
|
||||||
|
|
||||||
|
return result;
|
||||||
|
} catch (error) {
|
||||||
|
// Handle errors in the operation
|
||||||
|
reportProgress({
|
||||||
|
progress: 100,
|
||||||
|
status: 'Operation failed',
|
||||||
|
error: {
|
||||||
|
message: error.message,
|
||||||
|
code: error.code || 'OPERATION_FAILED'
|
||||||
|
}
|
||||||
|
});
|
||||||
|
throw error;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
);
|
||||||
|
|
||||||
|
// Return immediate response with operation ID
|
||||||
|
return {
|
||||||
|
status: StatusCodes.ACCEPTED,
|
||||||
|
body: {
|
||||||
|
success: true,
|
||||||
|
message: 'Operation started',
|
||||||
|
operationId: operation.id
|
||||||
|
}
|
||||||
|
};
|
||||||
|
} catch (error) {
|
||||||
|
// Handle errors in the MCP tool
|
||||||
|
log.error(`Error in someAiOperation: ${error.message}`);
|
||||||
|
return {
|
||||||
|
status: StatusCodes.INTERNAL_SERVER_ERROR,
|
||||||
|
body: {
|
||||||
|
success: false,
|
||||||
|
error: {
|
||||||
|
code: 'OPERATION_FAILED',
|
||||||
|
message: error.message
|
||||||
|
}
|
||||||
|
}
|
||||||
|
};
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
</Accordion>
|
||||||
|
|
||||||
|
<Accordion title="Using Research Capabilities with Perplexity">
|
||||||
|
```javascript
|
||||||
|
// In your direct function:
|
||||||
|
import {
|
||||||
|
getPerplexityClientForMCP,
|
||||||
|
getBestAvailableAIModel
|
||||||
|
} from '../utils/ai-client-utils.js';
|
||||||
|
|
||||||
|
export async function researchOperationDirect(args, log, context) {
|
||||||
|
try {
|
||||||
|
// Get the best AI model for this operation based on needs
|
||||||
|
const { type, client } = await getBestAvailableAIModel(
|
||||||
|
context.session,
|
||||||
|
{ requiresResearch: true },
|
||||||
|
log
|
||||||
|
);
|
||||||
|
|
||||||
|
// Report which model we're using
|
||||||
|
if (context.reportProgress) {
|
||||||
|
await context.reportProgress({
|
||||||
|
progress: 10,
|
||||||
|
status: `Using ${type} model for research...`
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
// Make API call based on the model type
|
||||||
|
if (type === 'perplexity') {
|
||||||
|
// Call Perplexity
|
||||||
|
const response = await client.chat.completions.create({
|
||||||
|
model: context.session?.env?.PERPLEXITY_MODEL || 'sonar-medium-online',
|
||||||
|
messages: [{ role: 'user', content: args.researchQuery }],
|
||||||
|
temperature: 0.1
|
||||||
|
});
|
||||||
|
|
||||||
|
return {
|
||||||
|
success: true,
|
||||||
|
data: response.choices[0].message.content
|
||||||
|
};
|
||||||
|
} else {
|
||||||
|
// Call Claude as fallback
|
||||||
|
// (Implementation depends on specific needs)
|
||||||
|
// ...
|
||||||
|
}
|
||||||
|
} catch (error) {
|
||||||
|
// Handle errors
|
||||||
|
return {
|
||||||
|
success: false,
|
||||||
|
error: {
|
||||||
|
code: 'RESEARCH_ERROR',
|
||||||
|
message: error.message
|
||||||
|
}
|
||||||
|
};
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
</Accordion>
|
||||||
|
|
||||||
|
<Accordion title="Model Configuration Override">
|
||||||
|
```javascript
|
||||||
|
// In your direct function:
|
||||||
|
import { getModelConfig } from '../utils/ai-client-utils.js';
|
||||||
|
|
||||||
|
// Using custom defaults for a specific operation
|
||||||
|
const operationDefaults = {
|
||||||
|
model: 'claude-3-haiku-20240307', // Faster, smaller model
|
||||||
|
maxTokens: 1000, // Lower token limit
|
||||||
|
temperature: 0.2 // Lower temperature for more deterministic output
|
||||||
|
};
|
||||||
|
|
||||||
|
// Get model config with operation-specific defaults
|
||||||
|
const modelConfig = getModelConfig(context.session, operationDefaults);
|
||||||
|
|
||||||
|
// Now use modelConfig in your API calls
|
||||||
|
const response = await client.messages.create({
|
||||||
|
model: modelConfig.model,
|
||||||
|
max_tokens: modelConfig.maxTokens,
|
||||||
|
temperature: modelConfig.temperature
|
||||||
|
// Other parameters...
|
||||||
|
});
|
||||||
|
```
|
||||||
|
</Accordion>
|
||||||
|
</AccordionGroup>
|
||||||
|
|
||||||
|
## Best Practices
|
||||||
|
|
||||||
|
<AccordionGroup>
|
||||||
|
<Accordion title="Error Handling">
|
||||||
|
- Always use try/catch blocks around both client initialization and API calls
|
||||||
|
- Use `handleClaudeError` to provide user-friendly error messages
|
||||||
|
- Return standardized error objects with code and message
|
||||||
|
</Accordion>
|
||||||
|
|
||||||
|
<Accordion title="Progress Reporting">
|
||||||
|
- Report progress at key points (starting, processing, completing)
|
||||||
|
- Include meaningful status messages
|
||||||
|
- Include error details in progress reports when failures occur
|
||||||
|
</Accordion>
|
||||||
|
|
||||||
|
<Accordion title="Session Handling">
|
||||||
|
- Always pass the session from the context to the AI client getters
|
||||||
|
- Use `getModelConfig` to respect user settings from session
|
||||||
|
</Accordion>
|
||||||
|
|
||||||
|
<Accordion title="Model Selection">
|
||||||
|
- Use `getBestAvailableAIModel` when you need to select between different models
|
||||||
|
- Set `requiresResearch: true` when you need Perplexity capabilities
|
||||||
|
</Accordion>
|
||||||
|
|
||||||
|
<Accordion title="AsyncOperationManager Integration">
|
||||||
|
- Create descriptive operation names
|
||||||
|
- Handle all errors within the operation function
|
||||||
|
- Return standardized results from direct functions
|
||||||
|
- Return immediate responses with operation IDs
|
||||||
|
</Accordion>
|
||||||
|
</AccordionGroup>
|
||||||
180
apps/docs/archive/ai-development-workflow.mdx
Normal file
180
apps/docs/archive/ai-development-workflow.mdx
Normal file
@@ -0,0 +1,180 @@
|
|||||||
|
---
|
||||||
|
title: "AI Development Workflow"
|
||||||
|
description: "Learn how Task Master and Cursor AI work together to streamline your development workflow"
|
||||||
|
---
|
||||||
|
|
||||||
|
<Tip>The Cursor agent is pre-configured (via the rules file) to follow this workflow</Tip>
|
||||||
|
|
||||||
|
<AccordionGroup>
|
||||||
|
<Accordion title="1. Task Discovery and Selection">
|
||||||
|
Ask the agent to list available tasks:
|
||||||
|
|
||||||
|
```
|
||||||
|
What tasks are available to work on next?
|
||||||
|
```
|
||||||
|
|
||||||
|
The agent will:
|
||||||
|
|
||||||
|
- Run `task-master list` to see all tasks
|
||||||
|
- Run `task-master next` to determine the next task to work on
|
||||||
|
- Analyze dependencies to determine which tasks are ready to be worked on
|
||||||
|
- Prioritize tasks based on priority level and ID order
|
||||||
|
- Suggest the next task(s) to implement
|
||||||
|
</Accordion>
|
||||||
|
|
||||||
|
<Accordion title="2. Task Implementation">
|
||||||
|
When implementing a task, the agent will:
|
||||||
|
|
||||||
|
- Reference the task's details section for implementation specifics
|
||||||
|
- Consider dependencies on previous tasks
|
||||||
|
- Follow the project's coding standards
|
||||||
|
- Create appropriate tests based on the task's testStrategy
|
||||||
|
|
||||||
|
You can ask:
|
||||||
|
|
||||||
|
```
|
||||||
|
Let's implement task 3. What does it involve?
|
||||||
|
```
|
||||||
|
</Accordion>
|
||||||
|
|
||||||
|
<Accordion title="3. Task Verification">
|
||||||
|
Before marking a task as complete, verify it according to:
|
||||||
|
|
||||||
|
- The task's specified testStrategy
|
||||||
|
- Any automated tests in the codebase
|
||||||
|
- Manual verification if required
|
||||||
|
</Accordion>
|
||||||
|
|
||||||
|
<Accordion title="4. Task Completion">
|
||||||
|
When a task is completed, tell the agent:
|
||||||
|
|
||||||
|
```
|
||||||
|
Task 3 is now complete. Please update its status.
|
||||||
|
```
|
||||||
|
|
||||||
|
The agent will execute:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
task-master set-status --id=3 --status=done
|
||||||
|
```
|
||||||
|
</Accordion>
|
||||||
|
|
||||||
|
<Accordion title="5. Handling Implementation Drift">
|
||||||
|
If during implementation, you discover that:
|
||||||
|
|
||||||
|
- The current approach differs significantly from what was planned
|
||||||
|
- Future tasks need to be modified due to current implementation choices
|
||||||
|
- New dependencies or requirements have emerged
|
||||||
|
|
||||||
|
Tell the agent:
|
||||||
|
|
||||||
|
```
|
||||||
|
We've changed our approach. We're now using Express instead of Fastify. Please update all future tasks to reflect this change.
|
||||||
|
```
|
||||||
|
|
||||||
|
The agent will execute:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
task-master update --from=4 --prompt="Now we are using Express instead of Fastify."
|
||||||
|
```
|
||||||
|
|
||||||
|
This will rewrite or re-scope subsequent tasks in tasks.json while preserving completed work.
|
||||||
|
</Accordion>
|
||||||
|
|
||||||
|
<Accordion title="6. Breaking Down Complex Tasks">
|
||||||
|
For complex tasks that need more granularity:
|
||||||
|
|
||||||
|
```
|
||||||
|
Task 5 seems complex. Can you break it down into subtasks?
|
||||||
|
```
|
||||||
|
|
||||||
|
The agent will execute:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
task-master expand --id=5 --num=3
|
||||||
|
```
|
||||||
|
|
||||||
|
You can provide additional context:
|
||||||
|
|
||||||
|
```
|
||||||
|
Please break down task 5 with a focus on security considerations.
|
||||||
|
```
|
||||||
|
|
||||||
|
The agent will execute:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
task-master expand --id=5 --prompt="Focus on security aspects"
|
||||||
|
```
|
||||||
|
|
||||||
|
You can also expand all pending tasks:
|
||||||
|
|
||||||
|
```
|
||||||
|
Please break down all pending tasks into subtasks.
|
||||||
|
```
|
||||||
|
|
||||||
|
The agent will execute:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
task-master expand --all
|
||||||
|
```
|
||||||
|
|
||||||
|
For research-backed subtask generation using Perplexity AI:
|
||||||
|
|
||||||
|
```
|
||||||
|
Please break down task 5 using research-backed generation.
|
||||||
|
```
|
||||||
|
|
||||||
|
The agent will execute:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
task-master expand --id=5 --research
|
||||||
|
```
|
||||||
|
</Accordion>
|
||||||
|
</AccordionGroup>
|
||||||
|
|
||||||
|
## Example Cursor AI Interactions
|
||||||
|
|
||||||
|
<AccordionGroup>
|
||||||
|
<Accordion title="Starting a new project">
|
||||||
|
```
|
||||||
|
I've just initialized a new project with Claude Task Master. I have a PRD at scripts/prd.txt.
|
||||||
|
Can you help me parse it and set up the initial tasks?
|
||||||
|
```
|
||||||
|
</Accordion>
|
||||||
|
<Accordion title="Working on tasks">
|
||||||
|
```
|
||||||
|
What's the next task I should work on? Please consider dependencies and priorities.
|
||||||
|
```
|
||||||
|
</Accordion>
|
||||||
|
<Accordion title="Implementing a specific task">
|
||||||
|
```
|
||||||
|
I'd like to implement task 4. Can you help me understand what needs to be done and how to approach it?
|
||||||
|
```
|
||||||
|
</Accordion>
|
||||||
|
<Accordion title="Managing subtasks">
|
||||||
|
```
|
||||||
|
I need to regenerate the subtasks for task 3 with a different approach. Can you help me clear and regenerate them?
|
||||||
|
```
|
||||||
|
</Accordion>
|
||||||
|
<Accordion title="Handling changes">
|
||||||
|
```
|
||||||
|
We've decided to use MongoDB instead of PostgreSQL. Can you update all future tasks to reflect this change?
|
||||||
|
```
|
||||||
|
</Accordion>
|
||||||
|
<Accordion title="Completing work">
|
||||||
|
```
|
||||||
|
I've finished implementing the authentication system described in task 2. All tests are passing.
|
||||||
|
Please mark it as complete and tell me what I should work on next.
|
||||||
|
```
|
||||||
|
</Accordion>
|
||||||
|
<Accordion title="Analyzing complexity">
|
||||||
|
```
|
||||||
|
Can you analyze the complexity of our tasks to help me understand which ones need to be broken down further?
|
||||||
|
```
|
||||||
|
</Accordion>
|
||||||
|
<Accordion title="Viewing complexity report">
|
||||||
|
```
|
||||||
|
Can you show me the complexity report in a more readable format?
|
||||||
|
```
|
||||||
|
</Accordion>
|
||||||
|
</AccordionGroup>
|
||||||
208
apps/docs/archive/command-reference.mdx
Normal file
208
apps/docs/archive/command-reference.mdx
Normal file
@@ -0,0 +1,208 @@
|
|||||||
|
---
|
||||||
|
title: "Task Master Commands"
|
||||||
|
description: "A comprehensive reference of all available Task Master commands"
|
||||||
|
---
|
||||||
|
|
||||||
|
<AccordionGroup>
|
||||||
|
<Accordion title="Parse PRD">
|
||||||
|
```bash
|
||||||
|
# Parse a PRD file and generate tasks
|
||||||
|
task-master parse-prd <prd-file.txt>
|
||||||
|
|
||||||
|
# Limit the number of tasks generated
|
||||||
|
task-master parse-prd <prd-file.txt> --num-tasks=10
|
||||||
|
```
|
||||||
|
</Accordion>
|
||||||
|
|
||||||
|
<Accordion title="List Tasks">
|
||||||
|
```bash
|
||||||
|
# List all tasks
|
||||||
|
task-master list
|
||||||
|
|
||||||
|
# List tasks with a specific status
|
||||||
|
task-master list --status=<status>
|
||||||
|
|
||||||
|
# List tasks with subtasks
|
||||||
|
task-master list --with-subtasks
|
||||||
|
|
||||||
|
# List tasks with a specific status and include subtasks
|
||||||
|
task-master list --status=<status> --with-subtasks
|
||||||
|
```
|
||||||
|
</Accordion>
|
||||||
|
|
||||||
|
<Accordion title="Show Next Task">
|
||||||
|
```bash
|
||||||
|
# Show the next task to work on based on dependencies and status
|
||||||
|
task-master next
|
||||||
|
```
|
||||||
|
</Accordion>
|
||||||
|
|
||||||
|
<Accordion title="Show Specific Task">
|
||||||
|
```bash
|
||||||
|
# Show details of a specific task
|
||||||
|
task-master show <id>
|
||||||
|
# or
|
||||||
|
task-master show --id=<id>
|
||||||
|
|
||||||
|
# View a specific subtask (e.g., subtask 2 of task 1)
|
||||||
|
task-master show 1.2
|
||||||
|
```
|
||||||
|
</Accordion>
|
||||||
|
|
||||||
|
<Accordion title="Update Tasks">
|
||||||
|
```bash
|
||||||
|
# Update tasks from a specific ID and provide context
|
||||||
|
task-master update --from=<id> --prompt="<prompt>"
|
||||||
|
```
|
||||||
|
</Accordion>
|
||||||
|
|
||||||
|
<Accordion title="Update a Specific Task">
|
||||||
|
```bash
|
||||||
|
# Update a single task by ID with new information
|
||||||
|
task-master update-task --id=<id> --prompt="<prompt>"
|
||||||
|
|
||||||
|
# Use research-backed updates with Perplexity AI
|
||||||
|
task-master update-task --id=<id> --prompt="<prompt>" --research
|
||||||
|
```
|
||||||
|
</Accordion>
|
||||||
|
|
||||||
|
<Accordion title="Update a Subtask">
|
||||||
|
```bash
|
||||||
|
# Append additional information to a specific subtask
|
||||||
|
task-master update-subtask --id=<parentId.subtaskId> --prompt="<prompt>"
|
||||||
|
|
||||||
|
# Example: Add details about API rate limiting to subtask 2 of task 5
|
||||||
|
task-master update-subtask --id=5.2 --prompt="Add rate limiting of 100 requests per minute"
|
||||||
|
|
||||||
|
# Use research-backed updates with Perplexity AI
|
||||||
|
task-master update-subtask --id=<parentId.subtaskId> --prompt="<prompt>" --research
|
||||||
|
```
|
||||||
|
|
||||||
|
Unlike the `update-task` command which replaces task information, the `update-subtask` command _appends_ new information to the existing subtask details, marking it with a timestamp. This is useful for iteratively enhancing subtasks while preserving the original content.
|
||||||
|
</Accordion>
|
||||||
|
|
||||||
|
<Accordion title="Generate Task Files">
|
||||||
|
```bash
|
||||||
|
# Generate individual task files from tasks.json
|
||||||
|
task-master generate
|
||||||
|
```
|
||||||
|
</Accordion>
|
||||||
|
|
||||||
|
<Accordion title="Set Task Status">
|
||||||
|
```bash
|
||||||
|
# Set status of a single task
|
||||||
|
task-master set-status --id=<id> --status=<status>
|
||||||
|
|
||||||
|
# Set status for multiple tasks
|
||||||
|
task-master set-status --id=1,2,3 --status=<status>
|
||||||
|
|
||||||
|
# Set status for subtasks
|
||||||
|
task-master set-status --id=1.1,1.2 --status=<status>
|
||||||
|
```
|
||||||
|
|
||||||
|
When marking a task as "done", all of its subtasks will automatically be marked as "done" as well.
|
||||||
|
</Accordion>
|
||||||
|
|
||||||
|
<Accordion title="Expand Tasks">
|
||||||
|
```bash
|
||||||
|
# Expand a specific task with subtasks
|
||||||
|
task-master expand --id=<id> --num=<number>
|
||||||
|
|
||||||
|
# Expand with additional context
|
||||||
|
task-master expand --id=<id> --prompt="<context>"
|
||||||
|
|
||||||
|
# Expand all pending tasks
|
||||||
|
task-master expand --all
|
||||||
|
|
||||||
|
# Force regeneration of subtasks for tasks that already have them
|
||||||
|
task-master expand --all --force
|
||||||
|
|
||||||
|
# Research-backed subtask generation for a specific task
|
||||||
|
task-master expand --id=<id> --research
|
||||||
|
|
||||||
|
# Research-backed generation for all tasks
|
||||||
|
task-master expand --all --research
|
||||||
|
```
|
||||||
|
</Accordion>
|
||||||
|
|
||||||
|
<Accordion title="Clear Subtasks">
|
||||||
|
```bash
|
||||||
|
# Clear subtasks from a specific task
|
||||||
|
task-master clear-subtasks --id=<id>
|
||||||
|
|
||||||
|
# Clear subtasks from multiple tasks
|
||||||
|
task-master clear-subtasks --id=1,2,3
|
||||||
|
|
||||||
|
# Clear subtasks from all tasks
|
||||||
|
task-master clear-subtasks --all
|
||||||
|
```
|
||||||
|
</Accordion>
|
||||||
|
|
||||||
|
<Accordion title="Analyze Task Complexity">
|
||||||
|
```bash
|
||||||
|
# Analyze complexity of all tasks
|
||||||
|
task-master analyze-complexity
|
||||||
|
|
||||||
|
# Save report to a custom location
|
||||||
|
task-master analyze-complexity --output=my-report.json
|
||||||
|
|
||||||
|
# Use a specific LLM model
|
||||||
|
task-master analyze-complexity --model=claude-3-opus-20240229
|
||||||
|
|
||||||
|
# Set a custom complexity threshold (1-10)
|
||||||
|
task-master analyze-complexity --threshold=6
|
||||||
|
|
||||||
|
# Use an alternative tasks file
|
||||||
|
task-master analyze-complexity --file=custom-tasks.json
|
||||||
|
|
||||||
|
# Use Perplexity AI for research-backed complexity analysis
|
||||||
|
task-master analyze-complexity --research
|
||||||
|
```
|
||||||
|
</Accordion>
|
||||||
|
|
||||||
|
<Accordion title="View Complexity Report">
|
||||||
|
```bash
|
||||||
|
# Display the task complexity analysis report
|
||||||
|
task-master complexity-report
|
||||||
|
|
||||||
|
# View a report at a custom location
|
||||||
|
task-master complexity-report --file=my-report.json
|
||||||
|
```
|
||||||
|
</Accordion>
|
||||||
|
|
||||||
|
<Accordion title="Managing Task Dependencies">
|
||||||
|
```bash
|
||||||
|
# Add a dependency to a task
|
||||||
|
task-master add-dependency --id=<id> --depends-on=<id>
|
||||||
|
|
||||||
|
# Remove a dependency from a task
|
||||||
|
task-master remove-dependency --id=<id> --depends-on=<id>
|
||||||
|
|
||||||
|
# Validate dependencies without fixing them
|
||||||
|
task-master validate-dependencies
|
||||||
|
|
||||||
|
# Find and fix invalid dependencies automatically
|
||||||
|
task-master fix-dependencies
|
||||||
|
```
|
||||||
|
</Accordion>
|
||||||
|
|
||||||
|
<Accordion title="Add a New Task">
|
||||||
|
```bash
|
||||||
|
# Add a new task using AI
|
||||||
|
task-master add-task --prompt="Description of the new task"
|
||||||
|
|
||||||
|
# Add a task with dependencies
|
||||||
|
task-master add-task --prompt="Description" --dependencies=1,2,3
|
||||||
|
|
||||||
|
# Add a task with priority
|
||||||
|
task-master add-task --prompt="Description" --priority=high
|
||||||
|
```
|
||||||
|
</Accordion>
|
||||||
|
|
||||||
|
<Accordion title="Initialize a Project">
|
||||||
|
```bash
|
||||||
|
# Initialize a new project with Task Master structure
|
||||||
|
task-master init
|
||||||
|
```
|
||||||
|
</Accordion>
|
||||||
|
</AccordionGroup>
|
||||||
80
apps/docs/archive/configuration.mdx
Normal file
80
apps/docs/archive/configuration.mdx
Normal file
@@ -0,0 +1,80 @@
|
|||||||
|
---
|
||||||
|
title: "Configuration"
|
||||||
|
description: "Configure Task Master through environment variables in a .env file"
|
||||||
|
---
|
||||||
|
|
||||||
|
## Required Configuration
|
||||||
|
|
||||||
|
<Note>
|
||||||
|
Task Master requires an Anthropic API key to function. Add this to your `.env` file:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
ANTHROPIC_API_KEY=sk-ant-api03-your-api-key
|
||||||
|
```
|
||||||
|
|
||||||
|
You can obtain an API key from the [Anthropic Console](https://console.anthropic.com/).
|
||||||
|
</Note>
|
||||||
|
|
||||||
|
## Optional Configuration
|
||||||
|
|
||||||
|
| Variable | Default Value | Description | Example |
|
||||||
|
| --- | --- | --- | --- |
|
||||||
|
| `MODEL` | `"claude-3-7-sonnet-20250219"` | Claude model to use | `MODEL=claude-3-opus-20240229` |
|
||||||
|
| `MAX_TOKENS` | `"4000"` | Maximum tokens for responses | `MAX_TOKENS=8000` |
|
||||||
|
| `TEMPERATURE` | `"0.7"` | Temperature for model responses | `TEMPERATURE=0.5` |
|
||||||
|
| `DEBUG` | `"false"` | Enable debug logging | `DEBUG=true` |
|
||||||
|
| `LOG_LEVEL` | `"info"` | Console output level | `LOG_LEVEL=debug` |
|
||||||
|
| `DEFAULT_SUBTASKS` | `"3"` | Default subtask count | `DEFAULT_SUBTASKS=5` |
|
||||||
|
| `DEFAULT_PRIORITY` | `"medium"` | Default priority | `DEFAULT_PRIORITY=high` |
|
||||||
|
| `PROJECT_NAME` | `"MCP SaaS MVP"` | Project name in metadata | `PROJECT_NAME=My Awesome Project` |
|
||||||
|
| `PROJECT_VERSION` | `"1.0.0"` | Version in metadata | `PROJECT_VERSION=2.1.0` |
|
||||||
|
| `PERPLEXITY_API_KEY` | - | For research-backed features | `PERPLEXITY_API_KEY=pplx-...` |
|
||||||
|
| `PERPLEXITY_MODEL` | `"sonar-medium-online"` | Perplexity model | `PERPLEXITY_MODEL=sonar-large-online` |
|
||||||
|
|
||||||
|
## Example .env File
|
||||||
|
|
||||||
|
```
|
||||||
|
# Required
|
||||||
|
ANTHROPIC_API_KEY=sk-ant-api03-your-api-key
|
||||||
|
|
||||||
|
# Optional - Claude Configuration
|
||||||
|
MODEL=claude-3-7-sonnet-20250219
|
||||||
|
MAX_TOKENS=4000
|
||||||
|
TEMPERATURE=0.7
|
||||||
|
|
||||||
|
# Optional - Perplexity API for Research
|
||||||
|
PERPLEXITY_API_KEY=pplx-your-api-key
|
||||||
|
PERPLEXITY_MODEL=sonar-medium-online
|
||||||
|
|
||||||
|
# Optional - Project Info
|
||||||
|
PROJECT_NAME=My Project
|
||||||
|
PROJECT_VERSION=1.0.0
|
||||||
|
|
||||||
|
# Optional - Application Configuration
|
||||||
|
DEFAULT_SUBTASKS=3
|
||||||
|
DEFAULT_PRIORITY=medium
|
||||||
|
DEBUG=false
|
||||||
|
LOG_LEVEL=info
|
||||||
|
```
|
||||||
|
|
||||||
|
## Troubleshooting
|
||||||
|
|
||||||
|
### If `task-master init` doesn't respond:
|
||||||
|
|
||||||
|
Try running it with Node directly:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
node node_modules/claude-task-master/scripts/init.js
|
||||||
|
```
|
||||||
|
|
||||||
|
Or clone the repository and run:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
git clone https://github.com/eyaltoledano/claude-task-master.git
|
||||||
|
cd claude-task-master
|
||||||
|
node scripts/init.js
|
||||||
|
```
|
||||||
|
|
||||||
|
<Note>
|
||||||
|
For advanced configuration options and detailed customization, see our [Advanced Configuration Guide] page.
|
||||||
|
</Note>
|
||||||
95
apps/docs/archive/cursor-setup.mdx
Normal file
95
apps/docs/archive/cursor-setup.mdx
Normal file
@@ -0,0 +1,95 @@
|
|||||||
|
---
|
||||||
|
title: "Cursor AI Integration"
|
||||||
|
description: "Learn how to set up and use Task Master with Cursor AI"
|
||||||
|
---
|
||||||
|
|
||||||
|
## Setting up Cursor AI Integration
|
||||||
|
|
||||||
|
<Check>
|
||||||
|
Task Master is designed to work seamlessly with [Cursor AI](https://www.cursor.so/), providing a structured workflow for AI-driven development.
|
||||||
|
</Check>
|
||||||
|
|
||||||
|
<AccordionGroup>
|
||||||
|
<Accordion title="Using Cursor with MCP (Recommended)" icon="sparkles">
|
||||||
|
If you've already set up Task Master with MCP in Cursor, the integration is automatic. You can simply use natural language to interact with Task Master:
|
||||||
|
|
||||||
|
```
|
||||||
|
What tasks are available to work on next?
|
||||||
|
Can you analyze the complexity of our tasks?
|
||||||
|
I'd like to implement task 4. What does it involve?
|
||||||
|
```
|
||||||
|
</Accordion>
|
||||||
|
<Accordion title="Manual Cursor Setup">
|
||||||
|
If you're not using MCP, you can still set up Cursor integration:
|
||||||
|
|
||||||
|
<Steps>
|
||||||
|
<Step title="After initializing your project, open it in Cursor">
|
||||||
|
The `.cursor/rules/dev_workflow.mdc` file is automatically loaded by Cursor, providing the AI with knowledge about the task management system
|
||||||
|
</Step>
|
||||||
|
<Step title="Place your PRD document in the scripts/ directory (e.g., scripts/prd.txt)">
|
||||||
|
|
||||||
|
</Step>
|
||||||
|
<Step title="Open Cursor's AI chat and switch to Agent mode">
|
||||||
|
|
||||||
|
</Step>
|
||||||
|
</Steps>
|
||||||
|
</Accordion>
|
||||||
|
<Accordion title="Alternative MCP Setup in Cursor">
|
||||||
|
<Steps>
|
||||||
|
<Step title="Go to Cursor settings">
|
||||||
|
|
||||||
|
</Step>
|
||||||
|
<Step title="Navigate to the MCP section">
|
||||||
|
|
||||||
|
</Step>
|
||||||
|
<Step title="Click on 'Add New MCP Server'">
|
||||||
|
|
||||||
|
</Step>
|
||||||
|
<Step title="Configure with the following details:">
|
||||||
|
- Name: "Task Master"
|
||||||
|
- Type: "Command"
|
||||||
|
- Command: "npx -y --package task-master-ai task-master-mcp"
|
||||||
|
</Step>
|
||||||
|
<Step title="Save Settings">
|
||||||
|
|
||||||
|
</Step>
|
||||||
|
</Steps>
|
||||||
|
Once configured, you can interact with Task Master's task management commands directly through Cursor's interface, providing a more integrated experience.
|
||||||
|
</Accordion>
|
||||||
|
</AccordionGroup>
|
||||||
|
|
||||||
|
## Initial Task Generation
|
||||||
|
|
||||||
|
In Cursor's AI chat, instruct the agent to generate tasks from your PRD:
|
||||||
|
|
||||||
|
```
|
||||||
|
Please use the task-master parse-prd command to generate tasks from my PRD. The PRD is located at scripts/prd.txt.
|
||||||
|
```
|
||||||
|
|
||||||
|
The agent will execute:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
task-master parse-prd scripts/prd.txt
|
||||||
|
```
|
||||||
|
|
||||||
|
This will:
|
||||||
|
|
||||||
|
- Parse your PRD document
|
||||||
|
- Generate a structured `tasks.json` file with tasks, dependencies, priorities, and test strategies
|
||||||
|
- The agent will understand this process due to the Cursor rules
|
||||||
|
|
||||||
|
### Generate Individual Task Files
|
||||||
|
|
||||||
|
Next, ask the agent to generate individual task files:
|
||||||
|
|
||||||
|
```
|
||||||
|
Please generate individual task files from tasks.json
|
||||||
|
```
|
||||||
|
|
||||||
|
The agent will execute:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
task-master generate
|
||||||
|
```
|
||||||
|
|
||||||
|
This creates individual task files in the `tasks/` directory (e.g., `task_001.txt`, `task_002.txt`), making it easier to reference specific tasks.
|
||||||
56
apps/docs/archive/examples.mdx
Normal file
56
apps/docs/archive/examples.mdx
Normal file
@@ -0,0 +1,56 @@
|
|||||||
|
---
|
||||||
|
title: "Example Cursor AI Interactions"
|
||||||
|
description: "Below are some common interactions with Cursor AI when using Task Master"
|
||||||
|
---
|
||||||
|
|
||||||
|
<AccordionGroup>
|
||||||
|
<Accordion title="Starting a new project">
|
||||||
|
```
|
||||||
|
I've just initialized a new project with Claude Task Master. I have a PRD at scripts/prd.txt.
|
||||||
|
Can you help me parse it and set up the initial tasks?
|
||||||
|
```
|
||||||
|
</Accordion>
|
||||||
|
|
||||||
|
<Accordion title="Working on tasks">
|
||||||
|
```
|
||||||
|
What's the next task I should work on? Please consider dependencies and priorities.
|
||||||
|
```
|
||||||
|
</Accordion>
|
||||||
|
|
||||||
|
<Accordion title="Implementing a specific task">
|
||||||
|
```
|
||||||
|
I'd like to implement task 4. Can you help me understand what needs to be done and how to approach it?
|
||||||
|
```
|
||||||
|
</Accordion>
|
||||||
|
|
||||||
|
<Accordion title="Managing subtasks">
|
||||||
|
```
|
||||||
|
I need to regenerate the subtasks for task 3 with a different approach. Can you help me clear and regenerate them?
|
||||||
|
```
|
||||||
|
</Accordion>
|
||||||
|
|
||||||
|
<Accordion title="Handling changes">
|
||||||
|
```
|
||||||
|
We've decided to use MongoDB instead of PostgreSQL. Can you update all future tasks to reflect this change?
|
||||||
|
```
|
||||||
|
</Accordion>
|
||||||
|
|
||||||
|
<Accordion title="Completing work">
|
||||||
|
```
|
||||||
|
I've finished implementing the authentication system described in task 2. All tests are passing.
|
||||||
|
Please mark it as complete and tell me what I should work on next.
|
||||||
|
```
|
||||||
|
</Accordion>
|
||||||
|
|
||||||
|
<Accordion title="Analyzing complexity">
|
||||||
|
```
|
||||||
|
Can you analyze the complexity of our tasks to help me understand which ones need to be broken down further?
|
||||||
|
```
|
||||||
|
</Accordion>
|
||||||
|
|
||||||
|
<Accordion title="Viewing complexity report">
|
||||||
|
```
|
||||||
|
Can you show me the complexity report in a more readable format?
|
||||||
|
```
|
||||||
|
</Accordion>
|
||||||
|
</AccordionGroup>
|
||||||
210
apps/docs/best-practices/advanced-tasks.mdx
Normal file
210
apps/docs/best-practices/advanced-tasks.mdx
Normal file
@@ -0,0 +1,210 @@
|
|||||||
|
---
|
||||||
|
title: Advanced Tasks
|
||||||
|
sidebarTitle: "Advanced Tasks"
|
||||||
|
---
|
||||||
|
|
||||||
|
## AI-Driven Development Workflow
|
||||||
|
|
||||||
|
The Cursor agent is pre-configured (via the rules file) to follow this workflow:
|
||||||
|
|
||||||
|
### 1. Task Discovery and Selection
|
||||||
|
|
||||||
|
Ask the agent to list available tasks:
|
||||||
|
|
||||||
|
```
|
||||||
|
What tasks are available to work on next?
|
||||||
|
```
|
||||||
|
|
||||||
|
```
|
||||||
|
Can you show me tasks 1, 3, and 5 to understand their current status?
|
||||||
|
```
|
||||||
|
|
||||||
|
The agent will:
|
||||||
|
|
||||||
|
- Run `task-master list` to see all tasks
|
||||||
|
- Run `task-master next` to determine the next task to work on
|
||||||
|
- Run `task-master show 1,3,5` to display multiple tasks with interactive options
|
||||||
|
- Analyze dependencies to determine which tasks are ready to be worked on
|
||||||
|
- Prioritize tasks based on priority level and ID order
|
||||||
|
- Suggest the next task(s) to implement
|
||||||
|
|
||||||
|
### 2. Task Implementation
|
||||||
|
|
||||||
|
When implementing a task, the agent will:
|
||||||
|
|
||||||
|
- Reference the task's details section for implementation specifics
|
||||||
|
- Consider dependencies on previous tasks
|
||||||
|
- Follow the project's coding standards
|
||||||
|
- Create appropriate tests based on the task's testStrategy
|
||||||
|
|
||||||
|
You can ask:
|
||||||
|
|
||||||
|
```
|
||||||
|
Let's implement task 3. What does it involve?
|
||||||
|
```
|
||||||
|
|
||||||
|
### 2.1. Viewing Multiple Tasks
|
||||||
|
|
||||||
|
For efficient context gathering and batch operations:
|
||||||
|
|
||||||
|
```
|
||||||
|
Show me tasks 5, 7, and 9 so I can plan my implementation approach.
|
||||||
|
```
|
||||||
|
|
||||||
|
The agent will:
|
||||||
|
|
||||||
|
- Run `task-master show 5,7,9` to display a compact summary table
|
||||||
|
- Show task status, priority, and progress indicators
|
||||||
|
- Provide an interactive action menu with batch operations
|
||||||
|
- Allow you to perform group actions like marking multiple tasks as in-progress
|
||||||
|
|
||||||
|
### 3. Task Verification
|
||||||
|
|
||||||
|
Before marking a task as complete, verify it according to:
|
||||||
|
|
||||||
|
- The task's specified testStrategy
|
||||||
|
- Any automated tests in the codebase
|
||||||
|
- Manual verification if required
|
||||||
|
|
||||||
|
### 4. Task Completion
|
||||||
|
|
||||||
|
When a task is completed, tell the agent:
|
||||||
|
|
||||||
|
```
|
||||||
|
Task 3 is now complete. Please update its status.
|
||||||
|
```
|
||||||
|
|
||||||
|
The agent will execute:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
task-master set-status --id=3 --status=done
|
||||||
|
```
|
||||||
|
|
||||||
|
### 5. Handling Implementation Drift
|
||||||
|
|
||||||
|
If during implementation, you discover that:
|
||||||
|
|
||||||
|
- The current approach differs significantly from what was planned
|
||||||
|
- Future tasks need to be modified due to current implementation choices
|
||||||
|
- New dependencies or requirements have emerged
|
||||||
|
|
||||||
|
Tell the agent:
|
||||||
|
|
||||||
|
```
|
||||||
|
We've decided to use MongoDB instead of PostgreSQL. Can you update all future tasks (from ID 4) to reflect this change?
|
||||||
|
```
|
||||||
|
|
||||||
|
The agent will execute:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
task-master update --from=4 --prompt="Now we are using MongoDB instead of PostgreSQL."
|
||||||
|
|
||||||
|
# OR, if research is needed to find best practices for MongoDB:
|
||||||
|
task-master update --from=4 --prompt="Update to use MongoDB, researching best practices" --research
|
||||||
|
```
|
||||||
|
|
||||||
|
This will rewrite or re-scope subsequent tasks in tasks.json while preserving completed work.
|
||||||
|
|
||||||
|
### 6. Reorganizing Tasks
|
||||||
|
|
||||||
|
If you need to reorganize your task structure:
|
||||||
|
|
||||||
|
```
|
||||||
|
I think subtask 5.2 would fit better as part of task 7 instead. Can you move it there?
|
||||||
|
```
|
||||||
|
|
||||||
|
The agent will execute:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
task-master move --from=5.2 --to=7.3
|
||||||
|
```
|
||||||
|
|
||||||
|
You can reorganize tasks in various ways:
|
||||||
|
|
||||||
|
- Moving a standalone task to become a subtask: `--from=5 --to=7`
|
||||||
|
- Moving a subtask to become a standalone task: `--from=5.2 --to=7`
|
||||||
|
- Moving a subtask to a different parent: `--from=5.2 --to=7.3`
|
||||||
|
- Reordering subtasks within the same parent: `--from=5.2 --to=5.4`
|
||||||
|
- Moving a task to a new ID position: `--from=5 --to=25` (even if task 25 doesn't exist yet)
|
||||||
|
- Moving multiple tasks at once: `--from=10,11,12 --to=16,17,18` (the lists must have the same number of IDs; Taskmaster maps each source ID to the destination ID in the same position)
|
||||||
|
|
||||||
|
When moving tasks to new IDs:
|
||||||
|
|
||||||
|
- The system automatically creates placeholder tasks for non-existent destination IDs
|
||||||
|
- This prevents accidental data loss during reorganization
|
||||||
|
- Any tasks that depend on moved tasks will have their dependencies updated
|
||||||
|
- When moving a parent task, all its subtasks are automatically moved with it and renumbered
|
||||||
|
|
||||||
|
This is particularly useful as your project understanding evolves and you need to refine your task structure.
|
||||||
|
|
||||||
|
### 7. Resolving Merge Conflicts with Tasks
|
||||||
|
|
||||||
|
When working with a team, you might encounter merge conflicts in your tasks.json file if multiple team members create tasks on different branches. The move command makes resolving these conflicts straightforward:
|
||||||
|
|
||||||
|
```
|
||||||
|
I just merged the main branch and there's a conflict with tasks.json. My teammates created tasks 10-15 while I created tasks 10-12 on my branch. Can you help me resolve this?
|
||||||
|
```
|
||||||
|
|
||||||
|
The agent will help you:
|
||||||
|
|
||||||
|
1. Keep your teammates' tasks (10-15)
|
||||||
|
2. Move your tasks to new positions to avoid conflicts:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Move your tasks to new positions (e.g., 16-18)
|
||||||
|
task-master move --from=10 --to=16
|
||||||
|
task-master move --from=11 --to=17
|
||||||
|
task-master move --from=12 --to=18
|
||||||
|
```
|
||||||
|
|
||||||
|
This approach preserves everyone's work while maintaining a clean task structure, making it much easier to handle task conflicts than trying to manually merge JSON files.
|
||||||
|
|
||||||
|
### 8. Breaking Down Complex Tasks
|
||||||
|
|
||||||
|
For complex tasks that need more granularity:
|
||||||
|
|
||||||
|
```
|
||||||
|
Task 5 seems complex. Can you break it down into subtasks?
|
||||||
|
```
|
||||||
|
|
||||||
|
The agent will execute:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
task-master expand --id=5 --num=3
|
||||||
|
```
|
||||||
|
|
||||||
|
You can provide additional context:
|
||||||
|
|
||||||
|
```
|
||||||
|
Please break down task 5 with a focus on security considerations.
|
||||||
|
```
|
||||||
|
|
||||||
|
The agent will execute:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
task-master expand --id=5 --prompt="Focus on security aspects"
|
||||||
|
```
|
||||||
|
|
||||||
|
You can also expand all pending tasks:
|
||||||
|
|
||||||
|
```
|
||||||
|
Please break down all pending tasks into subtasks.
|
||||||
|
```
|
||||||
|
|
||||||
|
The agent will execute:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
task-master expand --all
|
||||||
|
```
|
||||||
|
|
||||||
|
For research-backed subtask generation using the configured research model:
|
||||||
|
|
||||||
|
```
|
||||||
|
Please break down task 5 using research-backed generation.
|
||||||
|
```
|
||||||
|
|
||||||
|
The agent will execute:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
task-master expand --id=5 --research
|
||||||
|
```
|
||||||
317
apps/docs/best-practices/configuration-advanced.mdx
Normal file
317
apps/docs/best-practices/configuration-advanced.mdx
Normal file
@@ -0,0 +1,317 @@
|
|||||||
|
---
|
||||||
|
title: Advanced Configuration
|
||||||
|
sidebarTitle: "Advanced Configuration"
|
||||||
|
---
|
||||||
|
|
||||||
|
|
||||||
|
Taskmaster uses two primary methods for configuration:
|
||||||
|
|
||||||
|
1. **`.taskmaster/config.json` File (Recommended - New Structure)**
|
||||||
|
|
||||||
|
- This JSON file stores most configuration settings, including AI model selections, parameters, logging levels, and project defaults.
|
||||||
|
- **Location:** This file is created in the `.taskmaster/` directory when you run the `task-master models --setup` interactive setup or initialize a new project with `task-master init`.
|
||||||
|
- **Migration:** Existing projects with `.taskmasterconfig` in the root will continue to work, but should be migrated to the new structure using `task-master migrate`.
|
||||||
|
- **Management:** Use the `task-master models --setup` command (or `models` MCP tool) to interactively create and manage this file. You can also set specific models directly using `task-master models --set-<role>=<model_id>`, adding `--ollama` or `--openrouter` flags for custom models. Manual editing is possible but not recommended unless you understand the structure.
|
||||||
|
- **Example Structure:**
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"models": {
|
||||||
|
"main": {
|
||||||
|
"provider": "anthropic",
|
||||||
|
"modelId": "claude-3-7-sonnet-20250219",
|
||||||
|
"maxTokens": 64000,
|
||||||
|
"temperature": 0.2,
|
||||||
|
"baseURL": "https://api.anthropic.com/v1"
|
||||||
|
},
|
||||||
|
"research": {
|
||||||
|
"provider": "perplexity",
|
||||||
|
"modelId": "sonar-pro",
|
||||||
|
"maxTokens": 8700,
|
||||||
|
"temperature": 0.1,
|
||||||
|
"baseURL": "https://api.perplexity.ai/v1"
|
||||||
|
},
|
||||||
|
"fallback": {
|
||||||
|
"provider": "anthropic",
|
||||||
|
"modelId": "claude-3-5-sonnet",
|
||||||
|
"maxTokens": 64000,
|
||||||
|
"temperature": 0.2
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"global": {
|
||||||
|
"logLevel": "info",
|
||||||
|
"debug": false,
|
||||||
|
"defaultSubtasks": 5,
|
||||||
|
"defaultPriority": "medium",
|
||||||
|
"defaultTag": "master",
|
||||||
|
"projectName": "Your Project Name",
|
||||||
|
"ollamaBaseURL": "http://localhost:11434/api",
|
||||||
|
"azureBaseURL": "https://your-endpoint.azure.com/openai/deployments",
|
||||||
|
"vertexProjectId": "your-gcp-project-id",
|
||||||
|
"vertexLocation": "us-central1"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
2. **Legacy `.taskmasterconfig` File (Backward Compatibility)**
|
||||||
|
|
||||||
|
- For projects that haven't migrated to the new structure yet.
|
||||||
|
- **Location:** Project root directory.
|
||||||
|
- **Migration:** Use `task-master migrate` to move this to `.taskmaster/config.json`.
|
||||||
|
- **Deprecation:** While still supported, you'll see warnings encouraging migration to the new structure.
|
||||||
|
|
||||||
|
## Environment Variables (`.env` file or MCP `env` block - For API Keys Only)
|
||||||
|
|
||||||
|
- Used **exclusively** for sensitive API keys and specific endpoint URLs.
|
||||||
|
- **Location:**
|
||||||
|
- For CLI usage: Create a `.env` file in your project root.
|
||||||
|
- For MCP/Cursor usage: Configure keys in the `env` section of your `.cursor/mcp.json` file.
|
||||||
|
- **Required API Keys (Depending on configured providers):**
|
||||||
|
- `ANTHROPIC_API_KEY`: Your Anthropic API key.
|
||||||
|
- `PERPLEXITY_API_KEY`: Your Perplexity API key.
|
||||||
|
- `OPENAI_API_KEY`: Your OpenAI API key.
|
||||||
|
- `GOOGLE_API_KEY`: Your Google API key (also used for Vertex AI provider).
|
||||||
|
- `MISTRAL_API_KEY`: Your Mistral API key.
|
||||||
|
- `AZURE_OPENAI_API_KEY`: Your Azure OpenAI API key (also requires `AZURE_OPENAI_ENDPOINT`).
|
||||||
|
- `OPENROUTER_API_KEY`: Your OpenRouter API key.
|
||||||
|
- `XAI_API_KEY`: Your X-AI API key.
|
||||||
|
- **Optional Endpoint Overrides:**
|
||||||
|
- **Per-role `baseURL` in `.taskmasterconfig`:** You can add a `baseURL` property to any model role (`main`, `research`, `fallback`) to override the default API endpoint for that provider. If omitted, the provider's standard endpoint is used.
|
||||||
|
- **Environment Variable Overrides (`<PROVIDER>_BASE_URL`):** For greater flexibility, especially with third-party services, you can set an environment variable like `OPENAI_BASE_URL` or `MISTRAL_BASE_URL`. This will override any `baseURL` set in the configuration file for that provider. This is the recommended way to connect to OpenAI-compatible APIs.
|
||||||
|
- `AZURE_OPENAI_ENDPOINT`: Required if using Azure OpenAI key (can also be set as `baseURL` for the Azure model role).
|
||||||
|
- `OLLAMA_BASE_URL`: Override the default Ollama API URL (Default: `http://localhost:11434/api`).
|
||||||
|
- `VERTEX_PROJECT_ID`: Your Google Cloud project ID for Vertex AI. Required when using the 'vertex' provider.
|
||||||
|
- `VERTEX_LOCATION`: Google Cloud region for Vertex AI (e.g., 'us-central1'). Default is 'us-central1'.
|
||||||
|
- `GOOGLE_APPLICATION_CREDENTIALS`: Path to service account credentials JSON file for Google Cloud auth (alternative to API key for Vertex AI).
|
||||||
|
|
||||||
|
**Important:** Settings like model ID selections (`main`, `research`, `fallback`), `maxTokens`, `temperature`, `logLevel`, `defaultSubtasks`, `defaultPriority`, and `projectName` are **managed in `.taskmaster/config.json`** (or `.taskmasterconfig` for unmigrated projects), not environment variables.
|
||||||
|
|
||||||
|
## Tagged Task Lists Configuration (v0.17+)
|
||||||
|
|
||||||
|
Taskmaster includes a tagged task lists system for multi-context task management.
|
||||||
|
|
||||||
|
### Global Tag Settings
|
||||||
|
|
||||||
|
```json
|
||||||
|
"global": {
|
||||||
|
"defaultTag": "master"
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
- **`defaultTag`** (string): Default tag context for new operations (default: "master")
|
||||||
|
|
||||||
|
### Git Integration
|
||||||
|
|
||||||
|
Task Master provides manual git integration through the `--from-branch` option:
|
||||||
|
|
||||||
|
- **Manual Tag Creation**: Use `task-master add-tag --from-branch` to create a tag based on your current git branch name
|
||||||
|
- **User Control**: No automatic tag switching - you control when and how tags are created
|
||||||
|
- **Flexible Workflow**: Supports any git workflow without imposing rigid branch-tag mappings
|
||||||
|
|
||||||
|
## State Management File
|
||||||
|
|
||||||
|
Taskmaster uses `.taskmaster/state.json` to track tagged system runtime information:
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"currentTag": "master",
|
||||||
|
"lastSwitched": "2025-06-11T20:26:12.598Z",
|
||||||
|
"migrationNoticeShown": true
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
- **`currentTag`**: Currently active tag context
|
||||||
|
- **`lastSwitched`**: Timestamp of last tag switch
|
||||||
|
- **`migrationNoticeShown`**: Whether migration notice has been displayed
|
||||||
|
|
||||||
|
This file is automatically created during tagged system migration and should not be manually edited.
|
||||||
|
|
||||||
|
## Example `.env` File (for API Keys)
|
||||||
|
|
||||||
|
```
|
||||||
|
# Required API keys for providers configured in .taskmaster/config.json
|
||||||
|
ANTHROPIC_API_KEY=sk-ant-api03-your-key-here
|
||||||
|
PERPLEXITY_API_KEY=pplx-your-key-here
|
||||||
|
# OPENAI_API_KEY=sk-your-key-here
|
||||||
|
# GOOGLE_API_KEY=AIzaSy...
|
||||||
|
# AZURE_OPENAI_API_KEY=your-azure-openai-api-key-here
|
||||||
|
# etc.
|
||||||
|
|
||||||
|
# Optional Endpoint Overrides
|
||||||
|
# Use a specific provider's base URL, e.g., for an OpenAI-compatible API
|
||||||
|
# OPENAI_BASE_URL=https://api.third-party.com/v1
|
||||||
|
#
|
||||||
|
# Azure OpenAI Configuration
|
||||||
|
# AZURE_OPENAI_ENDPOINT=https://your-resource-name.openai.azure.com/ or https://your-endpoint-name.cognitiveservices.azure.com/openai/deployments
|
||||||
|
# OLLAMA_BASE_URL=http://custom-ollama-host:11434/api
|
||||||
|
|
||||||
|
# Google Vertex AI Configuration (Required if using 'vertex' provider)
|
||||||
|
# VERTEX_PROJECT_ID=your-gcp-project-id
|
||||||
|
```
|
||||||
|
|
||||||
|
## Troubleshooting
|
||||||
|
|
||||||
|
### Configuration Errors
|
||||||
|
|
||||||
|
- If Task Master reports errors about missing configuration or cannot find the config file, run `task-master models --setup` in your project root to create or repair the file.
|
||||||
|
- For new projects, config will be created at `.taskmaster/config.json`. For legacy projects, you may want to use `task-master migrate` to move to the new structure.
|
||||||
|
- Ensure API keys are correctly placed in your `.env` file (for CLI) or `.cursor/mcp.json` (for MCP) and are valid for the providers selected in your config file.
|
||||||
|
|
||||||
|
### If `task-master init` doesn't respond:
|
||||||
|
|
||||||
|
Try running it with Node directly:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
node node_modules/claude-task-master/scripts/init.js
|
||||||
|
```
|
||||||
|
|
||||||
|
Or clone the repository and run:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
git clone https://github.com/eyaltoledano/claude-task-master.git
|
||||||
|
cd claude-task-master
|
||||||
|
node scripts/init.js
|
||||||
|
```
|
||||||
|
|
||||||
|
## Provider-Specific Configuration
|
||||||
|
|
||||||
|
### Google Vertex AI Configuration
|
||||||
|
|
||||||
|
Google Vertex AI is Google Cloud's enterprise AI platform and requires specific configuration:
|
||||||
|
|
||||||
|
1. **Prerequisites**:
|
||||||
|
- A Google Cloud account with Vertex AI API enabled
|
||||||
|
- Either a Google API key with Vertex AI permissions OR a service account with appropriate roles
|
||||||
|
- A Google Cloud project ID
|
||||||
|
2. **Authentication Options**:
|
||||||
|
- **API Key**: Set the `GOOGLE_API_KEY` environment variable
|
||||||
|
- **Service Account**: Set `GOOGLE_APPLICATION_CREDENTIALS` to point to your service account JSON file
|
||||||
|
3. **Required Configuration**:
|
||||||
|
- Set `VERTEX_PROJECT_ID` to your Google Cloud project ID
|
||||||
|
- Set `VERTEX_LOCATION` to your preferred Google Cloud region (default: us-central1)
|
||||||
|
4. **Example Setup**:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# In .env file
|
||||||
|
GOOGLE_API_KEY=AIzaSyXXXXXXXXXXXXXXXXXXXXXXXXX
|
||||||
|
VERTEX_PROJECT_ID=my-gcp-project-123
|
||||||
|
VERTEX_LOCATION=us-central1
|
||||||
|
```
|
||||||
|
|
||||||
|
Or using service account:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# In .env file
|
||||||
|
GOOGLE_APPLICATION_CREDENTIALS=/path/to/service-account.json
|
||||||
|
VERTEX_PROJECT_ID=my-gcp-project-123
|
||||||
|
VERTEX_LOCATION=us-central1
|
||||||
|
```
|
||||||
|
|
||||||
|
5. **In .taskmaster/config.json**:
|
||||||
|
```json
|
||||||
|
"global": {
|
||||||
|
"vertexProjectId": "my-gcp-project-123",
|
||||||
|
"vertexLocation": "us-central1"
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Azure OpenAI Configuration
|
||||||
|
|
||||||
|
Azure OpenAI provides enterprise-grade OpenAI models through Microsoft's Azure cloud platform and requires specific configuration:
|
||||||
|
|
||||||
|
1. **Prerequisites**:
|
||||||
|
- An Azure account with an active subscription
|
||||||
|
- Azure OpenAI service resource created in the Azure portal
|
||||||
|
- Azure OpenAI API key and endpoint URL
|
||||||
|
- Deployed models (e.g., gpt-4o, gpt-4o-mini, gpt-4.1, etc) in your Azure OpenAI resource
|
||||||
|
|
||||||
|
2. **Authentication**:
|
||||||
|
- Set the `AZURE_OPENAI_API_KEY` environment variable with your Azure OpenAI API key
|
||||||
|
- Configure the endpoint URL using one of the methods below
|
||||||
|
|
||||||
|
3. **Configuration Options**:
|
||||||
|
|
||||||
|
**Option 1: Using Global Azure Base URL (affects all Azure models)**
|
||||||
|
```json
|
||||||
|
// In .taskmaster/config.json
|
||||||
|
{
|
||||||
|
"models": {
|
||||||
|
"main": {
|
||||||
|
"provider": "azure",
|
||||||
|
"modelId": "gpt-4o",
|
||||||
|
"maxTokens": 16000,
|
||||||
|
"temperature": 0.7
|
||||||
|
},
|
||||||
|
"fallback": {
|
||||||
|
"provider": "azure",
|
||||||
|
"modelId": "gpt-4o-mini",
|
||||||
|
"maxTokens": 10000,
|
||||||
|
"temperature": 0.7
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"global": {
|
||||||
|
    "azureBaseURL": "https://your-resource-name.openai.azure.com/openai/deployments"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
**Option 2: Using Per-Model Base URLs (recommended for flexibility)**
|
||||||
|
```json
|
||||||
|
// In .taskmaster/config.json
|
||||||
|
{
|
||||||
|
"models": {
|
||||||
|
"main": {
|
||||||
|
"provider": "azure",
|
||||||
|
"modelId": "gpt-4o",
|
||||||
|
"maxTokens": 16000,
|
||||||
|
"temperature": 0.7,
|
||||||
|
      "baseURL": "https://your-resource-name.openai.azure.com/openai/deployments"
|
||||||
|
},
|
||||||
|
"research": {
|
||||||
|
"provider": "perplexity",
|
||||||
|
"modelId": "sonar-pro",
|
||||||
|
"maxTokens": 8700,
|
||||||
|
"temperature": 0.1
|
||||||
|
},
|
||||||
|
"fallback": {
|
||||||
|
"provider": "azure",
|
||||||
|
"modelId": "gpt-4o-mini",
|
||||||
|
"maxTokens": 10000,
|
||||||
|
"temperature": 0.7,
|
||||||
|
      "baseURL": "https://your-resource-name.openai.azure.com/openai/deployments"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
4. **Environment Variables**:
|
||||||
|
```bash
|
||||||
|
# In .env file
|
||||||
|
AZURE_OPENAI_API_KEY=your-azure-openai-api-key-here
|
||||||
|
|
||||||
|
# Optional: Override endpoint for all Azure models
|
||||||
|
AZURE_OPENAI_ENDPOINT=https://your-resource-name.openai.azure.com/openai/deployments
|
||||||
|
```
|
||||||
|
|
||||||
|
5. **Important Notes**:
|
||||||
|
- **Model Deployment Names**: The `modelId` in your configuration should match the **deployment name** you created in Azure OpenAI Studio, not the underlying model name
|
||||||
|
- **Base URL Priority**: Per-model `baseURL` settings override the global `azureBaseURL` setting
|
||||||
|
- **Endpoint Format**: When using per-model `baseURL`, use the full path including `/openai/deployments`
|
||||||
|
|
||||||
|
6. **Troubleshooting**:
|
||||||
|
|
||||||
|
**"Resource not found" errors:**
|
||||||
|
- Ensure your `baseURL` includes the full path: `https://your-resource-name.openai.azure.com/openai/deployments`
|
||||||
|
- Verify that your deployment name in `modelId` exactly matches what's configured in Azure OpenAI Studio
|
||||||
|
- Check that your Azure OpenAI resource is in the correct region and properly deployed
|
||||||
|
|
||||||
|
**Authentication errors:**
|
||||||
|
- Verify your `AZURE_OPENAI_API_KEY` is correct and has not expired
|
||||||
|
- Ensure your Azure OpenAI resource has the necessary permissions
|
||||||
|
- Check that your subscription has not been suspended or reached quota limits
|
||||||
|
|
||||||
|
**Model availability errors:**
|
||||||
|
- Confirm the model is deployed in your Azure OpenAI resource
|
||||||
|
- Verify the deployment name matches your configuration exactly (case-sensitive)
|
||||||
|
- Ensure the model deployment is in a "Succeeded" state in Azure OpenAI Studio
|
||||||
|
- Ensure you're not getting rate limited by `maxTokens`; maintain an appropriate Tokens per Minute rate limit (TPM) in your deployment.
|
||||||
8
apps/docs/best-practices/index.mdx
Normal file
8
apps/docs/best-practices/index.mdx
Normal file
@@ -0,0 +1,8 @@
|
|||||||
|
---
|
||||||
|
title: Intro to Advanced Usage
|
||||||
|
sidebarTitle: "Advanced Usage"
|
||||||
|
---
|
||||||
|
|
||||||
|
# Best Practices
|
||||||
|
|
||||||
|
Explore advanced tips, recommended workflows, and best practices for getting the most out of Task Master.
|
||||||
209
apps/docs/capabilities/cli-root-commands.mdx
Normal file
209
apps/docs/capabilities/cli-root-commands.mdx
Normal file
@@ -0,0 +1,209 @@
|
|||||||
|
---
|
||||||
|
title: CLI Commands
|
||||||
|
sidebarTitle: "CLI Commands"
|
||||||
|
---
|
||||||
|
|
||||||
|
|
||||||
|
<AccordionGroup>
|
||||||
|
<Accordion title="Parse PRD">
|
||||||
|
```bash
|
||||||
|
# Parse a PRD file and generate tasks
|
||||||
|
task-master parse-prd <prd-file.txt>
|
||||||
|
|
||||||
|
# Limit the number of tasks generated
|
||||||
|
task-master parse-prd <prd-file.txt> --num-tasks=10
|
||||||
|
```
|
||||||
|
</Accordion>
|
||||||
|
|
||||||
|
<Accordion title="List Tasks">
|
||||||
|
```bash
|
||||||
|
# List all tasks
|
||||||
|
task-master list
|
||||||
|
|
||||||
|
# List tasks with a specific status
|
||||||
|
task-master list --status=<status>
|
||||||
|
|
||||||
|
# List tasks with subtasks
|
||||||
|
task-master list --with-subtasks
|
||||||
|
|
||||||
|
# List tasks with a specific status and include subtasks
|
||||||
|
task-master list --status=<status> --with-subtasks
|
||||||
|
```
|
||||||
|
</Accordion>
|
||||||
|
|
||||||
|
<Accordion title="Show Next Task">
|
||||||
|
```bash
|
||||||
|
# Show the next task to work on based on dependencies and status
|
||||||
|
task-master next
|
||||||
|
```
|
||||||
|
</Accordion>
|
||||||
|
|
||||||
|
<Accordion title="Show Specific Task">
|
||||||
|
```bash
|
||||||
|
# Show details of a specific task
|
||||||
|
task-master show <id>
|
||||||
|
# or
|
||||||
|
task-master show --id=<id>
|
||||||
|
|
||||||
|
# View a specific subtask (e.g., subtask 2 of task 1)
|
||||||
|
task-master show 1.2
|
||||||
|
```
|
||||||
|
</Accordion>
|
||||||
|
|
||||||
|
<Accordion title="Update Tasks">
|
||||||
|
```bash
|
||||||
|
# Update tasks from a specific ID and provide context
|
||||||
|
task-master update --from=<id> --prompt="<prompt>"
|
||||||
|
```
|
||||||
|
</Accordion>
|
||||||
|
|
||||||
|
<Accordion title="Update a Specific Task">
|
||||||
|
```bash
|
||||||
|
# Update a single task by ID with new information
|
||||||
|
task-master update-task --id=<id> --prompt="<prompt>"
|
||||||
|
|
||||||
|
# Use research-backed updates with Perplexity AI
|
||||||
|
task-master update-task --id=<id> --prompt="<prompt>" --research
|
||||||
|
```
|
||||||
|
</Accordion>
|
||||||
|
|
||||||
|
<Accordion title="Update a Subtask">
|
||||||
|
```bash
|
||||||
|
# Append additional information to a specific subtask
|
||||||
|
task-master update-subtask --id=<parentId.subtaskId> --prompt="<prompt>"
|
||||||
|
|
||||||
|
# Example: Add details about API rate limiting to subtask 2 of task 5
|
||||||
|
task-master update-subtask --id=5.2 --prompt="Add rate limiting of 100 requests per minute"
|
||||||
|
|
||||||
|
# Use research-backed updates with Perplexity AI
|
||||||
|
task-master update-subtask --id=<parentId.subtaskId> --prompt="<prompt>" --research
|
||||||
|
```
|
||||||
|
|
||||||
|
Unlike the `update-task` command which replaces task information, the `update-subtask` command _appends_ new information to the existing subtask details, marking it with a timestamp. This is useful for iteratively enhancing subtasks while preserving the original content.
|
||||||
|
</Accordion>
|
||||||
|
|
||||||
|
<Accordion title="Generate Task Files">
|
||||||
|
```bash
|
||||||
|
# Generate individual task files from tasks.json
|
||||||
|
task-master generate
|
||||||
|
```
|
||||||
|
</Accordion>
|
||||||
|
|
||||||
|
<Accordion title="Set Task Status">
|
||||||
|
```bash
|
||||||
|
# Set status of a single task
|
||||||
|
task-master set-status --id=<id> --status=<status>
|
||||||
|
|
||||||
|
# Set status for multiple tasks
|
||||||
|
task-master set-status --id=1,2,3 --status=<status>
|
||||||
|
|
||||||
|
# Set status for subtasks
|
||||||
|
task-master set-status --id=1.1,1.2 --status=<status>
|
||||||
|
```
|
||||||
|
|
||||||
|
When marking a task as "done", all of its subtasks will automatically be marked as "done" as well.
|
||||||
|
</Accordion>
|
||||||
|
|
||||||
|
<Accordion title="Expand Tasks">
|
||||||
|
```bash
|
||||||
|
# Expand a specific task with subtasks
|
||||||
|
task-master expand --id=<id> --num=<number>
|
||||||
|
|
||||||
|
# Expand with additional context
|
||||||
|
task-master expand --id=<id> --prompt="<context>"
|
||||||
|
|
||||||
|
# Expand all pending tasks
|
||||||
|
task-master expand --all
|
||||||
|
|
||||||
|
# Force regeneration of subtasks for tasks that already have them
|
||||||
|
task-master expand --all --force
|
||||||
|
|
||||||
|
# Research-backed subtask generation for a specific task
|
||||||
|
task-master expand --id=<id> --research
|
||||||
|
|
||||||
|
# Research-backed generation for all tasks
|
||||||
|
task-master expand --all --research
|
||||||
|
```
|
||||||
|
</Accordion>
|
||||||
|
|
||||||
|
<Accordion title="Clear Subtasks">
|
||||||
|
```bash
|
||||||
|
# Clear subtasks from a specific task
|
||||||
|
task-master clear-subtasks --id=<id>
|
||||||
|
|
||||||
|
# Clear subtasks from multiple tasks
|
||||||
|
task-master clear-subtasks --id=1,2,3
|
||||||
|
|
||||||
|
# Clear subtasks from all tasks
|
||||||
|
task-master clear-subtasks --all
|
||||||
|
```
|
||||||
|
</Accordion>
|
||||||
|
|
||||||
|
<Accordion title="Analyze Task Complexity">
|
||||||
|
```bash
|
||||||
|
# Analyze complexity of all tasks
|
||||||
|
task-master analyze-complexity
|
||||||
|
|
||||||
|
# Save report to a custom location
|
||||||
|
task-master analyze-complexity --output=my-report.json
|
||||||
|
|
||||||
|
# Use a specific LLM model
|
||||||
|
task-master analyze-complexity --model=claude-3-opus-20240229
|
||||||
|
|
||||||
|
# Set a custom complexity threshold (1-10)
|
||||||
|
task-master analyze-complexity --threshold=6
|
||||||
|
|
||||||
|
# Use an alternative tasks file
|
||||||
|
task-master analyze-complexity --file=custom-tasks.json
|
||||||
|
|
||||||
|
# Use Perplexity AI for research-backed complexity analysis
|
||||||
|
task-master analyze-complexity --research
|
||||||
|
```
|
||||||
|
</Accordion>
|
||||||
|
|
||||||
|
<Accordion title="View Complexity Report">
|
||||||
|
```bash
|
||||||
|
# Display the task complexity analysis report
|
||||||
|
task-master complexity-report
|
||||||
|
|
||||||
|
# View a report at a custom location
|
||||||
|
task-master complexity-report --file=my-report.json
|
||||||
|
```
|
||||||
|
</Accordion>
|
||||||
|
|
||||||
|
<Accordion title="Managing Task Dependencies">
|
||||||
|
```bash
|
||||||
|
# Add a dependency to a task
|
||||||
|
task-master add-dependency --id=<id> --depends-on=<id>
|
||||||
|
|
||||||
|
# Remove a dependency from a task
|
||||||
|
task-master remove-dependency --id=<id> --depends-on=<id>
|
||||||
|
|
||||||
|
# Validate dependencies without fixing them
|
||||||
|
task-master validate-dependencies
|
||||||
|
|
||||||
|
# Find and fix invalid dependencies automatically
|
||||||
|
task-master fix-dependencies
|
||||||
|
```
|
||||||
|
</Accordion>
|
||||||
|
|
||||||
|
<Accordion title="Add a New Task">
|
||||||
|
```bash
|
||||||
|
# Add a new task using AI
|
||||||
|
task-master add-task --prompt="Description of the new task"
|
||||||
|
|
||||||
|
# Add a task with dependencies
|
||||||
|
task-master add-task --prompt="Description" --dependencies=1,2,3
|
||||||
|
|
||||||
|
# Add a task with priority
|
||||||
|
task-master add-task --prompt="Description" --priority=high
|
||||||
|
```
|
||||||
|
</Accordion>
|
||||||
|
|
||||||
|
<Accordion title="Initialize a Project">
|
||||||
|
```bash
|
||||||
|
# Initialize a new project with Task Master structure
|
||||||
|
task-master init
|
||||||
|
```
|
||||||
|
</Accordion>
|
||||||
|
</AccordionGroup>
|
||||||
241
apps/docs/capabilities/index.mdx
Normal file
241
apps/docs/capabilities/index.mdx
Normal file
@@ -0,0 +1,241 @@
|
|||||||
|
---
|
||||||
|
title: Technical Capabilities
|
||||||
|
sidebarTitle: "Technical Capabilities"
|
||||||
|
---
|
||||||
|
|
||||||
|
# Capabilities (Technical)
|
||||||
|
|
||||||
|
Discover the technical capabilities of Task Master, including supported models, integrations, and more.
|
||||||
|
|
||||||
|
# CLI Interface Synopsis
|
||||||
|
|
||||||
|
This document outlines the command-line interface (CLI) for the Task Master application, as defined in `bin/task-master.js` and `scripts/modules/commands.js`. This guide is intended for those writing user-facing documentation to understand how users interact with the application from the command line.
|
||||||
|
|
||||||
|
## Entry Point
|
||||||
|
|
||||||
|
The main entry point for the CLI is the `task-master` command, which is an executable script that spawns the main application logic in `scripts/dev.js`.
|
||||||
|
|
||||||
|
## Global Options
|
||||||
|
|
||||||
|
The following options are available for all commands:
|
||||||
|
|
||||||
|
- `-h, --help`: Display help information.
|
||||||
|
- `--version`: Display the application's version.
|
||||||
|
|
||||||
|
## Commands
|
||||||
|
|
||||||
|
The CLI is organized into a series of commands, each with its own set of options. The following is a summary of the available commands, categorized by their functionality.
|
||||||
|
|
||||||
|
### 1. Task and Subtask Management
|
||||||
|
|
||||||
|
- **`add`**: Creates a new task using an AI-powered prompt.
|
||||||
|
- `--prompt <prompt>`: The prompt to use for generating the task.
|
||||||
|
- `--dependencies <dependencies>`: A comma-separated list of task IDs that this task depends on.
|
||||||
|
- `--priority <priority>`: The priority of the task (e.g., `high`, `medium`, `low`).
|
||||||
|
- **`add-subtask`**: Adds a subtask to a parent task.
|
||||||
|
- `--parent-id <parentId>`: The ID of the parent task.
|
||||||
|
- `--task-id <taskId>`: The ID of an existing task to convert to a subtask.
|
||||||
|
- `--title <title>`: The title of the new subtask.
|
||||||
|
- **`remove`**: Removes one or more tasks or subtasks.
|
||||||
|
- `--ids <ids>`: A comma-separated list of task or subtask IDs to remove.
|
||||||
|
- **`remove-subtask`**: Removes a subtask from its parent.
|
||||||
|
- `--id <subtaskId>`: The ID of the subtask to remove (in the format `parentId.subtaskId`).
|
||||||
|
- `--convert-to-task`: Converts the subtask to a standalone task.
|
||||||
|
- **`update`**: Updates multiple tasks starting from a specific ID.
|
||||||
|
- `--from <fromId>`: The ID of the task to start updating from.
|
||||||
|
- `--prompt <prompt>`: The new context to apply to the tasks.
|
||||||
|
- **`update-task`**: Updates a single task.
|
||||||
|
- `--id <taskId>`: The ID of the task to update.
|
||||||
|
- `--prompt <prompt>`: The new context to apply to the task.
|
||||||
|
- **`update-subtask`**: Appends information to a subtask.
|
||||||
|
- `--id <subtaskId>`: The ID of the subtask to update (in the format `parentId.subtaskId`).
|
||||||
|
- `--prompt <prompt>`: The information to append to the subtask.
|
||||||
|
- **`move`**: Moves a task or subtask.
|
||||||
|
- `--from <sourceId>`: The ID of the task or subtask to move.
|
||||||
|
- `--to <destinationId>`: The destination ID.
|
||||||
|
- **`clear-subtasks`**: Clears all subtasks from one or more tasks.
|
||||||
|
- `--ids <ids>`: A comma-separated list of task IDs.
|
||||||
|
|
||||||
|
### 2. Task Information and Status
|
||||||
|
|
||||||
|
- **`list`**: Lists all tasks.
|
||||||
|
- `--status <status>`: Filters tasks by status.
|
||||||
|
- `--with-subtasks`: Includes subtasks in the list.
|
||||||
|
- **`show`**: Shows the details of a specific task.
|
||||||
|
- `--id <taskId>`: The ID of the task to show.
|
||||||
|
- **`next`**: Shows the next task to work on.
|
||||||
|
- **`set-status`**: Sets the status of a task or subtask.
|
||||||
|
- `--id <id>`: The ID of the task or subtask.
|
||||||
|
- `--status <status>`: The new status.
|
||||||
|
|
||||||
|
### 3. Task Analysis and Expansion
|
||||||
|
|
||||||
|
- **`parse-prd`**: Parses a PRD to generate tasks.
|
||||||
|
- `--file <file>`: The path to the PRD file.
|
||||||
|
- `--num-tasks <numTasks>`: The number of tasks to generate.
|
||||||
|
- **`expand`**: Expands a task into subtasks.
|
||||||
|
- `--id <taskId>`: The ID of the task to expand.
|
||||||
|
- `--num-subtasks <numSubtasks>`: The number of subtasks to generate.
|
||||||
|
- **`expand-all`**: Expands all eligible tasks.
|
||||||
|
- `--num-subtasks <numSubtasks>`: The number of subtasks to generate for each task.
|
||||||
|
- **`analyze-complexity`**: Analyzes task complexity.
|
||||||
|
- `--file <file>`: The path to the tasks file.
|
||||||
|
- **`complexity-report`**: Displays the complexity analysis report.
|
||||||
|
|
||||||
|
### 4. Project and Configuration
|
||||||
|
|
||||||
|
- **`init`**: Initializes a new project.
|
||||||
|
- **`generate`**: Generates individual task files.
|
||||||
|
- **`migrate`**: Migrates a project to the new directory structure.
|
||||||
|
- **`research`**: Performs AI-powered research.
|
||||||
|
- `--query <query>`: The research query.
|
||||||
|
|
||||||
|
This synopsis provides a comprehensive overview of the CLI commands and their options, which should be helpful for creating user-facing documentation.
|
||||||
|
|
||||||
|
|
||||||
|
# Core Implementation Synopsis
|
||||||
|
|
||||||
|
This document provides a high-level overview of the core implementation of the Task Master application, focusing on the functionalities exposed through `scripts/modules/task-manager.js`. This serves as a guide for understanding the application's capabilities when writing user-facing documentation.
|
||||||
|
|
||||||
|
## Core Concepts
|
||||||
|
|
||||||
|
The application revolves around the management of tasks and subtasks, which are stored in a `tasks.json` file. The core logic provides functionalities to create, read, update, and delete tasks and subtasks, as well as manage their dependencies and statuses.
|
||||||
|
|
||||||
|
### Task Structure
|
||||||
|
|
||||||
|
A task is a JSON object with the following key properties:
|
||||||
|
|
||||||
|
- `id`: A unique number identifying the task.
|
||||||
|
- `title`: A string representing the task's title.
|
||||||
|
- `description`: A string providing a brief description of the task.
|
||||||
|
- `details`: A string containing detailed information about the task.
|
||||||
|
- `testStrategy`: A string describing how to test the task.
|
||||||
|
- `status`: A string representing the task's current status (e.g., `pending`, `in-progress`, `done`).
|
||||||
|
- `dependencies`: An array of task IDs that this task depends on.
|
||||||
|
- `priority`: A string representing the task's priority (e.g., `high`, `medium`, `low`).
|
||||||
|
- `subtasks`: An array of subtask objects.
|
||||||
|
|
||||||
|
A subtask has a similar structure to a task but is nested within a parent task.
|
||||||
|
|
||||||
|
## Feature Categories
|
||||||
|
|
||||||
|
The core functionalities can be categorized as follows:
|
||||||
|
|
||||||
|
### 1. Task and Subtask Management
|
||||||
|
|
||||||
|
These functions are the bread and butter of the application, allowing for the creation, modification, and deletion of tasks and subtasks.
|
||||||
|
|
||||||
|
- **`addTask(prompt, dependencies, priority)`**: Creates a new task using an AI-powered prompt to generate the title, description, details, and test strategy. It can also be used to create a task manually by providing the task data directly.
|
||||||
|
- **`addSubtask(parentId, existingTaskId, newSubtaskData)`**: Adds a subtask to a parent task. It can either convert an existing task into a subtask or create a new subtask from scratch.
|
||||||
|
- **`removeTask(taskIds)`**: Removes one or more tasks or subtasks.
|
||||||
|
- **`removeSubtask(subtaskId, convertToTask)`**: Removes a subtask from its parent. It can optionally convert the subtask into a standalone task.
|
||||||
|
- **`updateTaskById(taskId, prompt)`**: Updates a task's information based on a prompt.
|
||||||
|
- **`updateSubtaskById(subtaskId, prompt)`**: Appends additional information to a subtask's details.
|
||||||
|
- **`updateTasks(fromId, prompt)`**: Updates multiple tasks starting from a specific ID based on a new context.
|
||||||
|
- **`moveTask(sourceId, destinationId)`**: Moves a task or subtask to a new position.
|
||||||
|
- **`clearSubtasks(taskIds)`**: Clears all subtasks from one or more tasks.
|
||||||
|
|
||||||
|
### 2. Task Information and Status
|
||||||
|
|
||||||
|
These functions are used to retrieve information about tasks and manage their status.
|
||||||
|
|
||||||
|
- **`listTasks(statusFilter, withSubtasks)`**: Lists all tasks, with options to filter by status and include subtasks.
|
||||||
|
- **`findTaskById(taskId)`**: Finds a task by its ID.
|
||||||
|
- **`taskExists(taskId)`**: Checks if a task with a given ID exists.
|
||||||
|
- **`setTaskStatus(taskIdInput, newStatus)`**: Sets the status of a task or subtask.
|
||||||
|
|
||||||
|
- **`updateSingleTaskStatus(taskIdInput, newStatus)`**: A helper function to update the status of a single task or subtask.
|
||||||
|
- **`findNextTask()`**: Determines the next task to work on based on dependencies and status.
|
||||||
|
|
||||||
|
### 3. Task Analysis and Expansion
|
||||||
|
|
||||||
|
These functions leverage AI to analyze and break down tasks.
|
||||||
|
|
||||||
|
- **`parsePRD(prdPath, numTasks)`**: Parses a Product Requirements Document (PRD) to generate an initial set of tasks.
|
||||||
|
- **`expandTask(taskId, numSubtasks)`**: Expands a task into a specified number of subtasks using AI.
|
||||||
|
- **`expandAllTasks(numSubtasks)`**: Expands all eligible pending or in-progress tasks.
|
||||||
|
- **`analyzeTaskComplexity(options)`**: Analyzes the complexity of tasks and generates recommendations for expansion.
|
||||||
|
- **`readComplexityReport()`**: Reads the complexity analysis report.
|
||||||
|
|
||||||
|
### 4. Dependency Management
|
||||||
|
|
||||||
|
These functions are crucial for managing the relationships between tasks.
|
||||||
|
|
||||||
|
- **`isTaskDependentOn(task, targetTaskId)`**: Checks if a task has a direct or indirect dependency on another task.
|
||||||
|
|
||||||
|
### 5. Project and Configuration
|
||||||
|
|
||||||
|
These functions are for managing the project and its configuration.
|
||||||
|
|
||||||
|
- **`generateTaskFiles()`**: Generates individual task files from `tasks.json`.
|
||||||
|
- **`migrateProject()`**: Migrates the project to the new `.taskmaster` directory structure.
|
||||||
|
- **`performResearch(query, options)`**: Performs AI-powered research with project context.
|
||||||
|
|
||||||
|
This overview should provide a solid foundation for creating user-facing documentation. For more detailed information on each function, refer to the source code in `scripts/modules/task-manager/`.
|
||||||
|
|
||||||
|
|
||||||
|
# MCP Interface Synopsis
|
||||||
|
|
||||||
|
This document provides an overview of the MCP (Model Context Protocol) interface for the Task Master application. The MCP interface is defined in the `mcp-server/` directory and exposes the application's core functionalities as a set of tools that can be called remotely.
|
||||||
|
|
||||||
|
## Core Concepts
|
||||||
|
|
||||||
|
The MCP interface is built on top of the `fastmcp` library and registers a set of tools that correspond to the core functionalities of the Task Master application. These tools are defined in the `mcp-server/src/tools/` directory and are registered with the MCP server in `mcp-server/src/tools/index.js`.
|
||||||
|
|
||||||
|
Each tool is defined with a name, a description, and a set of parameters that are validated using the `zod` library. The `execute` function of each tool calls the corresponding core logic function from `scripts/modules/task-manager.js`.
|
||||||
|
|
||||||
|
## Tool Categories
|
||||||
|
|
||||||
|
The MCP tools can be categorized in the same way as the core functionalities:
|
||||||
|
|
||||||
|
### 1. Task and Subtask Management
|
||||||
|
|
||||||
|
- **`add_task`**: Creates a new task.
|
||||||
|
- **`add_subtask`**: Adds a subtask to a parent task.
|
||||||
|
- **`remove_task`**: Removes one or more tasks or subtasks.
|
||||||
|
- **`remove_subtask`**: Removes a subtask from its parent.
|
||||||
|
- **`update_task`**: Updates a single task.
|
||||||
|
- **`update_subtask`**: Appends information to a subtask.
|
||||||
|
- **`update`**: Updates multiple tasks.
|
||||||
|
- **`move_task`**: Moves a task or subtask.
|
||||||
|
- **`clear_subtasks`**: Clears all subtasks from one or more tasks.
|
||||||
|
|
||||||
|
### 2. Task Information and Status
|
||||||
|
|
||||||
|
- **`get_tasks`**: Lists all tasks.
|
||||||
|
- **`get_task`**: Shows the details of a specific task.
|
||||||
|
- **`next_task`**: Shows the next task to work on.
|
||||||
|
- **`set_task_status`**: Sets the status of a task or subtask.
|
||||||
|
|
||||||
|
### 3. Task Analysis and Expansion
|
||||||
|
|
||||||
|
- **`parse_prd`**: Parses a PRD to generate tasks.
|
||||||
|
- **`expand_task`**: Expands a task into subtasks.
|
||||||
|
- **`expand_all`**: Expands all eligible tasks.
|
||||||
|
- **`analyze_project_complexity`**: Analyzes task complexity.
|
||||||
|
- **`complexity_report`**: Displays the complexity analysis report.
|
||||||
|
|
||||||
|
### 4. Dependency Management
|
||||||
|
|
||||||
|
- **`add_dependency`**: Adds a dependency to a task.
|
||||||
|
- **`remove_dependency`**: Removes a dependency from a task.
|
||||||
|
- **`validate_dependencies`**: Validates the dependencies of all tasks.
|
||||||
|
- **`fix_dependencies`**: Fixes any invalid dependencies.
|
||||||
|
|
||||||
|
### 5. Project and Configuration
|
||||||
|
|
||||||
|
- **`initialize_project`**: Initializes a new project.
|
||||||
|
- **`generate`**: Generates individual task files.
|
||||||
|
- **`models`**: Manages AI model configurations.
|
||||||
|
- **`research`**: Performs AI-powered research.
|
||||||
|
|
||||||
|
### 6. Tag Management
|
||||||
|
|
||||||
|
- **`add_tag`**: Creates a new tag.
|
||||||
|
- **`delete_tag`**: Deletes a tag.
|
||||||
|
- **`list_tags`**: Lists all tags.
|
||||||
|
- **`use_tag`**: Switches to a different tag.
|
||||||
|
- **`rename_tag`**: Renames a tag.
|
||||||
|
- **`copy_tag`**: Copies a tag.
|
||||||
|
|
||||||
|
This synopsis provides a clear overview of the MCP interface and its available tools, which will be valuable for anyone writing documentation for developers who need to interact with the Task Master application programmatically.
|
||||||
68
apps/docs/capabilities/mcp.mdx
Normal file
68
apps/docs/capabilities/mcp.mdx
Normal file
@@ -0,0 +1,68 @@
|
|||||||
|
---
|
||||||
|
title: MCP Tools
|
||||||
|
sidebarTitle: "MCP Tools"
|
||||||
|
---
|
||||||
|
|
||||||
|
# MCP Tools
|
||||||
|
|
||||||
|
This document provides an overview of the MCP (Model Context Protocol) interface for the Task Master application. The MCP interface is defined in the `mcp-server/` directory and exposes the application's core functionalities as a set of tools that can be called remotely.
|
||||||
|
|
||||||
|
## Core Concepts
|
||||||
|
|
||||||
|
The MCP interface is built on top of the `fastmcp` library and registers a set of tools that correspond to the core functionalities of the Task Master application. These tools are defined in the `mcp-server/src/tools/` directory and are registered with the MCP server in `mcp-server/src/tools/index.js`.
|
||||||
|
|
||||||
|
Each tool is defined with a name, a description, and a set of parameters that are validated using the `zod` library. The `execute` function of each tool calls the corresponding core logic function from `scripts/modules/task-manager.js`.
|
||||||
|
|
||||||
|
## Tool Categories
|
||||||
|
|
||||||
|
The MCP tools can be categorized in the same way as the core functionalities:
|
||||||
|
|
||||||
|
### 1. Task and Subtask Management
|
||||||
|
|
||||||
|
- **`add_task`**: Creates a new task.
|
||||||
|
- **`add_subtask`**: Adds a subtask to a parent task.
|
||||||
|
- **`remove_task`**: Removes one or more tasks or subtasks.
|
||||||
|
- **`remove_subtask`**: Removes a subtask from its parent.
|
||||||
|
- **`update_task`**: Updates a single task.
|
||||||
|
- **`update_subtask`**: Appends information to a subtask.
|
||||||
|
- **`update`**: Updates multiple tasks.
|
||||||
|
- **`move_task`**: Moves a task or subtask.
|
||||||
|
- **`clear_subtasks`**: Clears all subtasks from one or more tasks.
|
||||||
|
|
||||||
|
### 2. Task Information and Status
|
||||||
|
|
||||||
|
- **`get_tasks`**: Lists all tasks.
|
||||||
|
- **`get_task`**: Shows the details of a specific task.
|
||||||
|
- **`next_task`**: Shows the next task to work on.
|
||||||
|
- **`set_task_status`**: Sets the status of a task or subtask.
|
||||||
|
|
||||||
|
### 3. Task Analysis and Expansion
|
||||||
|
|
||||||
|
- **`parse_prd`**: Parses a PRD to generate tasks.
|
||||||
|
- **`expand_task`**: Expands a task into subtasks.
|
||||||
|
- **`expand_all`**: Expands all eligible tasks.
|
||||||
|
- **`analyze_project_complexity`**: Analyzes task complexity.
|
||||||
|
- **`complexity_report`**: Displays the complexity analysis report.
|
||||||
|
|
||||||
|
### 4. Dependency Management
|
||||||
|
|
||||||
|
- **`add_dependency`**: Adds a dependency to a task.
|
||||||
|
- **`remove_dependency`**: Removes a dependency from a task.
|
||||||
|
- **`validate_dependencies`**: Validates the dependencies of all tasks.
|
||||||
|
- **`fix_dependencies`**: Fixes any invalid dependencies.
|
||||||
|
|
||||||
|
### 5. Project and Configuration
|
||||||
|
|
||||||
|
- **`initialize_project`**: Initializes a new project.
|
||||||
|
- **`generate`**: Generates individual task files.
|
||||||
|
- **`models`**: Manages AI model configurations.
|
||||||
|
- **`research`**: Performs AI-powered research.
|
||||||
|
|
||||||
|
### 6. Tag Management
|
||||||
|
|
||||||
|
- **`add_tag`**: Creates a new tag.
|
||||||
|
- **`delete_tag`**: Deletes a tag.
|
||||||
|
- **`list_tags`**: Lists all tags.
|
||||||
|
- **`use_tag`**: Switches to a different tag.
|
||||||
|
- **`rename_tag`**: Renames a tag.
|
||||||
|
- **`copy_tag`**: Copies a tag.
|
||||||
163
apps/docs/capabilities/task-structure.mdx
Normal file
163
apps/docs/capabilities/task-structure.mdx
Normal file
@@ -0,0 +1,163 @@
|
|||||||
|
---
|
||||||
|
title: "Task Structure"
|
||||||
|
sidebarTitle: "Task Structure"
|
||||||
|
description: "Tasks in Task Master follow a specific format designed to provide comprehensive information for both humans and AI assistants."
|
||||||
|
---
|
||||||
|
|
||||||
|
## Task Fields in tasks.json
|
||||||
|
|
||||||
|
Tasks in tasks.json have the following structure:
|
||||||
|
|
||||||
|
| Field | Description | Example |
|
||||||
|
| -------------- | ---------------------------------------------- | ------------------------------------------------------ |
|
||||||
|
| `id` | Unique identifier for the task. | `1` |
|
||||||
|
| `title` | Brief, descriptive title. | `"Initialize Repo"` |
|
||||||
|
| `description` | What the task involves. | `"Create a new repository, set up initial structure."` |
|
||||||
|
| `status` | Current state. | `"pending"`, `"done"`, `"deferred"` |
|
||||||
|
| `dependencies` | Prerequisite task IDs. ✅ Completed, ⏱️ Pending | `[1, 2]` |
|
||||||
|
| `priority` | Task importance. | `"high"`, `"medium"`, `"low"` |
|
||||||
|
| `details` | Implementation instructions. | `"Use GitHub client ID/secret, handle callback..."` |
|
||||||
|
| `testStrategy` | How to verify success. | `"Deploy and confirm 'Hello World' response."` |
|
||||||
|
| `subtasks` | Nested subtasks related to the main task. | `[{"id": 1, "title": "Configure OAuth", ...}]` |
|
||||||
|
|
||||||
|
## Task File Format
|
||||||
|
|
||||||
|
Individual task files follow this format:
|
||||||
|
|
||||||
|
```
|
||||||
|
# Task ID: <id>
|
||||||
|
# Title: <title>
|
||||||
|
# Status: <status>
|
||||||
|
# Dependencies: <comma-separated list of dependency IDs>
|
||||||
|
# Priority: <priority>
|
||||||
|
# Description: <brief description>
|
||||||
|
# Details:
|
||||||
|
<detailed implementation notes>
|
||||||
|
|
||||||
|
# Test Strategy:
|
||||||
|
<verification approach>
|
||||||
|
```
|
||||||
|
|
||||||
|
## Features in Detail
|
||||||
|
|
||||||
|
<AccordionGroup>
|
||||||
|
<Accordion title="Analyzing Task Complexity">
|
||||||
|
The `analyze-complexity` command:
|
||||||
|
|
||||||
|
- Analyzes each task using AI to assess its complexity on a scale of 1-10
|
||||||
|
- Recommends optimal number of subtasks based on configured DEFAULT_SUBTASKS
|
||||||
|
- Generates tailored prompts for expanding each task
|
||||||
|
- Creates a comprehensive JSON report with ready-to-use commands
|
||||||
|
- Saves the report to scripts/task-complexity-report.json by default
|
||||||
|
|
||||||
|
The generated report contains:
|
||||||
|
|
||||||
|
- Complexity analysis for each task (scored 1-10)
|
||||||
|
- Recommended number of subtasks based on complexity
|
||||||
|
- AI-generated expansion prompts customized for each task
|
||||||
|
- Ready-to-run expansion commands directly within each task analysis
|
||||||
|
</Accordion>
|
||||||
|
|
||||||
|
<Accordion title="Viewing Complexity Report">
|
||||||
|
The `complexity-report` command:
|
||||||
|
|
||||||
|
- Displays a formatted, easy-to-read version of the complexity analysis report
|
||||||
|
- Shows tasks organized by complexity score (highest to lowest)
|
||||||
|
- Provides complexity distribution statistics (low, medium, high)
|
||||||
|
- Highlights tasks recommended for expansion based on threshold score
|
||||||
|
- Includes ready-to-use expansion commands for each complex task
|
||||||
|
- If no report exists, offers to generate one on the spot
|
||||||
|
</Accordion>
|
||||||
|
|
||||||
|
<Accordion title="Smart Task Expansion">
|
||||||
|
The `expand` command automatically checks for and uses the complexity report:
|
||||||
|
|
||||||
|
When a complexity report exists:
|
||||||
|
|
||||||
|
- Tasks are automatically expanded using the recommended subtask count and prompts
|
||||||
|
- When expanding all tasks, they're processed in order of complexity (highest first)
|
||||||
|
- Research-backed generation is preserved from the complexity analysis
|
||||||
|
- You can still override recommendations with explicit command-line options
|
||||||
|
|
||||||
|
Example workflow:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Generate the complexity analysis report with research capabilities
|
||||||
|
task-master analyze-complexity --research
|
||||||
|
|
||||||
|
# Review the report in a readable format
|
||||||
|
task-master complexity-report
|
||||||
|
|
||||||
|
# Expand tasks using the optimized recommendations
|
||||||
|
task-master expand --id=8
|
||||||
|
# or expand all tasks
|
||||||
|
task-master expand --all
|
||||||
|
```
|
||||||
|
</Accordion>
|
||||||
|
|
||||||
|
<Accordion title="Finding the Next Task">
|
||||||
|
The `next` command:
|
||||||
|
|
||||||
|
- Identifies tasks that are pending/in-progress and have all dependencies satisfied
|
||||||
|
- Prioritizes tasks by priority level, dependency count, and task ID
|
||||||
|
- Displays comprehensive information about the selected task:
|
||||||
|
- Basic task details (ID, title, priority, dependencies)
|
||||||
|
- Implementation details
|
||||||
|
- Subtasks (if they exist)
|
||||||
|
- Provides contextual suggested actions:
|
||||||
|
- Command to mark the task as in-progress
|
||||||
|
- Command to mark the task as done
|
||||||
|
- Commands for working with subtasks
|
||||||
|
</Accordion>
|
||||||
|
|
||||||
|
<Accordion title="Viewing Specific Task Details">
|
||||||
|
The `show` command:
|
||||||
|
|
||||||
|
- Displays comprehensive details about a specific task or subtask
|
||||||
|
- Shows task status, priority, dependencies, and detailed implementation notes
|
||||||
|
- For parent tasks, displays all subtasks and their status
|
||||||
|
- For subtasks, shows parent task relationship
|
||||||
|
- Provides contextual action suggestions based on the task's state
|
||||||
|
- Works with both regular tasks and subtasks (using the format taskId.subtaskId)
|
||||||
|
</Accordion>
|
||||||
|
</AccordionGroup>
|
||||||
|
|
||||||
|
## Best Practices for AI-Driven Development
|
||||||
|
|
||||||
|
<CardGroup cols={2}>
|
||||||
|
<Card title="📝 Detailed PRD" icon="lightbulb">
|
||||||
|
The more detailed your PRD, the better the generated tasks will be.
|
||||||
|
</Card>
|
||||||
|
|
||||||
|
<Card title="👀 Review Tasks" icon="magnifying-glass">
|
||||||
|
After parsing the PRD, review the tasks to ensure they make sense and have appropriate dependencies.
|
||||||
|
</Card>
|
||||||
|
|
||||||
|
<Card title="📊 Analyze Complexity" icon="chart-line">
|
||||||
|
Use the complexity analysis feature to identify which tasks should be broken down further.
|
||||||
|
</Card>
|
||||||
|
|
||||||
|
<Card title="⛓️ Follow Dependencies" icon="link">
|
||||||
|
Always respect task dependencies - the Cursor agent will help with this.
|
||||||
|
</Card>
|
||||||
|
|
||||||
|
<Card title="🔄 Update As You Go" icon="arrows-rotate">
|
||||||
|
If your implementation diverges from the plan, use the update command to keep future tasks aligned.
|
||||||
|
</Card>
|
||||||
|
|
||||||
|
<Card title="📦 Break Down Tasks" icon="boxes-stacked">
|
||||||
|
Use the expand command to break down complex tasks into manageable subtasks.
|
||||||
|
</Card>
|
||||||
|
|
||||||
|
<Card title="🔄 Regenerate Files" icon="file-arrow-up">
|
||||||
|
After any updates to tasks.json, regenerate the task files to keep them in sync.
|
||||||
|
</Card>
|
||||||
|
|
||||||
|
<Card title="💬 Provide Context" icon="comment">
|
||||||
|
When asking the Cursor agent to help with a task, provide context about what you're trying to achieve.
|
||||||
|
</Card>
|
||||||
|
|
||||||
|
<Card title="✅ Validate Dependencies" icon="circle-check">
|
||||||
|
Periodically run the validate-dependencies command to check for invalid or circular dependencies.
|
||||||
|
</Card>
|
||||||
|
</CardGroup>
|
||||||
83
apps/docs/docs.json
Normal file
83
apps/docs/docs.json
Normal file
@@ -0,0 +1,83 @@
|
|||||||
|
{
|
||||||
|
"$schema": "https://mintlify.com/docs.json",
|
||||||
|
"theme": "mint",
|
||||||
|
"name": "Task Master",
|
||||||
|
"colors": {
|
||||||
|
"primary": "#3366CC",
|
||||||
|
"light": "#6699FF",
|
||||||
|
"dark": "#24478F"
|
||||||
|
},
|
||||||
|
"favicon": "/favicon.svg",
|
||||||
|
"navigation": {
|
||||||
|
"tabs": [
|
||||||
|
{
|
||||||
|
"tab": "Task Master Documentation",
|
||||||
|
"groups": [
|
||||||
|
{
|
||||||
|
"group": "Welcome",
|
||||||
|
"pages": ["introduction"]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"group": "Getting Started",
|
||||||
|
"pages": [
|
||||||
|
{
|
||||||
|
"group": "Quick Start",
|
||||||
|
"pages": [
|
||||||
|
"getting-started/quick-start/quick-start",
|
||||||
|
"getting-started/quick-start/requirements",
|
||||||
|
"getting-started/quick-start/installation",
|
||||||
|
"getting-started/quick-start/configuration-quick",
|
||||||
|
"getting-started/quick-start/prd-quick",
|
||||||
|
"getting-started/quick-start/tasks-quick",
|
||||||
|
"getting-started/quick-start/execute-quick"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"getting-started/faq",
|
||||||
|
"getting-started/contribute"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"group": "Best Practices",
|
||||||
|
"pages": [
|
||||||
|
"best-practices/index",
|
||||||
|
"best-practices/configuration-advanced",
|
||||||
|
"best-practices/advanced-tasks"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"group": "Technical Capabilities",
|
||||||
|
"pages": [
|
||||||
|
"capabilities/mcp",
|
||||||
|
"capabilities/cli-root-commands",
|
||||||
|
"capabilities/task-structure"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"global": {
|
||||||
|
"anchors": [
|
||||||
|
{
|
||||||
|
"anchor": "Github",
|
||||||
|
"href": "https://github.com/eyaltoledano/claude-task-master",
|
||||||
|
"icon": "github"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"anchor": "Discord",
|
||||||
|
"href": "https://discord.gg/fWJkU7rf",
|
||||||
|
"icon": "discord"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"logo": {
|
||||||
|
"light": "/logo/task-master-logo.png",
|
||||||
|
"dark": "/logo/task-master-logo.png"
|
||||||
|
},
|
||||||
|
"footer": {
|
||||||
|
"socials": {
|
||||||
|
"x": "https://x.com/TaskmasterAI",
|
||||||
|
"github": "https://github.com/eyaltoledano/claude-task-master"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
9
apps/docs/favicon.svg
Normal file
9
apps/docs/favicon.svg
Normal file
@@ -0,0 +1,9 @@
|
|||||||
|
<svg width="100" height="100" viewBox="0 0 100 100" xmlns="http://www.w3.org/2000/svg">
|
||||||
|
<!-- Blue form with check from logo -->
|
||||||
|
<rect x="16" y="10" width="68" height="80" rx="9" fill="#3366CC"/>
|
||||||
|
<polyline points="33,44 41,55 56,29" fill="none" stroke="#FFFFFF" stroke-width="6"/>
|
||||||
|
<circle cx="33" cy="64" r="4" fill="#FFFFFF"/>
|
||||||
|
<rect x="43" y="61" width="27" height="6" fill="#FFFFFF"/>
|
||||||
|
<circle cx="33" cy="77" r="4" fill="#FFFFFF"/>
|
||||||
|
<rect x="43" y="75" width="27" height="6" fill="#FFFFFF"/>
|
||||||
|
</svg>
|
||||||
|
After Width: | Height: | Size: 513 B |
335
apps/docs/getting-started/contribute.mdx
Normal file
335
apps/docs/getting-started/contribute.mdx
Normal file
@@ -0,0 +1,335 @@
|
|||||||
|
# Contributing to Task Master
|
||||||
|
|
||||||
|
Thank you for your interest in contributing to Task Master! We're excited to work with you and appreciate your help in making this project better. 🚀
|
||||||
|
|
||||||
|
## 🤝 Our Collaborative Approach
|
||||||
|
|
||||||
|
We're a **PR-friendly team** that values collaboration:
|
||||||
|
|
||||||
|
- ✅ **We review PRs quickly** - Usually within hours, not days
|
||||||
|
- ✅ **We're super reactive** - Expect fast feedback and engagement
|
||||||
|
- ✅ **We sometimes take over PRs** - If your contribution is valuable but needs cleanup, we might jump in to help finish it
|
||||||
|
- ✅ **We're open to all contributions** - From bug fixes to major features
|
||||||
|
|
||||||
|
**We don't mind AI-generated code**, but we do expect you to:
|
||||||
|
|
||||||
|
- ✅ **Review and understand** what the AI generated
|
||||||
|
- ✅ **Test the code thoroughly** before submitting
|
||||||
|
- ✅ **Ensure it's well-written** and follows our patterns
|
||||||
|
- ❌ **Don't submit "AI slop"** - untested, unreviewed AI output
|
||||||
|
|
||||||
|
> **Why this matters**: We spend significant time reviewing PRs. Help us help you by submitting quality contributions that save everyone time!
|
||||||
|
|
||||||
|
## 🚀 Quick Start for Contributors
|
||||||
|
|
||||||
|
### 1. Fork and Clone
|
||||||
|
|
||||||
|
```bash
|
||||||
|
git clone https://github.com/YOUR_USERNAME/claude-task-master.git
|
||||||
|
cd claude-task-master
|
||||||
|
npm install
|
||||||
|
```
|
||||||
|
|
||||||
|
### 2. Create a Feature Branch
|
||||||
|
|
||||||
|
**Important**: Always target the `next` branch, not `main`:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
git checkout next
|
||||||
|
git pull origin next
|
||||||
|
git checkout -b feature/your-feature-name
|
||||||
|
```
|
||||||
|
|
||||||
|
### 3. Make Your Changes
|
||||||
|
|
||||||
|
Follow our development guidelines below.
|
||||||
|
|
||||||
|
### 4. Test Everything Yourself
|
||||||
|
|
||||||
|
**Before submitting your PR**, ensure:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Run all tests
|
||||||
|
npm test
|
||||||
|
|
||||||
|
# Check formatting
|
||||||
|
npm run format-check
|
||||||
|
|
||||||
|
# Fix formatting if needed
|
||||||
|
npm run format
|
||||||
|
```
|
||||||
|
|
||||||
|
### 5. Create a Changeset
|
||||||
|
|
||||||
|
**Required for most changes**:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
npm run changeset
|
||||||
|
```
|
||||||
|
|
||||||
|
See the [Changeset Guidelines](#changeset-guidelines) below for details.
|
||||||
|
|
||||||
|
### 6. Submit Your PR
|
||||||
|
|
||||||
|
- Target the `next` branch
|
||||||
|
- Write a clear description
|
||||||
|
- Reference any related issues
|
||||||
|
|
||||||
|
## 📋 Development Guidelines
|
||||||
|
|
||||||
|
### Branch Strategy
|
||||||
|
|
||||||
|
- **`main`**: Production-ready code
|
||||||
|
- **`next`**: Development branch - **target this for PRs**
|
||||||
|
- **Feature branches**: `feature/description` or `fix/description`
|
||||||
|
|
||||||
|
### Code Quality Standards
|
||||||
|
|
||||||
|
1. **Write tests** for new functionality
|
||||||
|
2. **Follow existing patterns** in the codebase
|
||||||
|
3. **Add JSDoc comments** for functions
|
||||||
|
4. **Keep functions focused** and single-purpose
|
||||||
|
|
||||||
|
### Testing Requirements
|
||||||
|
|
||||||
|
Your PR **must pass all CI checks**:
|
||||||
|
|
||||||
|
- ✅ **Unit tests**: `npm test`
|
||||||
|
- ✅ **Format check**: `npm run format-check`
|
||||||
|
|
||||||
|
**Test your changes locally first** - this saves review time and shows you care about quality.
|
||||||
|
|
||||||
|
## 📦 Changeset Guidelines
|
||||||
|
|
||||||
|
We use [Changesets](https://github.com/changesets/changesets) to manage versioning and generate changelogs.
|
||||||
|
|
||||||
|
### When to Create a Changeset
|
||||||
|
|
||||||
|
**Always create a changeset for**:
|
||||||
|
|
||||||
|
- ✅ New features
|
||||||
|
- ✅ Bug fixes
|
||||||
|
- ✅ Breaking changes
|
||||||
|
- ✅ Performance improvements
|
||||||
|
- ✅ User-facing documentation updates
|
||||||
|
- ✅ Dependency updates that affect functionality
|
||||||
|
|
||||||
|
**Skip changesets for**:
|
||||||
|
|
||||||
|
- ❌ Internal documentation only
|
||||||
|
- ❌ Test-only changes
|
||||||
|
- ❌ Code formatting/linting
|
||||||
|
- ❌ Development tooling that doesn't affect users
|
||||||
|
|
||||||
|
### How to Create a Changeset
|
||||||
|
|
||||||
|
1. **After making your changes**:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
npm run changeset
|
||||||
|
```
|
||||||
|
|
||||||
|
2. **Choose the bump type**:
|
||||||
|
|
||||||
|
- **Major**: Breaking changes
|
||||||
|
- **Minor**: New features
|
||||||
|
- **Patch**: Bug fixes, docs, performance improvements
|
||||||
|
|
||||||
|
3. **Write a clear summary**:
|
||||||
|
|
||||||
|
```
|
||||||
|
Add support for custom AI models in MCP configuration
|
||||||
|
```
|
||||||
|
|
||||||
|
4. **Commit the changeset file** with your changes:
|
||||||
|
```bash
|
||||||
|
git add .changeset/*.md
|
||||||
|
git commit -m "feat: add custom AI model support"
|
||||||
|
```
|
||||||
|
|
||||||
|
### Changeset vs Git Commit Messages
|
||||||
|
|
||||||
|
- **Changeset summary**: User-facing, goes in CHANGELOG.md
|
||||||
|
- **Git commit**: Developer-facing, explains the technical change
|
||||||
|
|
||||||
|
Example:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Changeset summary (user-facing)
|
||||||
|
"Add support for custom Ollama models"
|
||||||
|
|
||||||
|
# Git commit message (developer-facing)
|
||||||
|
"feat(models): implement custom Ollama model validation
|
||||||
|
|
||||||
|
- Add model validation for custom Ollama endpoints
|
||||||
|
- Update configuration schema to support custom models
|
||||||
|
- Add tests for new validation logic"
|
||||||
|
```
|
||||||
|
|
||||||
|
## 🔧 Development Setup
|
||||||
|
|
||||||
|
### Prerequisites
|
||||||
|
|
||||||
|
- Node.js 18+
|
||||||
|
- npm or yarn
|
||||||
|
|
||||||
|
### Environment Setup
|
||||||
|
|
||||||
|
1. **Copy environment template**:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
cp .env.example .env
|
||||||
|
```
|
||||||
|
|
||||||
|
2. **Add your API keys** (for testing AI features):
|
||||||
|
```bash
|
||||||
|
ANTHROPIC_API_KEY=your_key_here
|
||||||
|
OPENAI_API_KEY=your_key_here
|
||||||
|
# Add others as needed
|
||||||
|
```
|
||||||
|
|
||||||
|
### Running Tests
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Run all tests
|
||||||
|
npm test
|
||||||
|
|
||||||
|
# Run tests in watch mode
|
||||||
|
npm run test:watch
|
||||||
|
|
||||||
|
# Run with coverage
|
||||||
|
npm run test:coverage
|
||||||
|
|
||||||
|
# Run E2E tests
|
||||||
|
npm run test:e2e
|
||||||
|
```
|
||||||
|
|
||||||
|
### Code Formatting
|
||||||
|
|
||||||
|
We use Prettier for consistent formatting:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Check formatting
|
||||||
|
npm run format-check
|
||||||
|
|
||||||
|
# Fix formatting
|
||||||
|
npm run format
|
||||||
|
```
|
||||||
|
|
||||||
|
## 📝 PR Guidelines
|
||||||
|
|
||||||
|
### Before Submitting
|
||||||
|
|
||||||
|
- [ ] **Target the `next` branch**
|
||||||
|
- [ ] **Test everything locally**
|
||||||
|
- [ ] **Run the full test suite**
|
||||||
|
- [ ] **Check code formatting**
|
||||||
|
- [ ] **Create a changeset** (if needed)
|
||||||
|
- [ ] **Re-read your changes** - ensure they're clean and well-thought-out
|
||||||
|
|
||||||
|
### PR Description Template
|
||||||
|
|
||||||
|
```markdown
|
||||||
|
## Description
|
||||||
|
|
||||||
|
Brief description of what this PR does.
|
||||||
|
|
||||||
|
## Type of Change
|
||||||
|
|
||||||
|
- [ ] Bug fix
|
||||||
|
- [ ] New feature
|
||||||
|
- [ ] Breaking change
|
||||||
|
- [ ] Documentation update
|
||||||
|
|
||||||
|
## Testing
|
||||||
|
|
||||||
|
- [ ] I have tested this locally
|
||||||
|
- [ ] All existing tests pass
|
||||||
|
- [ ] I have added tests for new functionality
|
||||||
|
|
||||||
|
## Changeset
|
||||||
|
|
||||||
|
- [ ] I have created a changeset (or this change doesn't need one)
|
||||||
|
|
||||||
|
## Additional Notes
|
||||||
|
|
||||||
|
Any additional context or notes for reviewers.
|
||||||
|
```
|
||||||
|
|
||||||
|
### What We Look For
|
||||||
|
|
||||||
|
✅ **Good PRs**:
|
||||||
|
|
||||||
|
- Clear, focused changes
|
||||||
|
- Comprehensive testing
|
||||||
|
- Good commit messages
|
||||||
|
- Proper changeset (when needed)
|
||||||
|
- Self-reviewed code
|
||||||
|
|
||||||
|
❌ **Avoid**:
|
||||||
|
|
||||||
|
- Massive PRs that change everything
|
||||||
|
- Untested code
|
||||||
|
- Formatting issues
|
||||||
|
- Missing changesets for user-facing changes
|
||||||
|
- AI-generated code that wasn't reviewed
|
||||||
|
|
||||||
|
## 🏗️ Project Structure
|
||||||
|
|
||||||
|
```
|
||||||
|
claude-task-master/
|
||||||
|
├── bin/ # CLI executables
|
||||||
|
├── mcp-server/ # MCP server implementation
|
||||||
|
├── scripts/ # Core task management logic
|
||||||
|
├── src/ # Shared utilities and providers and well refactored code (we are slowly moving everything here)
|
||||||
|
├── tests/ # Test files
|
||||||
|
├── docs/ # Documentation
|
||||||
|
└── .cursor/ # Cursor IDE rules and configuration
|
||||||
|
└── assets/ # Assets like rules and configuration for all IDEs
|
||||||
|
```
|
||||||
|
|
||||||
|
### Key Areas for Contribution
|
||||||
|
|
||||||
|
- **CLI Commands**: `scripts/modules/commands.js`
|
||||||
|
- **MCP Tools**: `mcp-server/src/tools/`
|
||||||
|
- **Core Logic**: `scripts/modules/task-manager/`
|
||||||
|
- **AI Providers**: `src/ai-providers/`
|
||||||
|
- **Tests**: `tests/`
|
||||||
|
|
||||||
|
## 🐛 Reporting Issues
|
||||||
|
|
||||||
|
### Bug Reports
|
||||||
|
|
||||||
|
Include:
|
||||||
|
|
||||||
|
- Task Master version
|
||||||
|
- Node.js version
|
||||||
|
- Operating system
|
||||||
|
- Steps to reproduce
|
||||||
|
- Expected vs actual behavior
|
||||||
|
- Error messages/logs
|
||||||
|
|
||||||
|
### Feature Requests
|
||||||
|
|
||||||
|
Include:
|
||||||
|
|
||||||
|
- Clear description of the feature
|
||||||
|
- Use case/motivation
|
||||||
|
- Proposed implementation (if you have ideas)
|
||||||
|
- Willingness to contribute
|
||||||
|
|
||||||
|
## 💬 Getting Help
|
||||||
|
|
||||||
|
- **Discord**: [Join our community](https://discord.gg/taskmasterai)
|
||||||
|
- **Issues**: [GitHub Issues](https://github.com/eyaltoledano/claude-task-master/issues)
|
||||||
|
- **Discussions**: [GitHub Discussions](https://github.com/eyaltoledano/claude-task-master/discussions)
|
||||||
|
|
||||||
|
## 📄 License
|
||||||
|
|
||||||
|
By contributing, you agree that your contributions will be licensed under the same license as the project (MIT with Commons Clause).
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
**Thank you for contributing to Task Master!** 🎉
|
||||||
|
|
||||||
|
Your contributions help make AI-driven development more accessible and efficient for everyone.
|
||||||
12
apps/docs/getting-started/faq.mdx
Normal file
12
apps/docs/getting-started/faq.mdx
Normal file
@@ -0,0 +1,12 @@
|
|||||||
|
---
|
||||||
|
title: FAQ
|
||||||
|
sidebarTitle: "FAQ"
|
||||||
|
---
|
||||||
|
|
||||||
|
Coming soon.
|
||||||
|
|
||||||
|
## 💬 Getting Help
|
||||||
|
|
||||||
|
- **Discord**: [Join our community](https://discord.gg/taskmasterai)
|
||||||
|
- **Issues**: [GitHub Issues](https://github.com/eyaltoledano/claude-task-master/issues)
|
||||||
|
- **Discussions**: [GitHub Discussions](https://github.com/eyaltoledano/claude-task-master/discussions)
|
||||||
112
apps/docs/getting-started/quick-start/configuration-quick.mdx
Normal file
112
apps/docs/getting-started/quick-start/configuration-quick.mdx
Normal file
@@ -0,0 +1,112 @@
|
|||||||
|
---
|
||||||
|
title: Configuration
|
||||||
|
sidebarTitle: "Configuration"
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
Before getting started with Task Master, you'll need to set up your API keys. There are a couple of ways to do this depending on whether you're using the CLI or working inside MCP. It's also a good time to start getting familiar with the other configuration options available — even if you don’t need to adjust them yet, knowing what’s possible will help down the line.
|
||||||
|
|
||||||
|
## API Key Setup
|
||||||
|
|
||||||
|
Task Master uses environment variables to securely store provider API keys and optional endpoint URLs.
|
||||||
|
|
||||||
|
### MCP Usage: mcp.json file
|
||||||
|
|
||||||
|
For MCP/Cursor usage: Configure keys in the env section of your .cursor/mcp.json file.
|
||||||
|
|
||||||
|
```json .cursor/mcp.json lines icon="code"
|
||||||
|
{
|
||||||
|
"mcpServers": {
|
||||||
|
"task-master-ai": {
|
||||||
|
"command": "node",
|
||||||
|
"args": ["./mcp-server/server.js"],
|
||||||
|
"env": {
|
||||||
|
"ANTHROPIC_API_KEY": "ANTHROPIC_API_KEY_HERE",
|
||||||
|
"PERPLEXITY_API_KEY": "PERPLEXITY_API_KEY_HERE",
|
||||||
|
"OPENAI_API_KEY": "OPENAI_API_KEY_HERE",
|
||||||
|
"GOOGLE_API_KEY": "GOOGLE_API_KEY_HERE",
|
||||||
|
"XAI_API_KEY": "XAI_API_KEY_HERE",
|
||||||
|
"OPENROUTER_API_KEY": "OPENROUTER_API_KEY_HERE",
|
||||||
|
"MISTRAL_API_KEY": "MISTRAL_API_KEY_HERE",
|
||||||
|
"AZURE_OPENAI_API_KEY": "AZURE_OPENAI_API_KEY_HERE",
|
||||||
|
"OLLAMA_API_KEY": "OLLAMA_API_KEY_HERE",
|
||||||
|
"GITHUB_API_KEY": "GITHUB_API_KEY_HERE"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### CLI Usage: `.env` File
|
||||||
|
|
||||||
|
Create a `.env` file in your project root and include the keys for the providers you plan to use:
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
```bash .env lines icon="code"
|
||||||
|
# Required API keys for providers configured in .taskmaster/config.json
|
||||||
|
ANTHROPIC_API_KEY=sk-ant-api03-your-key-here
|
||||||
|
PERPLEXITY_API_KEY=pplx-your-key-here
|
||||||
|
# OPENAI_API_KEY=sk-your-key-here
|
||||||
|
# GOOGLE_API_KEY=AIzaSy...
|
||||||
|
# AZURE_OPENAI_API_KEY=your-azure-openai-api-key-here
|
||||||
|
# etc.
|
||||||
|
|
||||||
|
# Optional Endpoint Overrides
|
||||||
|
# Use a specific provider's base URL, e.g., for an OpenAI-compatible API
|
||||||
|
# OPENAI_BASE_URL=https://api.third-party.com/v1
|
||||||
|
#
|
||||||
|
# Azure OpenAI Configuration
|
||||||
|
# AZURE_OPENAI_ENDPOINT=https://your-resource-name.openai.azure.com/ or https://your-endpoint-name.cognitiveservices.azure.com/openai/deployments
|
||||||
|
# OLLAMA_BASE_URL=http://custom-ollama-host:11434/api
|
||||||
|
|
||||||
|
# Google Vertex AI Configuration (Required if using 'vertex' provider)
|
||||||
|
# VERTEX_PROJECT_ID=your-gcp-project-id
|
||||||
|
```
|
||||||
|
|
||||||
|
## What Else Can Be Configured?
|
||||||
|
|
||||||
|
The main configuration file (`.taskmaster/config.json`) allows you to control nearly every aspect of Task Master’s behavior. Here’s a high-level look at what you can customize:
|
||||||
|
|
||||||
|
<Tip>
|
||||||
|
You don’t need to configure everything up front. Most settings can be left as defaults or updated later as your workflow evolves.
|
||||||
|
</Tip>
|
||||||
|
|
||||||
|
<Accordion title="View Configuration Options">
|
||||||
|
|
||||||
|
### Models and Providers
|
||||||
|
- Role-based model setup: `main`, `research`, `fallback`
|
||||||
|
- Provider selection (Anthropic, OpenAI, Perplexity, etc.)
|
||||||
|
- Model IDs per role
|
||||||
|
- Temperature, max tokens, and other generation settings
|
||||||
|
- Custom base URLs for OpenAI-compatible APIs
|
||||||
|
|
||||||
|
### Global Settings
|
||||||
|
- `logLevel`: Logging verbosity
|
||||||
|
- `debug`: Enable/disable debug mode
|
||||||
|
- `projectName`: Optional name for your project
|
||||||
|
- `defaultTag`: Default tag for task grouping
|
||||||
|
- `defaultSubtasks`: Number of subtasks to auto-generate
|
||||||
|
- `defaultPriority`: Priority level for new tasks
|
||||||
|
|
||||||
|
### API Endpoint Overrides
|
||||||
|
- `ollamaBaseURL`: Custom Ollama server URL
|
||||||
|
- `azureBaseURL`: Global Azure endpoint
|
||||||
|
- `vertexProjectId`: Google Vertex AI project ID
|
||||||
|
- `vertexLocation`: Region for Vertex AI models
|
||||||
|
|
||||||
|
### Tag and Git Integration
|
||||||
|
- Default tag context per project
|
||||||
|
- Support for task isolation by tag
|
||||||
|
- Manual tag creation from Git branches
|
||||||
|
|
||||||
|
### State Management
|
||||||
|
- Active tag tracking
|
||||||
|
- Migration state
|
||||||
|
- Last tag switch timestamp
|
||||||
|
|
||||||
|
</Accordion>
|
||||||
|
|
||||||
|
<Note>
|
||||||
|
For advanced configuration options and detailed customization, see our [Advanced Configuration Guide](/docs/best-practices/configuration-advanced) page.
|
||||||
|
</Note>
|
||||||
59
apps/docs/getting-started/quick-start/execute-quick.mdx
Normal file
59
apps/docs/getting-started/quick-start/execute-quick.mdx
Normal file
@@ -0,0 +1,59 @@
|
|||||||
|
---
|
||||||
|
title: Executing Tasks
|
||||||
|
sidebarTitle: "Executing Tasks"
|
||||||
|
---
|
||||||
|
|
||||||
|
Now that your tasks are generated and reviewed, you are ready to begin executing.
|
||||||
|
|
||||||
|
## Select the Task to Work on: Next Task
|
||||||
|
|
||||||
|
Task Master has the "next" command to find the next task to work on. You can access it with the following request:
|
||||||
|
```
|
||||||
|
What's the next task I should work on? Please consider dependencies and priorities.
|
||||||
|
```
|
||||||
|
Alternatively you can use the CLI to show the next task
|
||||||
|
```bash
|
||||||
|
task-master next
|
||||||
|
```
|
||||||
|
|
||||||
|
## Discuss Task
|
||||||
|
When you know what task to work on next you can then start chatting with the agent to make sure it understands the plan of action.
|
||||||
|
|
||||||
|
You can tag relevant files and folders so it knows what context to pull up as it generates its plan. For example:
|
||||||
|
```
|
||||||
|
Please review Task 5 and confirm you understand how to execute before beginning. Refer to @models @api and @schema
|
||||||
|
```
|
||||||
|
The agent will begin analyzing the task and files and respond with the steps to complete the task.
|
||||||
|
|
||||||
|
## Agent Task execution
|
||||||
|
|
||||||
|
If you agree with the plan of action, tell the agent to get started.
|
||||||
|
```
|
||||||
|
You may begin. I believe in you.
|
||||||
|
```
|
||||||
|
|
||||||
|
## Review and Test
|
||||||
|
|
||||||
|
Once the agent is finished with the task you can refer to the task testing strategy to make sure it was completed correctly.
|
||||||
|
|
||||||
|
## Update Task Status
|
||||||
|
|
||||||
|
If the task was completed correctly you can update the status to done
|
||||||
|
|
||||||
|
```
|
||||||
|
Please mark Task 5 as done
|
||||||
|
```
|
||||||
|
The agent will execute
|
||||||
|
```bash
|
||||||
|
task-master set-status --id=5 --status=done
|
||||||
|
```
|
||||||
|
|
||||||
|
## Rules and Context
|
||||||
|
|
||||||
|
If you ran into problems and had to debug errors you can create new rules as you go. This helps build context on your codebase that helps the creation and execution of future tasks.
|
||||||
|
|
||||||
|
## On to the Next Task!
|
||||||
|
|
||||||
|
By now you have all you need to get started executing code faster and smarter with Task Master.
|
||||||
|
|
||||||
|
If you have any questions please check out [Frequently Asked Questions](/docs/getting-started/faq)
|
||||||
159
apps/docs/getting-started/quick-start/installation.mdx
Normal file
159
apps/docs/getting-started/quick-start/installation.mdx
Normal file
@@ -0,0 +1,159 @@
|
|||||||
|
---
|
||||||
|
title: Installation
|
||||||
|
sidebarTitle: "Installation"
|
||||||
|
---
|
||||||
|
|
||||||
|
Now that you have Node.js and your first API Key, you are ready to begin installing Task Master in one of three ways.
|
||||||
|
|
||||||
|
<Note>Cursor Users Can Use the One Click Install Below</Note>
|
||||||
|
<Accordion title="Quick Install for Cursor 1.0+ (One-Click)">
|
||||||
|
|
||||||
|
<a href="cursor://anysphere.cursor-deeplink/mcp/install?name=task-master-ai&config=eyJjb21tYW5kIjoibnB4IiwiYXJncyI6WyIteSIsIi0tcGFja2FnZT10YXNrLW1hc3Rlci1haSIsInRhc2stbWFzdGVyLWFpIl0sImVudiI6eyJBTlRIUk9QSUNfQVBJX0tFWSI6IllPVVJfQU5USFJPUElDX0FQSV9LRVlfSEVSRSIsIlBFUlBMRVhJVFlfQVBJX0tFWSI6IllPVVJfUEVSUExFWElUWV9BUElfS0VZX0hFUkUiLCJPUEVOQUlfQVBJX0tFWSI6IllPVVJfT1BFTkFJX0tFWV9IRVJFIiwiR09PR0xFX0FQSV9LRVkiOiJZT1VSX0dPT0dMRV9LRVlfSEVSRSIsIk1JU1RSQUxfQVBJX0tFWSI6IllPVVJfTUlTVFJBTF9LRVlfSEVSRSIsIk9QRU5ST1VURVJfQVBJX0tFWSI6IllPVVJfT1BFTlJPVVRFUl9LRVlfSEVSRSIsIlhBSV9BUElfS0VZIjoiWU9VUl9YQUlfS0VZX0hFUkUiLCJBWlVSRV9PUEVOQUJFX0FQSV9LRVkiOiJZT1VSX0FaVVJFX0tFWV9IRVJFIiwiT0xMQU1BX0FQSV9LRVkiOiJZT1VSX09MTEFNQV9BUElfS0VZX0hFUkUifX0%3D">
|
||||||
|
<img
|
||||||
|
className="block dark:hidden"
|
||||||
|
src="https://cursor.com/deeplink/mcp-install-light.png"
|
||||||
|
alt="Add Task Master MCP server to Cursor"
|
||||||
|
noZoom
|
||||||
|
/>
|
||||||
|
<img
|
||||||
|
className="hidden dark:block"
|
||||||
|
src="https://cursor.com/deeplink/mcp-install-dark.png"
|
||||||
|
alt="Add Task Master MCP server to Cursor"
|
||||||
|
noZoom
|
||||||
|
/>
|
||||||
|
</a>
|
||||||
|
|
||||||
|
Or click the copy button (top-right of code block) then paste into your browser:
|
||||||
|
|
||||||
|
```text
|
||||||
|
cursor://anysphere.cursor-deeplink/mcp/install?name=taskmaster-ai&config=eyJjb21tYW5kIjoibnB4IiwiYXJncyI6WyIteSIsIi0tcGFja2FnZT10YXNrLW1hc3Rlci1haSIsInRhc2stbWFzdGVyLWFpIl0sImVudiI6eyJBTlRIUk9QSUNfQVBJX0tFWSI6IllPVVJfQU5USFJPUElDX0FQSV9LRVlfSEVSRSIsIlBFUlBMRVhJVFlfQVBJX0tFWSI6IllPVVJfUEVSUExFWElUWV9BUElfS0VZX0hFUkUiLCJPUEVOQUlfQVBJX0tFWSI6IllPVVJfT1BFTkFJX0tFWV9IRVJFIiwiR09PR0xFX0FQSV9LRVkiOiJZT1VSX0dPT0dMRV9LRVlfSEVSRSIsIk1JU1RSQUxfQVBJX0tFWSI6IllPVVJfTUlTVFJBTF9LRVlfSEVSRSIsIk9QRU5ST1VURVJfQVBJX0tFWSI6IllPVVJfT1BFTlJPVVRFUl9LRVlfSEVSRSIsIlhBSV9BUElfS0VZIjoiWU9VUl9YQUlfS0VZX0hFUkUiLCJBWlVSRV9PUEVOQUlfQVBJX0tFWSI6IllPVVJfQVpVUkVfS0VZX0hFUkUiLCJPTExBTUFfQVBJX0tFWSI6IllPVVJfT0xMQU1BX0FQSV9LRVlfSEVSRSJ9fQo=
|
||||||
|
```
|
||||||
|
|
||||||
|
> **Note:** After clicking the link, you'll still need to add your API keys to the configuration. The link installs the MCP server with placeholder keys that you'll need to replace with your actual API keys.
|
||||||
|
</Accordion>
|
||||||
|
## Installation Options
|
||||||
|
|
||||||
|
|
||||||
|
<Accordion title="Option 1: MCP (Recommended)">
|
||||||
|
|
||||||
|
MCP (Model Control Protocol) lets you run Task Master directly from your editor.
|
||||||
|
|
||||||
|
## 1. Add your MCP config at the following path depending on your editor
|
||||||
|
|
||||||
|
| Editor | Scope | Linux/macOS Path | Windows Path | Key |
|
||||||
|
| ------------ | ------- | ------------------------------------- | ------------------------------------------------- | ------------ |
|
||||||
|
| **Cursor** | Global | `~/.cursor/mcp.json` | `%USERPROFILE%\.cursor\mcp.json` | `mcpServers` |
|
||||||
|
| | Project | `<project_folder>/.cursor/mcp.json` | `<project_folder>\.cursor\mcp.json` | `mcpServers` |
|
||||||
|
| **Windsurf** | Global | `~/.codeium/windsurf/mcp_config.json` | `%USERPROFILE%\.codeium\windsurf\mcp_config.json` | `mcpServers` |
|
||||||
|
| **VS Code** | Project | `<project_folder>/.vscode/mcp.json` | `<project_folder>\.vscode\mcp.json` | `servers` |
|
||||||
|
|
||||||
|
## Manual Configuration
|
||||||
|
|
||||||
|
### Cursor & Windsurf (`mcpServers`)
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"mcpServers": {
|
||||||
|
"taskmaster-ai": {
|
||||||
|
"command": "npx",
|
||||||
|
"args": ["-y", "--package=task-master-ai", "task-master-ai"],
|
||||||
|
"env": {
|
||||||
|
"ANTHROPIC_API_KEY": "YOUR_ANTHROPIC_API_KEY_HERE",
|
||||||
|
"PERPLEXITY_API_KEY": "YOUR_PERPLEXITY_API_KEY_HERE",
|
||||||
|
"OPENAI_API_KEY": "YOUR_OPENAI_KEY_HERE",
|
||||||
|
"GOOGLE_API_KEY": "YOUR_GOOGLE_KEY_HERE",
|
||||||
|
"MISTRAL_API_KEY": "YOUR_MISTRAL_KEY_HERE",
|
||||||
|
"OPENROUTER_API_KEY": "YOUR_OPENROUTER_KEY_HERE",
|
||||||
|
"XAI_API_KEY": "YOUR_XAI_KEY_HERE",
|
||||||
|
"AZURE_OPENAI_API_KEY": "YOUR_AZURE_KEY_HERE",
|
||||||
|
"OLLAMA_API_KEY": "YOUR_OLLAMA_API_KEY_HERE"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
> 🔑 Replace `YOUR_…_KEY_HERE` with your real API keys. You can remove keys you don't use.
|
||||||
|
|
||||||
|
> **Note**: If you see `0 tools enabled` in the MCP settings, try removing the `--package=task-master-ai` flag from `args`.
|
||||||
|
|
||||||
|
### VS Code (`servers` + `type`)
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"servers": {
|
||||||
|
"taskmaster-ai": {
|
||||||
|
"command": "npx",
|
||||||
|
"args": ["-y", "--package=task-master-ai", "task-master-ai"],
|
||||||
|
"env": {
|
||||||
|
"ANTHROPIC_API_KEY": "YOUR_ANTHROPIC_API_KEY_HERE",
|
||||||
|
"PERPLEXITY_API_KEY": "YOUR_PERPLEXITY_API_KEY_HERE",
|
||||||
|
"OPENAI_API_KEY": "YOUR_OPENAI_KEY_HERE",
|
||||||
|
"GOOGLE_API_KEY": "YOUR_GOOGLE_KEY_HERE",
|
||||||
|
"MISTRAL_API_KEY": "YOUR_MISTRAL_KEY_HERE",
|
||||||
|
"OPENROUTER_API_KEY": "YOUR_OPENROUTER_KEY_HERE",
|
||||||
|
"XAI_API_KEY": "YOUR_XAI_KEY_HERE",
|
||||||
|
"AZURE_OPENAI_API_KEY": "YOUR_AZURE_KEY_HERE"
|
||||||
|
},
|
||||||
|
"type": "stdio"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
> 🔑 Replace `YOUR_…_KEY_HERE` with your real API keys. You can remove keys you don't use.
|
||||||
|
|
||||||
|
#### 2. (Cursor-only) Enable Taskmaster MCP
|
||||||
|
|
||||||
|
Open Cursor Settings (Ctrl+Shift+J) ➡ Click on MCP tab on the left ➡ Enable task-master-ai with the toggle
|
||||||
|
|
||||||
|
#### 3. (Optional) Configure the models you want to use
|
||||||
|
|
||||||
|
In your editor's AI chat pane, say:
|
||||||
|
|
||||||
|
```txt
|
||||||
|
Change the main, research and fallback models to <model_name>, <model_name> and <model_name> respectively.
|
||||||
|
```
|
||||||
|
|
||||||
|
For example, to use Claude Code (no API key required):
|
||||||
|
```txt
|
||||||
|
Change the main model to claude-code/sonnet
|
||||||
|
```
|
||||||
|
|
||||||
|
#### 4. Initialize Task Master
|
||||||
|
|
||||||
|
In your editor's AI chat pane, say:
|
||||||
|
|
||||||
|
```txt
|
||||||
|
Initialize taskmaster-ai in my project
|
||||||
|
```
|
||||||
|
|
||||||
|
</Accordion>
|
||||||
|
|
||||||
|
<Accordion title="Option 2: Using Command Line">
|
||||||
|
|
||||||
|
## CLI Installation
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Install globally
|
||||||
|
npm install -g task-master-ai
|
||||||
|
|
||||||
|
# OR install locally within your project
|
||||||
|
npm install task-master-ai
|
||||||
|
```
|
||||||
|
|
||||||
|
## Initialize a new project
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# If installed globally
|
||||||
|
task-master init
|
||||||
|
|
||||||
|
# If installed locally
|
||||||
|
npx task-master init
|
||||||
|
|
||||||
|
# Initialize project with specific rules
|
||||||
|
task-master init --rules cursor,windsurf,vscode
|
||||||
|
```
|
||||||
|
|
||||||
|
This will prompt you for project details and set up a new project with the necessary files and structure.
|
||||||
|
</Accordion>
|
||||||
4
apps/docs/getting-started/quick-start/moving-forward.mdx
Normal file
4
apps/docs/getting-started/quick-start/moving-forward.mdx
Normal file
@@ -0,0 +1,4 @@
|
|||||||
|
---
|
||||||
|
title: Moving Forward
|
||||||
|
sidebarTitle: "Moving Forward"
|
||||||
|
---
|
||||||
81
apps/docs/getting-started/quick-start/prd-quick.mdx
Normal file
81
apps/docs/getting-started/quick-start/prd-quick.mdx
Normal file
@@ -0,0 +1,81 @@
|
|||||||
|
---
|
||||||
|
title: PRD Creation and Parsing
|
||||||
|
sidebarTitle: "PRD Creation and Parsing"
|
||||||
|
---
|
||||||
|
|
||||||
|
# Writing a PRD
|
||||||
|
|
||||||
|
A PRD (Product Requirements Document) is the starting point of every task flow in Task Master. It defines what you're building and why. A clear PRD dramatically improves the quality of your tasks, your model outputs, and your final product — so it’s worth taking the time to get it right.
|
||||||
|
|
||||||
|
<Tip>
|
||||||
|
You don’t need to define your whole app up front. You can write a focused PRD just for the next feature or module you’re working on.
|
||||||
|
</Tip>
|
||||||
|
|
||||||
|
<Tip>
|
||||||
|
You can start with an empty project or you can start with a feature PRD on an existing project.
|
||||||
|
</Tip>
|
||||||
|
|
||||||
|
<Tip>
|
||||||
|
You can add and parse multiple PRDs per project using the --append flag
|
||||||
|
</Tip>
|
||||||
|
|
||||||
|
## What Makes a Good PRD?
|
||||||
|
|
||||||
|
- Clear objective — what’s the outcome or feature?
|
||||||
|
- Context — what’s already in place or assumed?
|
||||||
|
- Constraints — what limits or requirements need to be respected?
|
||||||
|
- Reasoning — why are you building it this way?
|
||||||
|
|
||||||
|
The more context you give the model, the better the breakdown and results.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Writing a PRD for Task Master
|
||||||
|
|
||||||
|
<Note>An example PRD can be found in .taskmaster/templates/example_prd.txt</Note>
|
||||||
|
|
||||||
|
|
||||||
|
You can co-write your PRD with an LLM model using the following workflow:
|
||||||
|
|
||||||
|
1. **Chat about requirements** — explain what you want to build.
|
||||||
|
2. **Show an example PRD** — share the example PRD so the model understands the expected format. The example uses formatting that works well with Task Master's code. Following the example will yield better results.
|
||||||
|
3. **Iterate and refine** — work with the model to shape the draft into a clear and well-structured PRD.
|
||||||
|
|
||||||
|
This approach works great in Cursor, or anywhere you use a chat-based LLM.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Where to Save Your PRD
|
||||||
|
|
||||||
|
Place your PRD file in the `.taskmaster/docs` folder in your project.
|
||||||
|
|
||||||
|
- You can have **multiple PRDs** per project.
|
||||||
|
- Name your PRDs clearly so they’re easy to reference later.
|
||||||
|
- Examples: `dashboard_redesign.txt`, `user_onboarding.txt`
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
# Parse your PRD into Tasks
|
||||||
|
|
||||||
|
This is where the Task Master magic begins.
|
||||||
|
|
||||||
|
In Cursor's AI chat, instruct the agent to generate tasks from your PRD:
|
||||||
|
|
||||||
|
```
|
||||||
|
Please use the task-master parse-prd command to generate tasks from my PRD. The PRD is located at .taskmaster/docs/<prd-name>.txt.
|
||||||
|
```
|
||||||
|
|
||||||
|
The agent will execute the following command which you can alternatively paste into the CLI:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
task-master parse-prd .taskmaster/docs/<prd-name>.txt
|
||||||
|
```
|
||||||
|
|
||||||
|
This will:
|
||||||
|
|
||||||
|
- Parse your PRD document
|
||||||
|
- Generate a structured `tasks.json` file with tasks, dependencies, priorities, and test strategies
|
||||||
|
|
||||||
|
Now that you have written and parsed a PRD, you are ready to start setting up your tasks.
|
||||||
|
|
||||||
|
|
||||||
19
apps/docs/getting-started/quick-start/quick-start.mdx
Normal file
19
apps/docs/getting-started/quick-start/quick-start.mdx
Normal file
@@ -0,0 +1,19 @@
|
|||||||
|
---
|
||||||
|
title: Quick Start
|
||||||
|
sidebarTitle: "Quick Start"
|
||||||
|
---
|
||||||
|
|
||||||
|
This guide is for new users who want to start using Task Master with minimal setup time.
|
||||||
|
|
||||||
|
It covers:
|
||||||
|
- [Requirements](/docs/getting-started/quick-start/requirements): You will need Node.js and an AI model API Key.
|
||||||
|
- [Installation](/docs/getting-started/quick-start/installation): How to Install Task Master.
|
||||||
|
- [Configuration](/docs/getting-started/quick-start/configuration-quick): Setting up your API Key, MCP, and more.
|
||||||
|
- [PRD](/docs/getting-started/quick-start/prd-quick): Writing and parsing your first PRD.
|
||||||
|
- [Task Setup](/docs/getting-started/quick-start/tasks-quick): Preparing your tasks for execution.
|
||||||
|
- [Executing Tasks](/docs/getting-started/quick-start/execute-quick): Using Task Master to execute tasks.
|
||||||
|
- [Rules & Context](/docs/getting-started/quick-start/rules-quick): Learn how and why to build context in your project over time.
|
||||||
|
|
||||||
|
<Tip>
|
||||||
|
By the end of this guide, you'll have everything you need to begin working productively with Task Master.
|
||||||
|
</Tip>
|
||||||
50
apps/docs/getting-started/quick-start/requirements.mdx
Normal file
50
apps/docs/getting-started/quick-start/requirements.mdx
Normal file
@@ -0,0 +1,50 @@
|
|||||||
|
---
|
||||||
|
title: Requirements
|
||||||
|
sidebarTitle: "Requirements"
|
||||||
|
---
|
||||||
|
Before you can start using TaskMaster AI, you'll need to install Node.js and set up at least one model API Key.
|
||||||
|
|
||||||
|
## 1. Node.js
|
||||||
|
|
||||||
|
TaskMaster AI is built with Node.js and requires it to run. npm (Node Package Manager) comes bundled with Node.js.
|
||||||
|
|
||||||
|
<Accordion title="Install Node.js">
|
||||||
|
|
||||||
|
### Installation
|
||||||
|
|
||||||
|
**Option 1: Download from official website**
|
||||||
|
1. Visit [nodejs.org](https://nodejs.org)
|
||||||
|
2. Download the **LTS (Long Term Support)** version for your operating system
|
||||||
|
3. Run the installer and follow the setup wizard
|
||||||
|
|
||||||
|
**Option 2: Use a package manager**
|
||||||
|
|
||||||
|
<CodeGroup>
|
||||||
|
|
||||||
|
```bash Windows (Chocolatey)
|
||||||
|
choco install nodejs
|
||||||
|
```
|
||||||
|
|
||||||
|
```bash Windows (winget)
|
||||||
|
winget install OpenJS.NodeJS
|
||||||
|
```
|
||||||
|
|
||||||
|
</CodeGroup>
|
||||||
|
|
||||||
|
</Accordion>
|
||||||
|
|
||||||
|
## 2. Model API Key
|
||||||
|
|
||||||
|
Taskmaster utilizes AI across several commands, and those require a separate API key. For the purpose of a Quick Start we recommend setting up an API Key with Anthropic for your main model and Perplexity for your research model (optional but recommended).
|
||||||
|
|
||||||
|
<Tip>Task Master shows API costs per command used. Most users load $5-10 on their keys and don't have to top it off for a few months.</Tip>
|
||||||
|
|
||||||
|
At least one (1) of the following is required:
|
||||||
|
|
||||||
|
1. Anthropic API key (Claude API) - **recommended for Quick Start**
|
||||||
|
2. OpenAI API key
|
||||||
|
3. Google Gemini API key
|
||||||
|
4. Perplexity API key (for research model)
|
||||||
|
5. xAI API Key (for research or main model)
|
||||||
|
6. OpenRouter API Key (for research or main model)
|
||||||
|
7. Claude Code (no API key required - requires Claude Code CLI)
|
||||||
4
apps/docs/getting-started/quick-start/rules-quick.mdx
Normal file
4
apps/docs/getting-started/quick-start/rules-quick.mdx
Normal file
@@ -0,0 +1,4 @@
|
|||||||
|
---
|
||||||
|
title: Rules and Context
|
||||||
|
sidebarTitle: "Rules and Context"
|
||||||
|
---
|
||||||
69
apps/docs/getting-started/quick-start/tasks-quick.mdx
Normal file
69
apps/docs/getting-started/quick-start/tasks-quick.mdx
Normal file
@@ -0,0 +1,69 @@
|
|||||||
|
---
|
||||||
|
title: Tasks Setup
|
||||||
|
sidebarTitle: "Tasks Setup"
|
||||||
|
---
|
||||||
|
Now that your tasks are generated you can review the plan and prepare for execution.
|
||||||
|
|
||||||
|
<Tip>
|
||||||
|
Not all of the setup steps are required but they are recommended in order to ensure your coding agents work on accurate tasks.
|
||||||
|
</Tip>
|
||||||
|
|
||||||
|
## Expand Tasks
|
||||||
|
Used to add detail to tasks and create subtasks. We recommend expanding all tasks using the MCP request below:
|
||||||
|
```
|
||||||
|
Expand all tasks into subtasks.
|
||||||
|
```
|
||||||
|
The agent will execute
|
||||||
|
```bash
|
||||||
|
task-master expand --all
|
||||||
|
```
|
||||||
|
## List/Show Tasks
|
||||||
|
|
||||||
|
Used to view task details. It is important to review the plan and ensure it makes sense in your project. Check for correct folder structures, dependencies, out of scope subtasks, etc.
|
||||||
|
|
||||||
|
To see a list of tasks and descriptions use the following command:
|
||||||
|
|
||||||
|
```
|
||||||
|
List all pending tasks so I can review.
|
||||||
|
```
|
||||||
|
To see all tasks in the CLI you can use:
|
||||||
|
```bash
|
||||||
|
task-master list
|
||||||
|
```
|
||||||
|
|
||||||
|
To see all implementation details of an individual task, including subtasks and testing strategy, you can use Show Task:
|
||||||
|
|
||||||
|
```
|
||||||
|
Show task 2 so I can review.
|
||||||
|
```
|
||||||
|
|
||||||
|
```bash
|
||||||
|
task-master show --id=<##>
|
||||||
|
```
|
||||||
|
|
||||||
|
## Update Tasks
|
||||||
|
|
||||||
|
If the task details need to be edited you can update the task using this request:
|
||||||
|
|
||||||
|
```
|
||||||
|
Update Task 2 to use Postgres instead of MongoDB and remove the sharding subtask
|
||||||
|
```
|
||||||
|
Or this CLI command:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
task-master update-task --id=2 --prompt="use Postgres instead of MongoDB and remove the sharding subtask"
|
||||||
|
```
|
||||||
|
## Analyze complexity
|
||||||
|
|
||||||
|
Task Master can provide a complexity report which can be helpful to read before you begin. If you didn't already expand all your tasks, it could help identify which could be broken down further with subtasks.
|
||||||
|
|
||||||
|
```
|
||||||
|
Can you analyze the complexity of our tasks to help me understand which ones need to be broken down further?
|
||||||
|
```
|
||||||
|
|
||||||
|
You can view the report in a friendly table using:
|
||||||
|
```
|
||||||
|
Can you show me the complexity report in a more readable format?
|
||||||
|
```
|
||||||
|
|
||||||
|
<Check>Now you are ready to begin [executing tasks](/docs/getting-started/quick-start/execute-quick)</Check>
|
||||||
20
apps/docs/introduction.mdx
Normal file
20
apps/docs/introduction.mdx
Normal file
@@ -0,0 +1,20 @@
|
|||||||
|
<Tip>
|
||||||
|
Welcome to v1 of the Task Master Docs. Expect weekly updates as we expand and refine each section.
|
||||||
|
</Tip>
|
||||||
|
|
||||||
|
We've organized the docs into three sections depending on your experience level and goals:
|
||||||
|
|
||||||
|
### Getting Started - Jump in to [Quick Start](/docs/getting-started/quick-start)
|
||||||
|
Designed for first-time users. Get set up, create your first PRD, and run your first task.
|
||||||
|
|
||||||
|
### Best Practices
|
||||||
|
Covers common workflows, strategic usage of commands, model configuration tips, and real-world usage patterns. Recommended for active users.
|
||||||
|
|
||||||
|
### Technical Capabilities
|
||||||
|
A detailed glossary of every root command and available capability — meant for power users and contributors.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
Thanks for being here early. If you spot something broken or want to contribute, check out the [GitHub repo](https://github.com/eyaltoledano/claude-task-master).
|
||||||
|
|
||||||
|
Have questions? Join our [Discord community](https://discord.gg/fWJkU7rf) to connect with other users and get help from the team.
|
||||||
18
apps/docs/licensing.md
Normal file
18
apps/docs/licensing.md
Normal file
@@ -0,0 +1,18 @@
|
|||||||
|
# Licensing
|
||||||
|
|
||||||
|
Task Master is licensed under the MIT License with Commons Clause. This means you can:
|
||||||
|
|
||||||
|
## ✅ Allowed:
|
||||||
|
|
||||||
|
- Use Task Master for any purpose (personal, commercial, academic)
|
||||||
|
- Modify the code
|
||||||
|
- Distribute copies
|
||||||
|
- Create and sell products built using Task Master
|
||||||
|
|
||||||
|
## ❌ Not Allowed:
|
||||||
|
|
||||||
|
- Sell Task Master itself
|
||||||
|
- Offer Task Master as a hosted service
|
||||||
|
- Create competing products based on Task Master
|
||||||
|
|
||||||
|
{/* See the [LICENSE](../LICENSE) file for the complete license text. */}
|
||||||
19
apps/docs/logo/dark.svg
Normal file
19
apps/docs/logo/dark.svg
Normal file
@@ -0,0 +1,19 @@
|
|||||||
|
<svg width="800" height="240" viewBox="0 0 800 240" xmlns="http://www.w3.org/2000/svg">
|
||||||
|
<!-- Background -->
|
||||||
|
<rect width="800" height="240" fill="transparent"/>
|
||||||
|
|
||||||
|
<!-- Curly braces -->
|
||||||
|
<text x="40" y="156" font-size="140" fill="white" font-family="monospace">{</text>
|
||||||
|
<text x="230" y="156" font-size="140" fill="white" font-family="monospace">}</text>
|
||||||
|
|
||||||
|
<!-- Blue form with check -->
|
||||||
|
<rect x="120" y="50" width="120" height="140" rx="16" fill="#3366CC"/>
|
||||||
|
<polyline points="150,110 164,128 190,84" fill="none" stroke="white" stroke-width="10"/>
|
||||||
|
<circle cx="150" cy="144" r="7" fill="white"/>
|
||||||
|
<rect x="168" y="140" width="48" height="10" fill="white"/>
|
||||||
|
<circle cx="150" cy="168" r="7" fill="white"/>
|
||||||
|
<rect x="168" y="164" width="48" height="10" fill="white"/>
|
||||||
|
|
||||||
|
<!-- Text -->
|
||||||
|
<text x="340" y="156" font-family="Arial, sans-serif" font-size="76" font-weight="bold" fill="white">Task Master</text>
|
||||||
|
</svg>
|
||||||
|
After Width: | Height: | Size: 929 B |
19
apps/docs/logo/light.svg
Normal file
19
apps/docs/logo/light.svg
Normal file
@@ -0,0 +1,19 @@
|
|||||||
|
<svg width="800" height="240" viewBox="0 0 800 240" xmlns="http://www.w3.org/2000/svg">
|
||||||
|
<!-- Background -->
|
||||||
|
<rect width="800" height="240" fill="transparent"/>
|
||||||
|
|
||||||
|
<!-- Curly braces -->
|
||||||
|
<text x="40" y="156" font-size="140" fill="#000000" font-family="monospace">{</text>
|
||||||
|
<text x="230" y="156" font-size="140" fill="#000000" font-family="monospace">}</text>
|
||||||
|
|
||||||
|
<!-- Blue form with check -->
|
||||||
|
<rect x="120" y="50" width="120" height="140" rx="16" fill="#3366CC"/>
|
||||||
|
<polyline points="150,110 164,128 190,84" fill="none" stroke="#FFFFFF" stroke-width="10"/>
|
||||||
|
<circle cx="150" cy="144" r="7" fill="#FFFFFF"/>
|
||||||
|
<rect x="168" y="140" width="48" height="10" fill="#FFFFFF"/>
|
||||||
|
<circle cx="150" cy="168" r="7" fill="#FFFFFF"/>
|
||||||
|
<rect x="168" y="164" width="48" height="10" fill="#FFFFFF"/>
|
||||||
|
|
||||||
|
<!-- Text -->
|
||||||
|
<text x="340" y="156" font-family="Arial, sans-serif" font-size="76" font-weight="bold" fill="#000000">Task Master</text>
|
||||||
|
</svg>
|
||||||
|
After Width: | Height: | Size: 941 B |
BIN
apps/docs/logo/task-master-logo.png
Normal file
BIN
apps/docs/logo/task-master-logo.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 29 KiB |
14
apps/docs/package.json
Normal file
14
apps/docs/package.json
Normal file
@@ -0,0 +1,14 @@
|
|||||||
|
{
|
||||||
|
"name": "docs",
|
||||||
|
"version": "0.0.0",
|
||||||
|
"private": true,
|
||||||
|
"description": "Task Master documentation powered by Mintlify",
|
||||||
|
"scripts": {
|
||||||
|
"dev": "mintlify dev",
|
||||||
|
"build": "mintlify build",
|
||||||
|
"preview": "mintlify preview"
|
||||||
|
},
|
||||||
|
"devDependencies": {
|
||||||
|
"mintlify": "^4.0.0"
|
||||||
|
}
|
||||||
|
}
|
||||||
10
apps/docs/style.css
Normal file
10
apps/docs/style.css
Normal file
@@ -0,0 +1,10 @@
|
|||||||
|
/*
|
||||||
|
* This file is used to override the default logo style of the docs theme.
|
||||||
|
* It is not used for the actual documentation content.
|
||||||
|
*/
|
||||||
|
|
||||||
|
#navbar img {
|
||||||
|
height: auto !important; /* Let intrinsic SVG size determine height */
|
||||||
|
width: 200px !important; /* Control width */
|
||||||
|
margin-top: 5px !important; /* Add some space above the logo */
|
||||||
|
}
|
||||||
12
apps/docs/vercel.json
Normal file
12
apps/docs/vercel.json
Normal file
@@ -0,0 +1,12 @@
|
|||||||
|
{
|
||||||
|
"rewrites": [
|
||||||
|
{
|
||||||
|
"source": "/",
|
||||||
|
"destination": "https://taskmaster-49ce32d5.mintlify.dev/docs"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"source": "/:match*",
|
||||||
|
"destination": "https://taskmaster-49ce32d5.mintlify.dev/docs/:match*"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
6
apps/docs/whats-new.mdx
Normal file
6
apps/docs/whats-new.mdx
Normal file
@@ -0,0 +1,6 @@
|
|||||||
|
---
|
||||||
|
title: "What's New"
|
||||||
|
sidebarTitle: "What's New"
|
||||||
|
---
|
||||||
|
|
||||||
|
An easy way to see the latest releases
|
||||||
@@ -1,5 +1,30 @@
|
|||||||
# Change Log
|
# Change Log
|
||||||
|
|
||||||
|
## 0.23.1
|
||||||
|
|
||||||
|
### Patch Changes
|
||||||
|
|
||||||
|
- [#1090](https://github.com/eyaltoledano/claude-task-master/pull/1090) [`a464e55`](https://github.com/eyaltoledano/claude-task-master/commit/a464e550b886ef81b09df80588fe5881bce83d93) Thanks [@Crunchyman-ralph](https://github.com/Crunchyman-ralph)! - Fix issues with some users not being able to connect to Taskmaster MCP server while using the extension
|
||||||
|
|
||||||
|
- Updated dependencies [[`4357af3`](https://github.com/eyaltoledano/claude-task-master/commit/4357af3f13859d90bca8795215e5d5f1d94abde5), [`e495b2b`](https://github.com/eyaltoledano/claude-task-master/commit/e495b2b55950ee54c7d0f1817d8530e28bd79c05), [`36468f3`](https://github.com/eyaltoledano/claude-task-master/commit/36468f3c93faf4035a5c442ccbc501077f3440f1), [`e495b2b`](https://github.com/eyaltoledano/claude-task-master/commit/e495b2b55950ee54c7d0f1817d8530e28bd79c05), [`e495b2b`](https://github.com/eyaltoledano/claude-task-master/commit/e495b2b55950ee54c7d0f1817d8530e28bd79c05), [`75c514c`](https://github.com/eyaltoledano/claude-task-master/commit/75c514cf5b2ca47f95c0ad7fa92654a4f2a6be4b), [`4bb6370`](https://github.com/eyaltoledano/claude-task-master/commit/4bb63706b80c28d1b2d782ba868a725326f916c7)]:
|
||||||
|
- task-master-ai@0.24.0
|
||||||
|
|
||||||
|
## 0.23.1-rc.1
|
||||||
|
|
||||||
|
### Patch Changes
|
||||||
|
|
||||||
|
- Updated dependencies [[`75c514c`](https://github.com/eyaltoledano/claude-task-master/commit/75c514cf5b2ca47f95c0ad7fa92654a4f2a6be4b)]:
|
||||||
|
- task-master-ai@0.24.0-rc.2
|
||||||
|
|
||||||
|
## 0.23.1-rc.0
|
||||||
|
|
||||||
|
### Patch Changes
|
||||||
|
|
||||||
|
- [#1090](https://github.com/eyaltoledano/claude-task-master/pull/1090) [`a464e55`](https://github.com/eyaltoledano/claude-task-master/commit/a464e550b886ef81b09df80588fe5881bce83d93) Thanks [@Crunchyman-ralph](https://github.com/Crunchyman-ralph)! - Fix issues with some users not being able to connect to Taskmaster MCP server while using the extension
|
||||||
|
|
||||||
|
- Updated dependencies [[`4357af3`](https://github.com/eyaltoledano/claude-task-master/commit/4357af3f13859d90bca8795215e5d5f1d94abde5), [`36468f3`](https://github.com/eyaltoledano/claude-task-master/commit/36468f3c93faf4035a5c442ccbc501077f3440f1), [`4bb6370`](https://github.com/eyaltoledano/claude-task-master/commit/4bb63706b80c28d1b2d782ba868a725326f916c7)]:
|
||||||
|
- task-master-ai@0.24.0-rc.1
|
||||||
|
|
||||||
## 0.23.0
|
## 0.23.0
|
||||||
|
|
||||||
### Minor Changes
|
### Minor Changes
|
||||||
|
|||||||
@@ -3,7 +3,7 @@
|
|||||||
"private": true,
|
"private": true,
|
||||||
"displayName": "TaskMaster",
|
"displayName": "TaskMaster",
|
||||||
"description": "A visual Kanban board interface for TaskMaster projects in VS Code",
|
"description": "A visual Kanban board interface for TaskMaster projects in VS Code",
|
||||||
"version": "0.23.0",
|
"version": "0.23.1",
|
||||||
"publisher": "Hamster",
|
"publisher": "Hamster",
|
||||||
"icon": "assets/icon.png",
|
"icon": "assets/icon.png",
|
||||||
"engines": {
|
"engines": {
|
||||||
@@ -64,16 +64,16 @@
|
|||||||
"properties": {
|
"properties": {
|
||||||
"taskmaster.mcp.command": {
|
"taskmaster.mcp.command": {
|
||||||
"type": "string",
|
"type": "string",
|
||||||
"default": "npx",
|
"default": "node",
|
||||||
"description": "The command or absolute path to execute for the MCP server (e.g., 'npx' or '/usr/local/bin/task-master-ai')."
|
"description": "The command to execute for the MCP server (e.g., 'node' for bundled server or 'npx' for remote)."
|
||||||
},
|
},
|
||||||
"taskmaster.mcp.args": {
|
"taskmaster.mcp.args": {
|
||||||
"type": "array",
|
"type": "array",
|
||||||
"items": {
|
"items": {
|
||||||
"type": "string"
|
"type": "string"
|
||||||
},
|
},
|
||||||
"default": ["task-master-ai"],
|
"default": [],
|
||||||
"description": "An array of arguments to pass to the MCP server command."
|
"description": "Arguments for the MCP server (leave empty to use bundled server)."
|
||||||
},
|
},
|
||||||
"taskmaster.mcp.cwd": {
|
"taskmaster.mcp.cwd": {
|
||||||
"type": "string",
|
"type": "string",
|
||||||
@@ -238,6 +238,9 @@
|
|||||||
"watch:css": "npx @tailwindcss/cli -i ./src/webview/index.css -o ./dist/index.css --watch",
|
"watch:css": "npx @tailwindcss/cli -i ./src/webview/index.css -o ./dist/index.css --watch",
|
||||||
"check-types": "tsc --noEmit"
|
"check-types": "tsc --noEmit"
|
||||||
},
|
},
|
||||||
|
"dependencies": {
|
||||||
|
"task-master-ai": "0.24.0"
|
||||||
|
},
|
||||||
"devDependencies": {
|
"devDependencies": {
|
||||||
"@dnd-kit/core": "^6.3.1",
|
"@dnd-kit/core": "^6.3.1",
|
||||||
"@dnd-kit/modifiers": "^9.0.0",
|
"@dnd-kit/modifiers": "^9.0.0",
|
||||||
|
|||||||
@@ -64,23 +64,49 @@ try {
|
|||||||
fs.readFileSync(publishPackagePath, 'utf8')
|
fs.readFileSync(publishPackagePath, 'utf8')
|
||||||
);
|
);
|
||||||
|
|
||||||
// Check if versions are in sync
|
// Handle RC versions for VS Code Marketplace
|
||||||
if (devPackage.version !== publishPackage.version) {
|
let finalVersion = devPackage.version;
|
||||||
|
if (finalVersion.includes('-rc.')) {
|
||||||
console.log(
|
console.log(
|
||||||
` - Version sync needed: ${publishPackage.version} → ${devPackage.version}`
|
' - Detected RC version, transforming for VS Code Marketplace...'
|
||||||
);
|
);
|
||||||
publishPackage.version = devPackage.version;
|
|
||||||
|
|
||||||
// Update the source package.publish.json file
|
// Extract base version and RC number
|
||||||
|
const baseVersion = finalVersion.replace(/-rc\.\d+$/, '');
|
||||||
|
const rcMatch = finalVersion.match(/rc\.(\d+)/);
|
||||||
|
const rcNumber = rcMatch ? parseInt(rcMatch[1]) : 0;
|
||||||
|
|
||||||
|
// For each RC iteration, increment the patch version
|
||||||
|
// This ensures unique versions in VS Code Marketplace
|
||||||
|
if (rcNumber > 0) {
|
||||||
|
const [major, minor, patch] = baseVersion.split('.').map(Number);
|
||||||
|
finalVersion = `${major}.${minor}.${patch + rcNumber}`;
|
||||||
|
console.log(
|
||||||
|
` - RC version mapping: ${devPackage.version} → ${finalVersion}`
|
||||||
|
);
|
||||||
|
} else {
|
||||||
|
finalVersion = baseVersion;
|
||||||
|
console.log(
|
||||||
|
` - RC version mapping: ${devPackage.version} → ${finalVersion}`
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if versions need updating
|
||||||
|
if (publishPackage.version !== finalVersion) {
|
||||||
|
console.log(
|
||||||
|
` - Version sync needed: ${publishPackage.version} → ${finalVersion}`
|
||||||
|
);
|
||||||
|
publishPackage.version = finalVersion;
|
||||||
|
|
||||||
|
// Update the source package.publish.json file with the final version
|
||||||
fs.writeFileSync(
|
fs.writeFileSync(
|
||||||
publishPackagePath,
|
publishPackagePath,
|
||||||
JSON.stringify(publishPackage, null, '\t') + '\n'
|
JSON.stringify(publishPackage, null, '\t') + '\n'
|
||||||
);
|
);
|
||||||
console.log(
|
console.log(` - Updated package.publish.json version to ${finalVersion}`);
|
||||||
` - Updated package.publish.json version to ${devPackage.version}`
|
|
||||||
);
|
|
||||||
} else {
|
} else {
|
||||||
console.log(` - Versions already in sync: ${devPackage.version}`);
|
console.log(` - Versions already in sync: ${finalVersion}`);
|
||||||
}
|
}
|
||||||
|
|
||||||
// Copy the (now synced) package.publish.json as package.json
|
// Copy the (now synced) package.publish.json as package.json
|
||||||
@@ -124,8 +150,7 @@ try {
|
|||||||
`cd vsix-build && npx vsce package --no-dependencies`
|
`cd vsix-build && npx vsce package --no-dependencies`
|
||||||
);
|
);
|
||||||
|
|
||||||
// Use the synced version for output
|
// Use the transformed version for output
|
||||||
const finalVersion = devPackage.version;
|
|
||||||
console.log(
|
console.log(
|
||||||
`\nYour extension will be packaged to: vsix-build/task-master-${finalVersion}.vsix`
|
`\nYour extension will be packaged to: vsix-build/task-master-${finalVersion}.vsix`
|
||||||
);
|
);
|
||||||
|
|||||||
@@ -2,7 +2,7 @@
|
|||||||
"name": "task-master-hamster",
|
"name": "task-master-hamster",
|
||||||
"displayName": "Taskmaster AI",
|
"displayName": "Taskmaster AI",
|
||||||
"description": "A visual Kanban board interface for Taskmaster projects in VS Code",
|
"description": "A visual Kanban board interface for Taskmaster projects in VS Code",
|
||||||
"version": "0.22.3",
|
"version": "0.23.1",
|
||||||
"publisher": "Hamster",
|
"publisher": "Hamster",
|
||||||
"icon": "assets/icon.png",
|
"icon": "assets/icon.png",
|
||||||
"engines": {
|
"engines": {
|
||||||
|
|||||||
@@ -1,6 +1,7 @@
|
|||||||
import { Client } from '@modelcontextprotocol/sdk/client/index.js';
|
import { Client } from '@modelcontextprotocol/sdk/client/index.js';
|
||||||
import { StdioClientTransport } from '@modelcontextprotocol/sdk/client/stdio.js';
|
import { StdioClientTransport } from '@modelcontextprotocol/sdk/client/stdio.js';
|
||||||
import * as vscode from 'vscode';
|
import * as vscode from 'vscode';
|
||||||
|
import * as path from 'path';
|
||||||
import { logger } from './logger';
|
import { logger } from './logger';
|
||||||
|
|
||||||
export interface MCPConfig {
|
export interface MCPConfig {
|
||||||
@@ -143,7 +144,7 @@ export class MCPClientManager {
|
|||||||
// Create the client
|
// Create the client
|
||||||
this.client = new Client(
|
this.client = new Client(
|
||||||
{
|
{
|
||||||
name: 'taskr-vscode-extension',
|
name: 'task-master-vscode-extension',
|
||||||
version: '1.0.0'
|
version: '1.0.0'
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -211,6 +212,30 @@ export class MCPClientManager {
|
|||||||
};
|
};
|
||||||
|
|
||||||
logger.log('MCP client connected successfully');
|
logger.log('MCP client connected successfully');
|
||||||
|
|
||||||
|
// Log Task Master version information after successful connection
|
||||||
|
try {
|
||||||
|
const versionResult = await this.callTool('get_tasks', {});
|
||||||
|
if (versionResult?.content?.[0]?.text) {
|
||||||
|
const response = JSON.parse(versionResult.content[0].text);
|
||||||
|
if (response?.version) {
|
||||||
|
logger.log('━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━');
|
||||||
|
logger.log('✅ Task Master MCP Server Connected');
|
||||||
|
logger.log(` Version: ${response.version.version || 'unknown'}`);
|
||||||
|
logger.log(
|
||||||
|
` Package: ${response.version.name || 'task-master-ai'}`
|
||||||
|
);
|
||||||
|
if (response.tag) {
|
||||||
|
logger.log(
|
||||||
|
` Current Tag: ${response.tag.currentTag || 'master'}`
|
||||||
|
);
|
||||||
|
}
|
||||||
|
logger.log('━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━');
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} catch (versionError) {
|
||||||
|
logger.log('Note: Could not retrieve Task Master version information');
|
||||||
|
}
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
logger.error('Failed to connect to MCP server:', error);
|
logger.error('Failed to connect to MCP server:', error);
|
||||||
this.status = {
|
this.status = {
|
||||||
@@ -312,6 +337,34 @@ export class MCPClientManager {
|
|||||||
'Available MCP tools:',
|
'Available MCP tools:',
|
||||||
result.tools?.map((t) => t.name) || []
|
result.tools?.map((t) => t.name) || []
|
||||||
);
|
);
|
||||||
|
|
||||||
|
// Try to get version information by calling a simple tool
|
||||||
|
// The get_tasks tool is lightweight and returns version info
|
||||||
|
try {
|
||||||
|
const versionResult = await this.callTool('get_tasks', {});
|
||||||
|
if (versionResult?.content?.[0]?.text) {
|
||||||
|
// Parse the response to extract version info
|
||||||
|
const response = JSON.parse(versionResult.content[0].text);
|
||||||
|
if (response?.version) {
|
||||||
|
logger.log('━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━');
|
||||||
|
logger.log('📦 Task Master MCP Server Connected');
|
||||||
|
logger.log(` Version: ${response.version.version || 'unknown'}`);
|
||||||
|
logger.log(
|
||||||
|
` Package: ${response.version.name || 'task-master-ai'}`
|
||||||
|
);
|
||||||
|
if (response.tag) {
|
||||||
|
logger.log(
|
||||||
|
` Current Tag: ${response.tag.currentTag || 'master'}`
|
||||||
|
);
|
||||||
|
}
|
||||||
|
logger.log('━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━');
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} catch (versionError) {
|
||||||
|
// Don't fail the connection test if we can't get version info
|
||||||
|
logger.log('Could not retrieve Task Master version information');
|
||||||
|
}
|
||||||
|
|
||||||
return true;
|
return true;
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
logger.error('Connection test failed:', error);
|
logger.error('Connection test failed:', error);
|
||||||
@@ -345,8 +398,34 @@ export function createMCPConfigFromSettings(): MCPConfig {
|
|||||||
);
|
);
|
||||||
const config = vscode.workspace.getConfiguration('taskmaster');
|
const config = vscode.workspace.getConfiguration('taskmaster');
|
||||||
|
|
||||||
let command = config.get<string>('mcp.command', 'npx');
|
let command = config.get<string>('mcp.command', 'node');
|
||||||
const args = config.get<string[]>('mcp.args', ['task-master-ai']);
|
let args = config.get<string[]>('mcp.args', []);
|
||||||
|
|
||||||
|
// If using default settings, use the bundled MCP server
|
||||||
|
if (command === 'node' && args.length === 0) {
|
||||||
|
try {
|
||||||
|
// Try to resolve the bundled MCP server
|
||||||
|
const taskMasterPath = require.resolve('task-master-ai');
|
||||||
|
const mcpServerPath = path.resolve(
|
||||||
|
path.dirname(taskMasterPath),
|
||||||
|
'mcp-server/server.js'
|
||||||
|
);
|
||||||
|
|
||||||
|
// Verify the server file exists
|
||||||
|
const fs = require('fs');
|
||||||
|
if (!fs.existsSync(mcpServerPath)) {
|
||||||
|
throw new Error('MCP server file not found at: ' + mcpServerPath);
|
||||||
|
}
|
||||||
|
|
||||||
|
args = [mcpServerPath];
|
||||||
|
logger.log(`📦 Using bundled MCP server at: ${mcpServerPath}`);
|
||||||
|
} catch (error) {
|
||||||
|
logger.error('❌ Could not find bundled task-master-ai server:', error);
|
||||||
|
// Fallback to npx
|
||||||
|
command = 'npx';
|
||||||
|
args = ['-y', 'task-master-ai'];
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// Use proper VS Code workspace detection
|
// Use proper VS Code workspace detection
|
||||||
const defaultCwd =
|
const defaultCwd =
|
||||||
|
|||||||
162
assets/claude/agents/task-checker.md
Normal file
162
assets/claude/agents/task-checker.md
Normal file
@@ -0,0 +1,162 @@
|
|||||||
|
---
|
||||||
|
name: task-checker
|
||||||
|
description: Use this agent to verify that tasks marked as 'review' have been properly implemented according to their specifications. This agent performs quality assurance by checking implementations against requirements, running tests, and ensuring best practices are followed. <example>Context: A task has been marked as 'review' after implementation. user: 'Check if task 118 was properly implemented' assistant: 'I'll use the task-checker agent to verify the implementation meets all requirements.' <commentary>Tasks in 'review' status need verification before being marked as 'done'.</commentary></example> <example>Context: Multiple tasks are in review status. user: 'Verify all tasks that are ready for review' assistant: 'I'll deploy the task-checker to verify all tasks in review status.' <commentary>The checker ensures quality before tasks are marked complete.</commentary></example>
|
||||||
|
model: sonnet
|
||||||
|
color: yellow
|
||||||
|
---
|
||||||
|
|
||||||
|
You are a Quality Assurance specialist that rigorously verifies task implementations against their specifications. Your role is to ensure that tasks marked as 'review' meet all requirements before they can be marked as 'done'.
|
||||||
|
|
||||||
|
## Core Responsibilities
|
||||||
|
|
||||||
|
1. **Task Specification Review**
|
||||||
|
- Retrieve task details using MCP tool `mcp__task-master-ai__get_task`
|
||||||
|
- Understand the requirements, test strategy, and success criteria
|
||||||
|
- Review any subtasks and their individual requirements
|
||||||
|
|
||||||
|
2. **Implementation Verification**
|
||||||
|
- Use `Read` tool to examine all created/modified files
|
||||||
|
- Use `Bash` tool to run compilation and build commands
|
||||||
|
- Use `Grep` tool to search for required patterns and implementations
|
||||||
|
- Verify file structure matches specifications
|
||||||
|
- Check that all required methods/functions are implemented
|
||||||
|
|
||||||
|
3. **Test Execution**
|
||||||
|
- Run tests specified in the task's testStrategy
|
||||||
|
- Execute build commands (npm run build, tsc --noEmit, etc.)
|
||||||
|
- Verify no compilation errors or warnings
|
||||||
|
- Check for runtime errors where applicable
|
||||||
|
- Test edge cases mentioned in requirements
|
||||||
|
|
||||||
|
4. **Code Quality Assessment**
|
||||||
|
- Verify code follows project conventions
|
||||||
|
- Check for proper error handling
|
||||||
|
- Ensure TypeScript typing is strict (no 'any' unless justified)
|
||||||
|
- Verify documentation/comments where required
|
||||||
|
- Check for security best practices
|
||||||
|
|
||||||
|
5. **Dependency Validation**
|
||||||
|
- Verify all task dependencies were actually completed
|
||||||
|
- Check integration points with dependent tasks
|
||||||
|
- Ensure no breaking changes to existing functionality
|
||||||
|
|
||||||
|
## Verification Workflow
|
||||||
|
|
||||||
|
1. **Retrieve Task Information**
|
||||||
|
```
|
||||||
|
Use mcp__task-master-ai__get_task to get full task details
|
||||||
|
Note the implementation requirements and test strategy
|
||||||
|
```
|
||||||
|
|
||||||
|
2. **Check File Existence**
|
||||||
|
```bash
|
||||||
|
# Verify all required files exist
|
||||||
|
ls -la [expected directories]
|
||||||
|
# Read key files to verify content
|
||||||
|
```
|
||||||
|
|
||||||
|
3. **Verify Implementation**
|
||||||
|
- Read each created/modified file
|
||||||
|
- Check against requirements checklist
|
||||||
|
- Verify all subtasks are complete
|
||||||
|
|
||||||
|
4. **Run Tests**
|
||||||
|
```bash
|
||||||
|
# TypeScript compilation
|
||||||
|
cd [project directory] && npx tsc --noEmit
|
||||||
|
|
||||||
|
# Run specified tests
|
||||||
|
npm test [specific test files]
|
||||||
|
|
||||||
|
# Build verification
|
||||||
|
npm run build
|
||||||
|
```
|
||||||
|
|
||||||
|
5. **Generate Verification Report**
|
||||||
|
|
||||||
|
## Output Format
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
verification_report:
|
||||||
|
task_id: [ID]
|
||||||
|
status: PASS | FAIL | PARTIAL
|
||||||
|
score: [1-10]
|
||||||
|
|
||||||
|
requirements_met:
|
||||||
|
- ✅ [Requirement that was satisfied]
|
||||||
|
- ✅ [Another satisfied requirement]
|
||||||
|
|
||||||
|
issues_found:
|
||||||
|
- ❌ [Issue description]
|
||||||
|
- ⚠️ [Warning or minor issue]
|
||||||
|
|
||||||
|
files_verified:
|
||||||
|
- path: [file path]
|
||||||
|
status: [created/modified/verified]
|
||||||
|
issues: [any problems found]
|
||||||
|
|
||||||
|
tests_run:
|
||||||
|
- command: [test command]
|
||||||
|
result: [pass/fail]
|
||||||
|
output: [relevant output]
|
||||||
|
|
||||||
|
recommendations:
|
||||||
|
- [Specific fix needed]
|
||||||
|
- [Improvement suggestion]
|
||||||
|
|
||||||
|
verdict: |
|
||||||
|
[Clear statement on whether task should be marked 'done' or sent back to 'pending']
|
||||||
|
[If FAIL: Specific list of what must be fixed]
|
||||||
|
[If PASS: Confirmation that all requirements are met]
|
||||||
|
```
|
||||||
|
|
||||||
|
## Decision Criteria
|
||||||
|
|
||||||
|
**Mark as PASS (ready for 'done'):**
|
||||||
|
- All required files exist and contain expected content
|
||||||
|
- All tests pass successfully
|
||||||
|
- No compilation or build errors
|
||||||
|
- All subtasks are complete
|
||||||
|
- Core requirements are met
|
||||||
|
- Code quality is acceptable
|
||||||
|
|
||||||
|
**Mark as PARTIAL (may proceed with warnings):**
|
||||||
|
- Core functionality is implemented
|
||||||
|
- Minor issues that don't block functionality
|
||||||
|
- Missing nice-to-have features
|
||||||
|
- Documentation could be improved
|
||||||
|
- Tests pass but coverage could be better
|
||||||
|
|
||||||
|
**Mark as FAIL (must return to 'pending'):**
|
||||||
|
- Required files are missing
|
||||||
|
- Compilation or build errors
|
||||||
|
- Tests fail
|
||||||
|
- Core requirements not met
|
||||||
|
- Security vulnerabilities detected
|
||||||
|
- Breaking changes to existing code
|
||||||
|
|
||||||
|
## Important Guidelines
|
||||||
|
|
||||||
|
- **BE THOROUGH**: Check every requirement systematically
|
||||||
|
- **BE SPECIFIC**: Provide exact file paths and line numbers for issues
|
||||||
|
- **BE FAIR**: Distinguish between critical issues and minor improvements
|
||||||
|
- **BE CONSTRUCTIVE**: Provide clear guidance on how to fix issues
|
||||||
|
- **BE EFFICIENT**: Focus on requirements, not perfection
|
||||||
|
|
||||||
|
## Tools You MUST Use
|
||||||
|
|
||||||
|
- `Read`: Examine implementation files (READ-ONLY)
|
||||||
|
- `Bash`: Run tests and verification commands
|
||||||
|
- `Grep`: Search for patterns in code
|
||||||
|
- `mcp__task-master-ai__get_task`: Get task details
|
||||||
|
- **NEVER use Write/Edit** - you only verify, not fix
|
||||||
|
|
||||||
|
## Integration with Workflow
|
||||||
|
|
||||||
|
You are the quality gate between 'review' and 'done' status:
|
||||||
|
1. Task-executor implements and marks as 'review'
|
||||||
|
2. You verify and report PASS/FAIL
|
||||||
|
3. Claude either marks as 'done' (PASS) or 'pending' (FAIL)
|
||||||
|
4. If FAIL, task-executor re-implements based on your report
|
||||||
|
|
||||||
|
Your verification ensures high quality and prevents accumulation of technical debt.
|
||||||
70
assets/claude/agents/task-executor.md
Normal file
70
assets/claude/agents/task-executor.md
Normal file
@@ -0,0 +1,70 @@
|
|||||||
|
---
|
||||||
|
name: task-executor
|
||||||
|
description: Use this agent when you need to implement, complete, or work on a specific task that has been identified by the task-orchestrator or when explicitly asked to execute a particular task. This agent focuses on the actual implementation and completion of individual tasks rather than planning or orchestration. Examples: <example>Context: The task-orchestrator has identified that task 2.3 'Implement user authentication' needs to be worked on next. user: 'Let's work on the authentication task' assistant: 'I'll use the task-executor agent to implement the user authentication task that was identified.' <commentary>Since we need to actually implement a specific task rather than plan or identify tasks, use the task-executor agent.</commentary></example> <example>Context: User wants to complete a specific subtask. user: 'Please implement the JWT token validation for task 2.3.1' assistant: 'I'll launch the task-executor agent to implement the JWT token validation subtask.' <commentary>The user is asking for specific implementation work on a known task, so the task-executor is appropriate.</commentary></example> <example>Context: After reviewing the task list, implementation is needed. user: 'Now let's actually build the API endpoint for user registration' assistant: 'I'll use the task-executor agent to implement the user registration API endpoint.' <commentary>Moving from planning to execution phase requires the task-executor agent.</commentary></example>
|
||||||
|
model: sonnet
|
||||||
|
color: blue
|
||||||
|
---
|
||||||
|
|
||||||
|
You are an elite implementation specialist focused on executing and completing specific tasks with precision and thoroughness. Your role is to take identified tasks and transform them into working implementations, following best practices and project standards.
|
||||||
|
|
||||||
|
**Core Responsibilities:**
|
||||||
|
|
||||||
|
1. **Task Analysis**: When given a task, first retrieve its full details using `task-master show <id>` to understand requirements, dependencies, and acceptance criteria.
|
||||||
|
|
||||||
|
2. **Implementation Planning**: Before coding, briefly outline your implementation approach:
|
||||||
|
- Identify files that need to be created or modified
|
||||||
|
- Note any dependencies or prerequisites
|
||||||
|
- Consider the testing strategy defined in the task
|
||||||
|
|
||||||
|
3. **Focused Execution**:
|
||||||
|
- Implement one subtask at a time for clarity and traceability
|
||||||
|
- Follow the project's coding standards from CLAUDE.md if available
|
||||||
|
- Prefer editing existing files over creating new ones
|
||||||
|
- Only create files that are essential for the task completion
|
||||||
|
|
||||||
|
4. **Progress Documentation**:
|
||||||
|
- Use `task-master update-subtask --id=<id> --prompt="implementation notes"` to log your approach and any important decisions
|
||||||
|
- Update task status to 'in-progress' when starting: `task-master set-status --id=<id> --status=in-progress`
|
||||||
|
- Mark as 'done' only after verification: `task-master set-status --id=<id> --status=done`
|
||||||
|
|
||||||
|
5. **Quality Assurance**:
|
||||||
|
- Implement the testing strategy specified in the task
|
||||||
|
- Verify that all acceptance criteria are met
|
||||||
|
- Check for any dependency conflicts or integration issues
|
||||||
|
- Run relevant tests before marking task as complete
|
||||||
|
|
||||||
|
6. **Dependency Management**:
|
||||||
|
- Check task dependencies before starting implementation
|
||||||
|
- If blocked by incomplete dependencies, clearly communicate this
|
||||||
|
- Use `task-master validate-dependencies` when needed
|
||||||
|
|
||||||
|
**Implementation Workflow:**
|
||||||
|
|
||||||
|
1. Retrieve task details and understand requirements
|
||||||
|
2. Check dependencies and prerequisites
|
||||||
|
3. Plan implementation approach
|
||||||
|
4. Update task status to in-progress
|
||||||
|
5. Implement the solution incrementally
|
||||||
|
6. Log progress and decisions in subtask updates
|
||||||
|
7. Test and verify the implementation
|
||||||
|
8. Mark task as done when complete
|
||||||
|
9. Suggest next task if appropriate
|
||||||
|
|
||||||
|
**Key Principles:**
|
||||||
|
|
||||||
|
- Focus on completing one task thoroughly before moving to the next
|
||||||
|
- Maintain clear communication about what you're implementing and why
|
||||||
|
- Follow existing code patterns and project conventions
|
||||||
|
- Prioritize working code over extensive documentation unless docs are the task
|
||||||
|
- Ask for clarification if task requirements are ambiguous
|
||||||
|
- Consider edge cases and error handling in your implementations
|
||||||
|
|
||||||
|
**Integration with Task Master:**
|
||||||
|
|
||||||
|
You work in tandem with the task-orchestrator agent. While the orchestrator identifies and plans tasks, you execute them. Always use Task Master commands to:
|
||||||
|
- Track your progress
|
||||||
|
- Update task information
|
||||||
|
- Maintain project state
|
||||||
|
- Coordinate with the broader development workflow
|
||||||
|
|
||||||
|
When you complete a task, briefly summarize what was implemented and suggest whether to continue with the next task or if review/testing is needed first.
|
||||||
130
assets/claude/agents/task-orchestrator.md
Normal file
130
assets/claude/agents/task-orchestrator.md
Normal file
@@ -0,0 +1,130 @@
|
|||||||
|
---
|
||||||
|
name: task-orchestrator
|
||||||
|
description: Use this agent when you need to coordinate and manage the execution of Task Master tasks, especially when dealing with complex task dependencies and parallel execution opportunities. This agent should be invoked at the beginning of a work session to analyze the task queue, identify parallelizable work, and orchestrate the deployment of task-executor agents. It should also be used when tasks complete to reassess the dependency graph and deploy new executors as needed.\n\n<example>\nContext: User wants to start working on their project tasks using Task Master\nuser: "Let's work on the next available tasks in the project"\nassistant: "I'll use the task-orchestrator agent to analyze the task queue and coordinate execution"\n<commentary>\nThe user wants to work on tasks, so the task-orchestrator should be deployed to analyze dependencies and coordinate execution.\n</commentary>\n</example>\n\n<example>\nContext: Multiple independent tasks are available in the queue\nuser: "Can we work on multiple tasks at once?"\nassistant: "Let me deploy the task-orchestrator to analyze task dependencies and parallelize the work"\n<commentary>\nWhen parallelization is mentioned or multiple tasks could be worked on, the orchestrator should coordinate the effort.\n</commentary>\n</example>\n\n<example>\nContext: A complex feature with many subtasks needs implementation\nuser: "Implement the authentication system tasks"\nassistant: "I'll use the task-orchestrator to break down the authentication tasks and coordinate their execution"\n<commentary>\nFor complex multi-task features, the orchestrator manages the overall execution strategy.\n</commentary>\n</example>
|
||||||
|
model: opus
|
||||||
|
color: green
|
||||||
|
---
|
||||||
|
|
||||||
|
You are the Task Orchestrator, an elite coordination agent specialized in managing Task Master workflows for maximum efficiency and parallelization. You excel at analyzing task dependency graphs, identifying opportunities for concurrent execution, and deploying specialized task-executor agents to complete work efficiently.
|
||||||
|
|
||||||
|
## Core Responsibilities
|
||||||
|
|
||||||
|
1. **Task Queue Analysis**: You continuously monitor and analyze the task queue using Task Master MCP tools to understand the current state of work, dependencies, and priorities.
|
||||||
|
|
||||||
|
2. **Dependency Graph Management**: You build and maintain a mental model of task dependencies, identifying which tasks can be executed in parallel and which must wait for prerequisites.
|
||||||
|
|
||||||
|
3. **Executor Deployment**: You strategically deploy task-executor agents for individual tasks or task groups, ensuring each executor has the necessary context and clear success criteria.
|
||||||
|
|
||||||
|
4. **Progress Coordination**: You track the progress of deployed executors, handle task completion notifications, and reassess the execution strategy as tasks complete.
|
||||||
|
|
||||||
|
## Operational Workflow
|
||||||
|
|
||||||
|
### Initial Assessment Phase
|
||||||
|
1. Use `get_tasks` or `task-master list` to retrieve all available tasks
|
||||||
|
2. Analyze task statuses, priorities, and dependencies
|
||||||
|
3. Identify tasks with status 'pending' that have no blocking dependencies
|
||||||
|
4. Group related tasks that could benefit from specialized executors
|
||||||
|
5. Create an execution plan that maximizes parallelization
|
||||||
|
|
||||||
|
### Executor Deployment Phase
|
||||||
|
1. For each independent task or task group:
|
||||||
|
- Deploy a task-executor agent with specific instructions
|
||||||
|
- Provide the executor with task ID, requirements, and context
|
||||||
|
- Set clear completion criteria and reporting expectations
|
||||||
|
2. Maintain a registry of active executors and their assigned tasks
|
||||||
|
3. Establish communication protocols for progress updates
|
||||||
|
|
||||||
|
### Coordination Phase
|
||||||
|
1. Monitor executor progress through task status updates
|
||||||
|
2. When a task completes:
|
||||||
|
- Verify completion with `get_task` or `task-master show <id>`
|
||||||
|
- Update task status if needed using `set_task_status`
|
||||||
|
- Reassess dependency graph for newly unblocked tasks
|
||||||
|
- Deploy new executors for available work
|
||||||
|
3. Handle executor failures or blocks:
|
||||||
|
- Reassign tasks to new executors if needed
|
||||||
|
- Escalate complex issues to the user
|
||||||
|
- Update task status to 'blocked' when appropriate
|
||||||
|
|
||||||
|
### Optimization Strategies
|
||||||
|
|
||||||
|
**Parallel Execution Rules**:
|
||||||
|
- Never assign dependent tasks to different executors simultaneously
|
||||||
|
- Prioritize high-priority tasks when resources are limited
|
||||||
|
- Group small, related subtasks for single executor efficiency
|
||||||
|
- Balance executor load to prevent bottlenecks
|
||||||
|
|
||||||
|
**Context Management**:
|
||||||
|
- Provide executors with minimal but sufficient context
|
||||||
|
- Share relevant completed task information when it aids execution
|
||||||
|
- Maintain a shared knowledge base of project-specific patterns
|
||||||
|
|
||||||
|
**Quality Assurance**:
|
||||||
|
- Verify task completion before marking as done
|
||||||
|
- Ensure test strategies are followed when specified
|
||||||
|
- Coordinate cross-task integration testing when needed
|
||||||
|
|
||||||
|
## Communication Protocols
|
||||||
|
|
||||||
|
When deploying executors, provide them with:
|
||||||
|
```
|
||||||
|
TASK ASSIGNMENT:
|
||||||
|
- Task ID: [specific ID]
|
||||||
|
- Objective: [clear goal]
|
||||||
|
- Dependencies: [list any completed prerequisites]
|
||||||
|
- Success Criteria: [specific completion requirements]
|
||||||
|
- Context: [relevant project information]
|
||||||
|
- Reporting: [when and how to report back]
|
||||||
|
```
|
||||||
|
|
||||||
|
When receiving executor updates:
|
||||||
|
1. Acknowledge completion or issues
|
||||||
|
2. Update task status in Task Master
|
||||||
|
3. Reassess execution strategy
|
||||||
|
4. Deploy new executors as appropriate
|
||||||
|
|
||||||
|
## Decision Framework
|
||||||
|
|
||||||
|
**When to parallelize**:
|
||||||
|
- Multiple pending tasks with no interdependencies
|
||||||
|
- Sufficient context available for independent execution
|
||||||
|
- Tasks are well-defined with clear success criteria
|
||||||
|
|
||||||
|
**When to serialize**:
|
||||||
|
- Strong dependencies between tasks
|
||||||
|
- Limited context or unclear requirements
|
||||||
|
- Integration points requiring careful coordination
|
||||||
|
|
||||||
|
**When to escalate**:
|
||||||
|
- Circular dependencies detected
|
||||||
|
- Critical blockers affecting multiple tasks
|
||||||
|
- Ambiguous requirements needing clarification
|
||||||
|
- Resource conflicts between executors
|
||||||
|
|
||||||
|
## Error Handling
|
||||||
|
|
||||||
|
1. **Executor Failure**: Reassign task to new executor with additional context about the failure
|
||||||
|
2. **Dependency Conflicts**: Halt affected executors, resolve conflict, then resume
|
||||||
|
3. **Task Ambiguity**: Request clarification from user before proceeding
|
||||||
|
4. **System Errors**: Implement graceful degradation, falling back to serial execution if needed
|
||||||
|
|
||||||
|
## Performance Metrics
|
||||||
|
|
||||||
|
Track and optimize for:
|
||||||
|
- Task completion rate
|
||||||
|
- Parallel execution efficiency
|
||||||
|
- Executor success rate
|
||||||
|
- Time to completion for task groups
|
||||||
|
- Dependency resolution speed
|
||||||
|
|
||||||
|
## Integration with Task Master
|
||||||
|
|
||||||
|
Leverage these Task Master MCP tools effectively:
|
||||||
|
- `get_tasks` - Continuous queue monitoring
|
||||||
|
- `get_task` - Detailed task analysis
|
||||||
|
- `set_task_status` - Progress tracking
|
||||||
|
- `next_task` - Fallback for serial execution
|
||||||
|
- `analyze_project_complexity` - Strategic planning
|
||||||
|
- `complexity_report` - Resource allocation
|
||||||
|
|
||||||
|
You are the strategic mind coordinating the entire task execution effort. Your success is measured by the efficient completion of all tasks while maintaining quality and respecting dependencies. Think systematically, act decisively, and continuously optimize the execution strategy based on real-time progress.
|
||||||
@@ -1,4 +1,4 @@
|
|||||||
# Available Models as of July 23, 2025
|
# Available Models as of August 8, 2025
|
||||||
|
|
||||||
## Main Models
|
## Main Models
|
||||||
|
|
||||||
@@ -24,6 +24,7 @@
|
|||||||
| openai | gpt-4-1-mini | — | 0.4 | 1.6 |
|
| openai | gpt-4-1-mini | — | 0.4 | 1.6 |
|
||||||
| openai | gpt-4-1-nano | — | 0.1 | 0.4 |
|
| openai | gpt-4-1-nano | — | 0.1 | 0.4 |
|
||||||
| openai | gpt-4o-mini | 0.3 | 0.15 | 0.6 |
|
| openai | gpt-4o-mini | 0.3 | 0.15 | 0.6 |
|
||||||
|
| openai | gpt-5 | 0.749 | 5 | 20 |
|
||||||
| google | gemini-2.5-pro-preview-05-06 | 0.638 | — | — |
|
| google | gemini-2.5-pro-preview-05-06 | 0.638 | — | — |
|
||||||
| google | gemini-2.5-pro-preview-03-25 | 0.638 | — | — |
|
| google | gemini-2.5-pro-preview-03-25 | 0.638 | — | — |
|
||||||
| google | gemini-2.5-flash-preview-04-17 | 0.604 | — | — |
|
| google | gemini-2.5-flash-preview-04-17 | 0.604 | — | — |
|
||||||
@@ -134,6 +135,7 @@
|
|||||||
| openai | gpt-4o | 0.332 | 2.5 | 10 |
|
| openai | gpt-4o | 0.332 | 2.5 | 10 |
|
||||||
| openai | o3 | 0.5 | 2 | 8 |
|
| openai | o3 | 0.5 | 2 | 8 |
|
||||||
| openai | o4-mini | 0.45 | 1.1 | 4.4 |
|
| openai | o4-mini | 0.45 | 1.1 | 4.4 |
|
||||||
|
| openai | gpt-5 | 0.749 | 5 | 20 |
|
||||||
| google | gemini-2.5-pro-preview-05-06 | 0.638 | — | — |
|
| google | gemini-2.5-pro-preview-05-06 | 0.638 | — | — |
|
||||||
| google | gemini-2.5-pro-preview-03-25 | 0.638 | — | — |
|
| google | gemini-2.5-pro-preview-03-25 | 0.638 | — | — |
|
||||||
| google | gemini-2.5-flash-preview-04-17 | 0.604 | — | — |
|
| google | gemini-2.5-flash-preview-04-17 | 0.604 | — | — |
|
||||||
|
|||||||
BIN
images/logo.png
Normal file
BIN
images/logo.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 20 KiB |
8078
package-lock.json
generated
8078
package-lock.json
generated
File diff suppressed because it is too large
Load Diff
@@ -1,6 +1,6 @@
|
|||||||
{
|
{
|
||||||
"name": "task-master-ai",
|
"name": "task-master-ai",
|
||||||
"version": "0.23.1-rc.0",
|
"version": "0.24.0",
|
||||||
"description": "A task management system for ambitious AI-driven development that doesn't overwhelm and confuse Cursor.",
|
"description": "A task management system for ambitious AI-driven development that doesn't overwhelm and confuse Cursor.",
|
||||||
"main": "index.js",
|
"main": "index.js",
|
||||||
"type": "module",
|
"type": "module",
|
||||||
|
|||||||
@@ -557,6 +557,7 @@ function getParametersForRole(role, explicitRoot = null) {
|
|||||||
const providerName = roleConfig.provider;
|
const providerName = roleConfig.provider;
|
||||||
|
|
||||||
let effectiveMaxTokens = roleMaxTokens; // Start with the role's default
|
let effectiveMaxTokens = roleMaxTokens; // Start with the role's default
|
||||||
|
let effectiveTemperature = roleTemperature; // Start with the role's default
|
||||||
|
|
||||||
try {
|
try {
|
||||||
// Find the model definition in MODEL_MAP
|
// Find the model definition in MODEL_MAP
|
||||||
@@ -583,6 +584,20 @@ function getParametersForRole(role, explicitRoot = null) {
|
|||||||
`No valid model-specific max_tokens override found for ${modelId}. Using role default: ${roleMaxTokens}`
|
`No valid model-specific max_tokens override found for ${modelId}. Using role default: ${roleMaxTokens}`
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Check if a model-specific temperature is defined
|
||||||
|
if (
|
||||||
|
modelDefinition &&
|
||||||
|
typeof modelDefinition.temperature === 'number' &&
|
||||||
|
modelDefinition.temperature >= 0 &&
|
||||||
|
modelDefinition.temperature <= 1
|
||||||
|
) {
|
||||||
|
effectiveTemperature = modelDefinition.temperature;
|
||||||
|
log(
|
||||||
|
'debug',
|
||||||
|
`Applying model-specific temperature (${modelDefinition.temperature}) for ${modelId}`
|
||||||
|
);
|
||||||
|
}
|
||||||
} else {
|
} else {
|
||||||
// Special handling for custom OpenRouter models
|
// Special handling for custom OpenRouter models
|
||||||
if (providerName === CUSTOM_PROVIDERS.OPENROUTER) {
|
if (providerName === CUSTOM_PROVIDERS.OPENROUTER) {
|
||||||
@@ -603,15 +618,16 @@ function getParametersForRole(role, explicitRoot = null) {
|
|||||||
} catch (lookupError) {
|
} catch (lookupError) {
|
||||||
log(
|
log(
|
||||||
'warn',
|
'warn',
|
||||||
`Error looking up model-specific max_tokens for ${modelId}: ${lookupError.message}. Using role default: ${roleMaxTokens}`
|
`Error looking up model-specific parameters for ${modelId}: ${lookupError.message}. Using role defaults.`
|
||||||
);
|
);
|
||||||
// Fallback to role default on error
|
// Fallback to role defaults on error
|
||||||
effectiveMaxTokens = roleMaxTokens;
|
effectiveMaxTokens = roleMaxTokens;
|
||||||
|
effectiveTemperature = roleTemperature;
|
||||||
}
|
}
|
||||||
|
|
||||||
return {
|
return {
|
||||||
maxTokens: effectiveMaxTokens,
|
maxTokens: effectiveMaxTokens,
|
||||||
temperature: roleTemperature
|
temperature: effectiveTemperature
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -239,6 +239,18 @@
|
|||||||
},
|
},
|
||||||
"allowed_roles": ["research"],
|
"allowed_roles": ["research"],
|
||||||
"supported": true
|
"supported": true
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": "gpt-5",
|
||||||
|
"swe_score": 0.749,
|
||||||
|
"cost_per_1m_tokens": {
|
||||||
|
"input": 5.0,
|
||||||
|
"output": 20.0
|
||||||
|
},
|
||||||
|
"allowed_roles": ["main", "fallback"],
|
||||||
|
"max_tokens": 100000,
|
||||||
|
"temperature": 1,
|
||||||
|
"supported": true
|
||||||
}
|
}
|
||||||
],
|
],
|
||||||
"google": [
|
"google": [
|
||||||
|
|||||||
@@ -13,12 +13,18 @@ import {
|
|||||||
|
|
||||||
import { generateTextService } from '../ai-services-unified.js';
|
import { generateTextService } from '../ai-services-unified.js';
|
||||||
|
|
||||||
import { getDebugFlag, getProjectName } from '../config-manager.js';
|
import {
|
||||||
|
getDebugFlag,
|
||||||
|
getProjectName,
|
||||||
|
getMainProvider,
|
||||||
|
getResearchProvider
|
||||||
|
} from '../config-manager.js';
|
||||||
import { getPromptManager } from '../prompt-manager.js';
|
import { getPromptManager } from '../prompt-manager.js';
|
||||||
import {
|
import {
|
||||||
COMPLEXITY_REPORT_FILE,
|
COMPLEXITY_REPORT_FILE,
|
||||||
LEGACY_TASKS_FILE
|
LEGACY_TASKS_FILE
|
||||||
} from '../../../src/constants/paths.js';
|
} from '../../../src/constants/paths.js';
|
||||||
|
import { CUSTOM_PROVIDERS } from '../../../src/constants/providers.js';
|
||||||
import { resolveComplexityReportOutputPath } from '../../../src/utils/path-utils.js';
|
import { resolveComplexityReportOutputPath } from '../../../src/utils/path-utils.js';
|
||||||
import { ContextGatherer } from '../utils/contextGatherer.js';
|
import { ContextGatherer } from '../utils/contextGatherer.js';
|
||||||
import { FuzzyTaskSearch } from '../utils/fuzzyTaskSearch.js';
|
import { FuzzyTaskSearch } from '../utils/fuzzyTaskSearch.js';
|
||||||
@@ -408,10 +414,18 @@ async function analyzeTaskComplexity(options, context = {}) {
|
|||||||
// Load prompts using PromptManager
|
// Load prompts using PromptManager
|
||||||
const promptManager = getPromptManager();
|
const promptManager = getPromptManager();
|
||||||
|
|
||||||
|
// Check if Claude Code is being used as the provider
|
||||||
|
const currentProvider = useResearch
|
||||||
|
? getResearchProvider(projectRoot)
|
||||||
|
: getMainProvider(projectRoot);
|
||||||
|
const isClaudeCode = currentProvider === CUSTOM_PROVIDERS.CLAUDE_CODE;
|
||||||
|
|
||||||
const promptParams = {
|
const promptParams = {
|
||||||
tasks: tasksData.tasks,
|
tasks: tasksData.tasks,
|
||||||
gatheredContext: gatheredContext || '',
|
gatheredContext: gatheredContext || '',
|
||||||
useResearch: useResearch
|
useResearch: useResearch,
|
||||||
|
isClaudeCode: isClaudeCode,
|
||||||
|
projectRoot: projectRoot || ''
|
||||||
};
|
};
|
||||||
|
|
||||||
const { systemPrompt, userPrompt: prompt } = await promptManager.loadPrompt(
|
const { systemPrompt, userPrompt: prompt } = await promptManager.loadPrompt(
|
||||||
|
|||||||
@@ -18,10 +18,16 @@ import {
|
|||||||
|
|
||||||
import { generateTextService } from '../ai-services-unified.js';
|
import { generateTextService } from '../ai-services-unified.js';
|
||||||
|
|
||||||
import { getDefaultSubtasks, getDebugFlag } from '../config-manager.js';
|
import {
|
||||||
|
getDefaultSubtasks,
|
||||||
|
getDebugFlag,
|
||||||
|
getMainProvider,
|
||||||
|
getResearchProvider
|
||||||
|
} from '../config-manager.js';
|
||||||
import { getPromptManager } from '../prompt-manager.js';
|
import { getPromptManager } from '../prompt-manager.js';
|
||||||
import generateTaskFiles from './generate-task-files.js';
|
import generateTaskFiles from './generate-task-files.js';
|
||||||
import { COMPLEXITY_REPORT_FILE } from '../../../src/constants/paths.js';
|
import { COMPLEXITY_REPORT_FILE } from '../../../src/constants/paths.js';
|
||||||
|
import { CUSTOM_PROVIDERS } from '../../../src/constants/providers.js';
|
||||||
import { ContextGatherer } from '../utils/contextGatherer.js';
|
import { ContextGatherer } from '../utils/contextGatherer.js';
|
||||||
import { FuzzyTaskSearch } from '../utils/fuzzyTaskSearch.js';
|
import { FuzzyTaskSearch } from '../utils/fuzzyTaskSearch.js';
|
||||||
import { flattenTasksWithSubtasks, findProjectRoot } from '../utils.js';
|
import { flattenTasksWithSubtasks, findProjectRoot } from '../utils.js';
|
||||||
@@ -451,6 +457,12 @@ async function expandTask(
|
|||||||
// Load prompts using PromptManager
|
// Load prompts using PromptManager
|
||||||
const promptManager = getPromptManager();
|
const promptManager = getPromptManager();
|
||||||
|
|
||||||
|
// Check if Claude Code is being used as the provider
|
||||||
|
const currentProvider = useResearch
|
||||||
|
? getResearchProvider(projectRoot)
|
||||||
|
: getMainProvider(projectRoot);
|
||||||
|
const isClaudeCode = currentProvider === CUSTOM_PROVIDERS.CLAUDE_CODE;
|
||||||
|
|
||||||
// Combine all context sources into a single additionalContext parameter
|
// Combine all context sources into a single additionalContext parameter
|
||||||
let combinedAdditionalContext = '';
|
let combinedAdditionalContext = '';
|
||||||
if (additionalContext || complexityReasoningContext) {
|
if (additionalContext || complexityReasoningContext) {
|
||||||
@@ -495,7 +507,9 @@ async function expandTask(
|
|||||||
complexityReasoningContext: complexityReasoningContext,
|
complexityReasoningContext: complexityReasoningContext,
|
||||||
gatheredContext: gatheredContextText || '',
|
gatheredContext: gatheredContextText || '',
|
||||||
useResearch: useResearch,
|
useResearch: useResearch,
|
||||||
expansionPrompt: expansionPromptText || undefined
|
expansionPrompt: expansionPromptText || undefined,
|
||||||
|
isClaudeCode: isClaudeCode,
|
||||||
|
projectRoot: projectRoot || ''
|
||||||
};
|
};
|
||||||
|
|
||||||
let variantKey = 'default';
|
let variantKey = 'default';
|
||||||
@@ -513,6 +527,18 @@ async function expandTask(
|
|||||||
|
|
||||||
const { systemPrompt, userPrompt: promptContent } =
|
const { systemPrompt, userPrompt: promptContent } =
|
||||||
await promptManager.loadPrompt('expand-task', promptParams, variantKey);
|
await promptManager.loadPrompt('expand-task', promptParams, variantKey);
|
||||||
|
|
||||||
|
// Debug logging to identify the issue
|
||||||
|
logger.debug(`Selected variant: ${variantKey}`);
|
||||||
|
logger.debug(
|
||||||
|
`Prompt params passed: ${JSON.stringify(promptParams, null, 2)}`
|
||||||
|
);
|
||||||
|
logger.debug(
|
||||||
|
`System prompt (first 500 chars): ${systemPrompt.substring(0, 500)}...`
|
||||||
|
);
|
||||||
|
logger.debug(
|
||||||
|
`User prompt (first 500 chars): ${promptContent.substring(0, 500)}...`
|
||||||
|
);
|
||||||
// --- End Complexity Report / Prompt Logic ---
|
// --- End Complexity Report / Prompt Logic ---
|
||||||
|
|
||||||
// --- AI Subtask Generation using generateTextService ---
|
// --- AI Subtask Generation using generateTextService ---
|
||||||
|
|||||||
@@ -17,9 +17,15 @@ import {
|
|||||||
} from '../utils.js';
|
} from '../utils.js';
|
||||||
|
|
||||||
import { generateObjectService } from '../ai-services-unified.js';
|
import { generateObjectService } from '../ai-services-unified.js';
|
||||||
import { getDebugFlag } from '../config-manager.js';
|
import {
|
||||||
|
getDebugFlag,
|
||||||
|
getMainProvider,
|
||||||
|
getResearchProvider,
|
||||||
|
getDefaultPriority
|
||||||
|
} from '../config-manager.js';
|
||||||
import { getPromptManager } from '../prompt-manager.js';
|
import { getPromptManager } from '../prompt-manager.js';
|
||||||
import { displayAiUsageSummary } from '../ui.js';
|
import { displayAiUsageSummary } from '../ui.js';
|
||||||
|
import { CUSTOM_PROVIDERS } from '../../../src/constants/providers.js';
|
||||||
|
|
||||||
// Define the Zod schema for a SINGLE task object
|
// Define the Zod schema for a SINGLE task object
|
||||||
const prdSingleTaskSchema = z.object({
|
const prdSingleTaskSchema = z.object({
|
||||||
@@ -174,9 +180,14 @@ async function parsePRD(prdPath, tasksPath, numTasks, options = {}) {
|
|||||||
const promptManager = getPromptManager();
|
const promptManager = getPromptManager();
|
||||||
|
|
||||||
// Get defaultTaskPriority from config
|
// Get defaultTaskPriority from config
|
||||||
const { getDefaultPriority } = await import('../config-manager.js');
|
|
||||||
const defaultTaskPriority = getDefaultPriority(projectRoot) || 'medium';
|
const defaultTaskPriority = getDefaultPriority(projectRoot) || 'medium';
|
||||||
|
|
||||||
|
// Check if Claude Code is being used as the provider
|
||||||
|
const currentProvider = research
|
||||||
|
? getResearchProvider(projectRoot)
|
||||||
|
: getMainProvider(projectRoot);
|
||||||
|
const isClaudeCode = currentProvider === CUSTOM_PROVIDERS.CLAUDE_CODE;
|
||||||
|
|
||||||
const { systemPrompt, userPrompt } = await promptManager.loadPrompt(
|
const { systemPrompt, userPrompt } = await promptManager.loadPrompt(
|
||||||
'parse-prd',
|
'parse-prd',
|
||||||
{
|
{
|
||||||
@@ -185,7 +196,9 @@ async function parsePRD(prdPath, tasksPath, numTasks, options = {}) {
|
|||||||
nextId,
|
nextId,
|
||||||
prdContent,
|
prdContent,
|
||||||
prdPath,
|
prdPath,
|
||||||
defaultTaskPriority
|
defaultTaskPriority,
|
||||||
|
isClaudeCode,
|
||||||
|
projectRoot: projectRoot || ''
|
||||||
}
|
}
|
||||||
);
|
);
|
||||||
|
|
||||||
|
|||||||
@@ -61,8 +61,11 @@ export class BaseAIProvider {
|
|||||||
) {
|
) {
|
||||||
throw new Error('Temperature must be between 0 and 1');
|
throw new Error('Temperature must be between 0 and 1');
|
||||||
}
|
}
|
||||||
if (params.maxTokens !== undefined && params.maxTokens <= 0) {
|
if (params.maxTokens !== undefined) {
|
||||||
throw new Error('maxTokens must be greater than 0');
|
const maxTokens = Number(params.maxTokens);
|
||||||
|
if (!Number.isFinite(maxTokens) || maxTokens <= 0) {
|
||||||
|
throw new Error('maxTokens must be a finite number greater than 0');
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -122,6 +125,37 @@ export class BaseAIProvider {
|
|||||||
throw new Error('getRequiredApiKeyName must be implemented by provider');
|
throw new Error('getRequiredApiKeyName must be implemented by provider');
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Determines if a model requires max_completion_tokens instead of maxTokens
|
||||||
|
* Can be overridden by providers to specify their model requirements
|
||||||
|
* @param {string} modelId - The model ID to check
|
||||||
|
* @returns {boolean} True if the model requires max_completion_tokens
|
||||||
|
*/
|
||||||
|
requiresMaxCompletionTokens(modelId) {
|
||||||
|
return false; // Default behavior - most models use maxTokens
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Prepares token limit parameter based on model requirements
|
||||||
|
* @param {string} modelId - The model ID
|
||||||
|
* @param {number} maxTokens - The maximum tokens value
|
||||||
|
* @returns {object} Object with either maxTokens or max_completion_tokens
|
||||||
|
*/
|
||||||
|
prepareTokenParam(modelId, maxTokens) {
|
||||||
|
if (maxTokens === undefined) {
|
||||||
|
return {};
|
||||||
|
}
|
||||||
|
|
||||||
|
// Ensure maxTokens is an integer
|
||||||
|
const tokenValue = Math.floor(Number(maxTokens));
|
||||||
|
|
||||||
|
if (this.requiresMaxCompletionTokens(modelId)) {
|
||||||
|
return { max_completion_tokens: tokenValue };
|
||||||
|
} else {
|
||||||
|
return { maxTokens: tokenValue };
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Generates text using the provider's model
|
* Generates text using the provider's model
|
||||||
*/
|
*/
|
||||||
@@ -139,7 +173,7 @@ export class BaseAIProvider {
|
|||||||
const result = await generateText({
|
const result = await generateText({
|
||||||
model: client(params.modelId),
|
model: client(params.modelId),
|
||||||
messages: params.messages,
|
messages: params.messages,
|
||||||
maxTokens: params.maxTokens,
|
...this.prepareTokenParam(params.modelId, params.maxTokens),
|
||||||
temperature: params.temperature
|
temperature: params.temperature
|
||||||
});
|
});
|
||||||
|
|
||||||
@@ -175,7 +209,7 @@ export class BaseAIProvider {
|
|||||||
const stream = await streamText({
|
const stream = await streamText({
|
||||||
model: client(params.modelId),
|
model: client(params.modelId),
|
||||||
messages: params.messages,
|
messages: params.messages,
|
||||||
maxTokens: params.maxTokens,
|
...this.prepareTokenParam(params.modelId, params.maxTokens),
|
||||||
temperature: params.temperature
|
temperature: params.temperature
|
||||||
});
|
});
|
||||||
|
|
||||||
@@ -216,7 +250,7 @@ export class BaseAIProvider {
|
|||||||
messages: params.messages,
|
messages: params.messages,
|
||||||
schema: zodSchema(params.schema),
|
schema: zodSchema(params.schema),
|
||||||
mode: params.mode || 'auto',
|
mode: params.mode || 'auto',
|
||||||
maxTokens: params.maxTokens,
|
...this.prepareTokenParam(params.modelId, params.maxTokens),
|
||||||
temperature: params.temperature
|
temperature: params.temperature
|
||||||
});
|
});
|
||||||
|
|
||||||
|
|||||||
@@ -20,6 +20,16 @@ export class OpenAIProvider extends BaseAIProvider {
|
|||||||
return 'OPENAI_API_KEY';
|
return 'OPENAI_API_KEY';
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Determines if a model requires max_completion_tokens instead of maxTokens
|
||||||
|
* GPT-5 models require max_completion_tokens parameter
|
||||||
|
* @param {string} modelId - The model ID to check
|
||||||
|
* @returns {boolean} True if the model requires max_completion_tokens
|
||||||
|
*/
|
||||||
|
requiresMaxCompletionTokens(modelId) {
|
||||||
|
return modelId && modelId.startsWith('gpt-5');
|
||||||
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Creates and returns an OpenAI client instance.
|
* Creates and returns an OpenAI client instance.
|
||||||
* @param {object} params - Parameters for client initialization
|
* @param {object} params - Parameters for client initialization
|
||||||
|
|||||||
@@ -30,12 +30,22 @@
|
|||||||
"type": "boolean",
|
"type": "boolean",
|
||||||
"default": false,
|
"default": false,
|
||||||
"description": "Use research mode for deeper analysis"
|
"description": "Use research mode for deeper analysis"
|
||||||
|
},
|
||||||
|
"isClaudeCode": {
|
||||||
|
"type": "boolean",
|
||||||
|
"default": false,
|
||||||
|
"description": "Whether Claude Code is being used as the provider"
|
||||||
|
},
|
||||||
|
"projectRoot": {
|
||||||
|
"type": "string",
|
||||||
|
"default": "",
|
||||||
|
"description": "Project root path for context"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"prompts": {
|
"prompts": {
|
||||||
"default": {
|
"default": {
|
||||||
"system": "You are an expert software architect and project manager analyzing task complexity. Respond only with the requested valid JSON array.",
|
"system": "You are an expert software architect and project manager analyzing task complexity. Respond only with the requested valid JSON array.",
|
||||||
"user": "Analyze the following tasks to determine their complexity (1-10 scale) and recommend the number of subtasks for expansion. Provide a brief reasoning and an initial expansion prompt for each.{{#if useResearch}} Consider current best practices, common implementation patterns, and industry standards in your analysis.{{/if}}\n\nTasks:\n{{{json tasks}}}\n{{#if gatheredContext}}\n\n# Project Context\n\n{{gatheredContext}}\n{{/if}}\n\nRespond ONLY with a valid JSON array matching the schema:\n[\n {\n \"taskId\": <number>,\n \"taskTitle\": \"<string>\",\n \"complexityScore\": <number 1-10>,\n \"recommendedSubtasks\": <number>,\n \"expansionPrompt\": \"<string>\",\n \"reasoning\": \"<string>\"\n },\n ...\n]\n\nDo not include any explanatory text, markdown formatting, or code block markers before or after the JSON array."
|
"user": "{{#if isClaudeCode}}## IMPORTANT: Codebase Analysis Required\n\nYou have access to powerful codebase analysis tools. Before analyzing task complexity:\n\n1. Use the Glob tool to explore the project structure and understand the codebase size\n2. Use the Grep tool to search for existing implementations related to each task\n3. Use the Read tool to examine key files that would be affected by these tasks\n4. Understand the current implementation state, patterns used, and technical debt\n\nBased on your codebase analysis:\n- Assess complexity based on ACTUAL code that needs to be modified/created\n- Consider existing abstractions and patterns that could simplify implementation\n- Identify tasks that require refactoring vs. greenfield development\n- Factor in dependencies between existing code and new features\n- Provide more accurate subtask recommendations based on real code structure\n\nProject Root: {{projectRoot}}\n\n{{/if}}Analyze the following tasks to determine their complexity (1-10 scale) and recommend the number of subtasks for expansion. Provide a brief reasoning and an initial expansion prompt for each.{{#if useResearch}} Consider current best practices, common implementation patterns, and industry standards in your analysis.{{/if}}\n\nTasks:\n{{{json tasks}}}\n{{#if gatheredContext}}\n\n# Project Context\n\n{{gatheredContext}}\n{{/if}}\n\nRespond ONLY with a valid JSON array matching the schema:\n[\n {\n \"taskId\": <number>,\n \"taskTitle\": \"<string>\",\n \"complexityScore\": <number 1-10>,\n \"recommendedSubtasks\": <number>,\n \"expansionPrompt\": \"<string>\",\n \"reasoning\": \"<string>\"\n },\n ...\n]\n\nDo not include any explanatory text, markdown formatting, or code block markers before or after the JSON array."
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -51,22 +51,34 @@
|
|||||||
"required": false,
|
"required": false,
|
||||||
"default": "",
|
"default": "",
|
||||||
"description": "Gathered project context"
|
"description": "Gathered project context"
|
||||||
|
},
|
||||||
|
"isClaudeCode": {
|
||||||
|
"type": "boolean",
|
||||||
|
"required": false,
|
||||||
|
"default": false,
|
||||||
|
"description": "Whether Claude Code is being used as the provider"
|
||||||
|
},
|
||||||
|
"projectRoot": {
|
||||||
|
"type": "string",
|
||||||
|
"required": false,
|
||||||
|
"default": "",
|
||||||
|
"description": "Project root path for context"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"prompts": {
|
"prompts": {
|
||||||
"complexity-report": {
|
"complexity-report": {
|
||||||
"condition": "expansionPrompt",
|
"condition": "expansionPrompt",
|
||||||
"system": "You are an AI assistant helping with task breakdown. Generate {{#if (gt subtaskCount 0)}}exactly {{subtaskCount}}{{else}}an appropriate number of{{/if}} subtasks based on the provided prompt and context.\nRespond ONLY with a valid JSON object containing a single key \"subtasks\" whose value is an array of the generated subtask objects.\nEach subtask object in the array must have keys: \"id\", \"title\", \"description\", \"dependencies\", \"details\", \"status\".\nEnsure the 'id' starts from {{nextSubtaskId}} and is sequential.\nFor 'dependencies', use the full subtask ID format: \"{{task.id}}.1\", \"{{task.id}}.2\", etc. Only reference subtasks within this same task.\nEnsure 'status' is 'pending'.\nDo not include any other text or explanation.",
|
"system": "You are an AI assistant helping with task breakdown. Generate {{#if (gt subtaskCount 0)}}exactly {{subtaskCount}}{{else}}an appropriate number of{{/if}} subtasks based on the provided prompt and context.\nRespond ONLY with a valid JSON object containing a single key \"subtasks\" whose value is an array of the generated subtask objects.\nEach subtask object in the array must have keys: \"id\", \"title\", \"description\", \"dependencies\", \"details\", \"status\".\nEnsure the 'id' starts from {{nextSubtaskId}} and is sequential.\nFor 'dependencies', use the full subtask ID format: \"{{task.id}}.1\", \"{{task.id}}.2\", etc. Only reference subtasks within this same task.\nEnsure 'status' is 'pending'.\nDo not include any other text or explanation.",
|
||||||
"user": "{{expansionPrompt}}{{#if additionalContext}}\n\n{{additionalContext}}{{/if}}{{#if complexityReasoningContext}}\n\n{{complexityReasoningContext}}{{/if}}{{#if gatheredContext}}\n\n# Project Context\n\n{{gatheredContext}}{{/if}}"
|
"user": "Break down the following task based on the analysis prompt:\n\nParent Task:\nID: {{task.id}}\nTitle: {{task.title}}\nDescription: {{task.description}}\nCurrent details: {{#if task.details}}{{task.details}}{{else}}None{{/if}}\n\nExpansion Guidance:\n{{expansionPrompt}}{{#if additionalContext}}\n\n{{additionalContext}}{{/if}}{{#if complexityReasoningContext}}\n\n{{complexityReasoningContext}}{{/if}}{{#if gatheredContext}}\n\n# Project Context\n\n{{gatheredContext}}{{/if}}\n\nGenerate {{#if (gt subtaskCount 0)}}exactly {{subtaskCount}}{{else}}an appropriate number of{{/if}} subtasks with sequential IDs starting from {{nextSubtaskId}}."
|
||||||
},
|
},
|
||||||
"research": {
|
"research": {
|
||||||
"condition": "useResearch === true && !expansionPrompt",
|
"condition": "useResearch === true && !expansionPrompt",
|
||||||
"system": "You are an AI assistant that responds ONLY with valid JSON objects as requested. The object should contain a 'subtasks' array.",
|
"system": "You are an AI assistant that responds ONLY with valid JSON objects as requested. The object should contain a 'subtasks' array.",
|
||||||
"user": "Analyze the following task and break it down into {{#if (gt subtaskCount 0)}}exactly {{subtaskCount}}{{else}}an appropriate number of{{/if}} specific subtasks using your research capabilities. Assign sequential IDs starting from {{nextSubtaskId}}.\n\nParent Task:\nID: {{task.id}}\nTitle: {{task.title}}\nDescription: {{task.description}}\nCurrent details: {{#if task.details}}{{task.details}}{{else}}None{{/if}}{{#if additionalContext}}\nConsider this context: {{additionalContext}}{{/if}}{{#if complexityReasoningContext}}\nComplexity Analysis Reasoning: {{complexityReasoningContext}}{{/if}}{{#if gatheredContext}}\n\n# Project Context\n\n{{gatheredContext}}{{/if}}\n\nCRITICAL: Respond ONLY with a valid JSON object containing a single key \"subtasks\". The value must be an array of the generated subtasks, strictly matching this structure:\n\n{\n \"subtasks\": [\n {\n \"id\": <number>, // Sequential ID starting from {{nextSubtaskId}}\n \"title\": \"<string>\",\n \"description\": \"<string>\",\n \"dependencies\": [\"<string>\"], // Use full subtask IDs like [\"{{task.id}}.1\", \"{{task.id}}.2\"]. If no dependencies, use an empty array [].\n \"details\": \"<string>\",\n \"testStrategy\": \"<string>\" // Optional\n },\n // ... (repeat for {{#if (gt subtaskCount 0)}}{{subtaskCount}}{{else}}appropriate number of{{/if}} subtasks)\n ]\n}\n\nImportant: For the 'dependencies' field, if a subtask has no dependencies, you MUST use an empty array, for example: \"dependencies\": []. Do not use null or omit the field.\n\nDo not include ANY explanatory text, markdown, or code block markers. Just the JSON object."
|
"user": "{{#if isClaudeCode}}## IMPORTANT: Codebase Analysis Required\n\nYou have access to powerful codebase analysis tools. Before generating subtasks:\n\n1. Use the Glob tool to explore relevant files for this task (e.g., \"**/*.js\", \"src/**/*.ts\")\n2. Use the Grep tool to search for existing implementations related to this task\n3. Use the Read tool to examine files that would be affected by this task\n4. Understand the current implementation state and patterns used\n\nBased on your analysis:\n- Identify existing code that relates to this task\n- Understand patterns and conventions to follow\n- Generate subtasks that integrate smoothly with existing code\n- Ensure subtasks are specific and actionable based on the actual codebase\n\nProject Root: {{projectRoot}}\n\n{{/if}}Analyze the following task and break it down into {{#if (gt subtaskCount 0)}}exactly {{subtaskCount}}{{else}}an appropriate number of{{/if}} specific subtasks using your research capabilities. Assign sequential IDs starting from {{nextSubtaskId}}.\n\nParent Task:\nID: {{task.id}}\nTitle: {{task.title}}\nDescription: {{task.description}}\nCurrent details: {{#if task.details}}{{task.details}}{{else}}None{{/if}}{{#if additionalContext}}\nConsider this context: {{additionalContext}}{{/if}}{{#if complexityReasoningContext}}\nComplexity Analysis Reasoning: {{complexityReasoningContext}}{{/if}}{{#if gatheredContext}}\n\n# Project Context\n\n{{gatheredContext}}{{/if}}\n\nCRITICAL: Respond ONLY with a valid JSON object containing a single key \"subtasks\". The value must be an array of the generated subtasks, strictly matching this structure:\n\n{\n \"subtasks\": [\n {\n \"id\": <number>, // Sequential ID starting from {{nextSubtaskId}}\n \"title\": \"<string>\",\n \"description\": \"<string>\",\n \"dependencies\": [\"<string>\"], // Use full subtask IDs like [\"{{task.id}}.1\", \"{{task.id}}.2\"]. 
If no dependencies, use an empty array [].\n \"details\": \"<string>\",\n \"testStrategy\": \"<string>\" // Optional\n },\n // ... (repeat for {{#if (gt subtaskCount 0)}}{{subtaskCount}}{{else}}appropriate number of{{/if}} subtasks)\n ]\n}\n\nImportant: For the 'dependencies' field, if a subtask has no dependencies, you MUST use an empty array, for example: \"dependencies\": []. Do not use null or omit the field.\n\nDo not include ANY explanatory text, markdown, or code block markers. Just the JSON object."
|
||||||
},
|
},
|
||||||
"default": {
|
"default": {
|
||||||
"system": "You are an AI assistant helping with task breakdown for software development.\nYou need to break down a high-level task into {{#if (gt subtaskCount 0)}}{{subtaskCount}}{{else}}an appropriate number of{{/if}} specific subtasks that can be implemented one by one.\n\nSubtasks should:\n1. Be specific and actionable implementation steps\n2. Follow a logical sequence\n3. Each handle a distinct part of the parent task\n4. Include clear guidance on implementation approach\n5. Have appropriate dependency chains between subtasks (using full subtask IDs)\n6. Collectively cover all aspects of the parent task\n\nFor each subtask, provide:\n- id: Sequential integer starting from the provided nextSubtaskId\n- title: Clear, specific title\n- description: Detailed description\n- dependencies: Array of prerequisite subtask IDs using full format like [\"{{task.id}}.1\", \"{{task.id}}.2\"]\n- details: Implementation details, the output should be in string\n- testStrategy: Optional testing approach\n\nRespond ONLY with a valid JSON object containing a single key \"subtasks\" whose value is an array matching the structure described. Do not include any explanatory text, markdown formatting, or code block markers.",
|
"system": "You are an AI assistant helping with task breakdown for software development.\nYou need to break down a high-level task into {{#if (gt subtaskCount 0)}}{{subtaskCount}}{{else}}an appropriate number of{{/if}} specific subtasks that can be implemented one by one.\n\nSubtasks should:\n1. Be specific and actionable implementation steps\n2. Follow a logical sequence\n3. Each handle a distinct part of the parent task\n4. Include clear guidance on implementation approach\n5. Have appropriate dependency chains between subtasks (using full subtask IDs)\n6. Collectively cover all aspects of the parent task\n\nFor each subtask, provide:\n- id: Sequential integer starting from the provided nextSubtaskId\n- title: Clear, specific title\n- description: Detailed description\n- dependencies: Array of prerequisite subtask IDs using full format like [\"{{task.id}}.1\", \"{{task.id}}.2\"]\n- details: Implementation details, the output should be in string\n- testStrategy: Optional testing approach\n\nRespond ONLY with a valid JSON object containing a single key \"subtasks\" whose value is an array matching the structure described. Do not include any explanatory text, markdown formatting, or code block markers.",
|
||||||
"user": "Break down this task into {{#if (gt subtaskCount 0)}}exactly {{subtaskCount}}{{else}}an appropriate number of{{/if}} specific subtasks:\n\nTask ID: {{task.id}}\nTitle: {{task.title}}\nDescription: {{task.description}}\nCurrent details: {{#if task.details}}{{task.details}}{{else}}None{{/if}}{{#if additionalContext}}\nAdditional context: {{additionalContext}}{{/if}}{{#if complexityReasoningContext}}\nComplexity Analysis Reasoning: {{complexityReasoningContext}}{{/if}}{{#if gatheredContext}}\n\n# Project Context\n\n{{gatheredContext}}{{/if}}\n\nReturn ONLY the JSON object containing the \"subtasks\" array, matching this structure:\n\n{\n \"subtasks\": [\n {\n \"id\": {{nextSubtaskId}}, // First subtask ID\n \"title\": \"Specific subtask title\",\n \"description\": \"Detailed description\",\n \"dependencies\": [], // e.g., [\"{{task.id}}.1\", \"{{task.id}}.2\"] for dependencies. Use empty array [] if no dependencies\n \"details\": \"Implementation guidance\",\n \"testStrategy\": \"Optional testing approach\"\n },\n // ... (repeat for {{#if (gt subtaskCount 0)}}a total of {{subtaskCount}}{{else}}an appropriate number of{{/if}} subtasks with sequential IDs)\n ]\n}"
|
"user": "{{#if isClaudeCode}}## IMPORTANT: Codebase Analysis Required\n\nYou have access to powerful codebase analysis tools. Before generating subtasks:\n\n1. Use the Glob tool to explore relevant files for this task (e.g., \"**/*.js\", \"src/**/*.ts\")\n2. Use the Grep tool to search for existing implementations related to this task\n3. Use the Read tool to examine files that would be affected by this task\n4. Understand the current implementation state and patterns used\n\nBased on your analysis:\n- Identify existing code that relates to this task\n- Understand patterns and conventions to follow\n- Generate subtasks that integrate smoothly with existing code\n- Ensure subtasks are specific and actionable based on the actual codebase\n\nProject Root: {{projectRoot}}\n\n{{/if}}Break down this task into {{#if (gt subtaskCount 0)}}exactly {{subtaskCount}}{{else}}an appropriate number of{{/if}} specific subtasks:\n\nTask ID: {{task.id}}\nTitle: {{task.title}}\nDescription: {{task.description}}\nCurrent details: {{#if task.details}}{{task.details}}{{else}}None{{/if}}{{#if additionalContext}}\nAdditional context: {{additionalContext}}{{/if}}{{#if complexityReasoningContext}}\nComplexity Analysis Reasoning: {{complexityReasoningContext}}{{/if}}{{#if gatheredContext}}\n\n# Project Context\n\n{{gatheredContext}}{{/if}}\n\nReturn ONLY the JSON object containing the \"subtasks\" array, matching this structure:\n\n{\n \"subtasks\": [\n {\n \"id\": {{nextSubtaskId}}, // First subtask ID\n \"title\": \"Specific subtask title\",\n \"description\": \"Detailed description\",\n \"dependencies\": [], // e.g., [\"{{task.id}}.1\", \"{{task.id}}.2\"] for dependencies. Use empty array [] if no dependencies\n \"details\": \"Implementation guidance\",\n \"testStrategy\": \"Optional testing approach\"\n },\n // ... (repeat for {{#if (gt subtaskCount 0)}}a total of {{subtaskCount}}{{else}}an appropriate number of{{/if}} subtasks with sequential IDs)\n ]\n}"
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -40,12 +40,24 @@
|
|||||||
"default": "medium",
|
"default": "medium",
|
||||||
"enum": ["high", "medium", "low"],
|
"enum": ["high", "medium", "low"],
|
||||||
"description": "Default priority for generated tasks"
|
"description": "Default priority for generated tasks"
|
||||||
|
},
|
||||||
|
"isClaudeCode": {
|
||||||
|
"type": "boolean",
|
||||||
|
"required": false,
|
||||||
|
"default": false,
|
||||||
|
"description": "Whether Claude Code is being used as the provider"
|
||||||
|
},
|
||||||
|
"projectRoot": {
|
||||||
|
"type": "string",
|
||||||
|
"required": false,
|
||||||
|
"default": "",
|
||||||
|
"description": "Project root path for context"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"prompts": {
|
"prompts": {
|
||||||
"default": {
|
"default": {
|
||||||
"system": "You are an AI assistant specialized in analyzing Product Requirements Documents (PRDs) and generating a structured, logically ordered, dependency-aware and sequenced list of development tasks in JSON format.{{#if research}}\nBefore breaking down the PRD into tasks, you will:\n1. Research and analyze the latest technologies, libraries, frameworks, and best practices that would be appropriate for this project\n2. Identify any potential technical challenges, security concerns, or scalability issues not explicitly mentioned in the PRD without discarding any explicit requirements or going overboard with complexity -- always aim to provide the most direct path to implementation, avoiding over-engineering or roundabout approaches\n3. Consider current industry standards and evolving trends relevant to this project (this step aims to solve LLM hallucinations and out of date information due to training data cutoff dates)\n4. Evaluate alternative implementation approaches and recommend the most efficient path\n5. Include specific library versions, helpful APIs, and concrete implementation guidance based on your research\n6. Always aim to provide the most direct path to implementation, avoiding over-engineering or roundabout approaches\n\nYour task breakdown should incorporate this research, resulting in more detailed implementation guidance, more accurate dependency mapping, and more precise technology recommendations than would be possible from the PRD text alone, while maintaining all explicit requirements and best practices and all details and nuances of the PRD.{{/if}}\n\nAnalyze the provided PRD content and generate {{#if (gt numTasks 0)}}approximately {{numTasks}}{{else}}an appropriate number of{{/if}} top-level development tasks. 
If the complexity or the level of detail of the PRD is high, generate more tasks relative to the complexity of the PRD\nEach task should represent a logical unit of work needed to implement the requirements and focus on the most direct and effective way to implement the requirements without unnecessary complexity or overengineering. Include pseudo-code, implementation details, and test strategy for each task. Find the most up to date information to implement each task.\nAssign sequential IDs starting from {{nextId}}. Infer title, description, details, and test strategy for each task based *only* on the PRD content.\nSet status to 'pending', dependencies to an empty array [], and priority to '{{defaultTaskPriority}}' initially for all tasks.\nRespond ONLY with a valid JSON object containing a single key \"tasks\", where the value is an array of task objects adhering to the provided Zod schema. Do not include any explanation or markdown formatting.\n\nEach task should follow this JSON structure:\n{\n\t\"id\": number,\n\t\"title\": string,\n\t\"description\": string,\n\t\"status\": \"pending\",\n\t\"dependencies\": number[] (IDs of tasks this depends on),\n\t\"priority\": \"high\" | \"medium\" | \"low\",\n\t\"details\": string (implementation details),\n\t\"testStrategy\": string (validation approach)\n}\n\nGuidelines:\n1. {{#if (gt numTasks 0)}}Unless complexity warrants otherwise{{else}}Depending on the complexity{{/if}}, create {{#if (gt numTasks 0)}}exactly {{numTasks}}{{else}}an appropriate number of{{/if}} tasks, numbered sequentially starting from {{nextId}}\n2. Each task should be atomic and focused on a single responsibility following the most up to date best practices and standards\n3. Order tasks logically - consider dependencies and implementation sequence\n4. Early tasks should focus on setup, core functionality first, then advanced features\n5. Include clear validation/testing approach for each task\n6. 
Set appropriate dependency IDs (a task can only depend on tasks with lower IDs, potentially including existing tasks with IDs less than {{nextId}} if applicable)\n7. Assign priority (high/medium/low) based on criticality and dependency order\n8. Include detailed implementation guidance in the \"details\" field{{#if research}}, with specific libraries and version recommendations based on your research{{/if}}\n9. If the PRD contains specific requirements for libraries, database schemas, frameworks, tech stacks, or any other implementation details, STRICTLY ADHERE to these requirements in your task breakdown and do not discard them under any circumstance\n10. Focus on filling in any gaps left by the PRD or areas that aren't fully specified, while preserving all explicit requirements\n11. Always aim to provide the most direct path to implementation, avoiding over-engineering or roundabout approaches{{#if research}}\n12. For each task, include specific, actionable guidance based on current industry standards and best practices discovered through research{{/if}}",
|
"system": "You are an AI assistant specialized in analyzing Product Requirements Documents (PRDs) and generating a structured, logically ordered, dependency-aware and sequenced list of development tasks in JSON format.{{#if research}}\nBefore breaking down the PRD into tasks, you will:\n1. Research and analyze the latest technologies, libraries, frameworks, and best practices that would be appropriate for this project\n2. Identify any potential technical challenges, security concerns, or scalability issues not explicitly mentioned in the PRD without discarding any explicit requirements or going overboard with complexity -- always aim to provide the most direct path to implementation, avoiding over-engineering or roundabout approaches\n3. Consider current industry standards and evolving trends relevant to this project (this step aims to solve LLM hallucinations and out of date information due to training data cutoff dates)\n4. Evaluate alternative implementation approaches and recommend the most efficient path\n5. Include specific library versions, helpful APIs, and concrete implementation guidance based on your research\n6. Always aim to provide the most direct path to implementation, avoiding over-engineering or roundabout approaches\n\nYour task breakdown should incorporate this research, resulting in more detailed implementation guidance, more accurate dependency mapping, and more precise technology recommendations than would be possible from the PRD text alone, while maintaining all explicit requirements and best practices and all details and nuances of the PRD.{{/if}}\n\nAnalyze the provided PRD content and generate {{#if (gt numTasks 0)}}approximately {{numTasks}}{{else}}an appropriate number of{{/if}} top-level development tasks. 
If the complexity or the level of detail of the PRD is high, generate more tasks relative to the complexity of the PRD\nEach task should represent a logical unit of work needed to implement the requirements and focus on the most direct and effective way to implement the requirements without unnecessary complexity or overengineering. Include pseudo-code, implementation details, and test strategy for each task. Find the most up to date information to implement each task.\nAssign sequential IDs starting from {{nextId}}. Infer title, description, details, and test strategy for each task based *only* on the PRD content.\nSet status to 'pending', dependencies to an empty array [], and priority to '{{defaultTaskPriority}}' initially for all tasks.\nRespond ONLY with a valid JSON object containing a single key \"tasks\", where the value is an array of task objects adhering to the provided Zod schema. Do not include any explanation or markdown formatting.\n\nEach task should follow this JSON structure:\n{\n\t\"id\": number,\n\t\"title\": string,\n\t\"description\": string,\n\t\"status\": \"pending\",\n\t\"dependencies\": number[] (IDs of tasks this depends on),\n\t\"priority\": \"high\" | \"medium\" | \"low\",\n\t\"details\": string (implementation details),\n\t\"testStrategy\": string (validation approach)\n}\n\nGuidelines:\n1. {{#if (gt numTasks 0)}}Unless complexity warrants otherwise{{else}}Depending on the complexity{{/if}}, create {{#if (gt numTasks 0)}}exactly {{numTasks}}{{else}}an appropriate number of{{/if}} tasks, numbered sequentially starting from {{nextId}}\n2. Each task should be atomic and focused on a single responsibility following the most up to date best practices and standards\n3. Order tasks logically - consider dependencies and implementation sequence\n4. Early tasks should focus on setup, core functionality first, then advanced features\n5. Include clear validation/testing approach for each task\n6. 
Set appropriate dependency IDs (a task can only depend on tasks with lower IDs, potentially including existing tasks with IDs less than {{nextId}} if applicable)\n7. Assign priority (high/medium/low) based on criticality and dependency order\n8. Include detailed implementation guidance in the \"details\" field{{#if research}}, with specific libraries and version recommendations based on your research{{/if}}\n9. If the PRD contains specific requirements for libraries, database schemas, frameworks, tech stacks, or any other implementation details, STRICTLY ADHERE to these requirements in your task breakdown and do not discard them under any circumstance\n10. Focus on filling in any gaps left by the PRD or areas that aren't fully specified, while preserving all explicit requirements\n11. Always aim to provide the most direct path to implementation, avoiding over-engineering or roundabout approaches{{#if research}}\n12. For each task, include specific, actionable guidance based on current industry standards and best practices discovered through research{{/if}}",
|
||||||
"user": "Here's the Product Requirements Document (PRD) to break down into {{#if (gt numTasks 0)}}approximately {{numTasks}}{{else}}an appropriate number of{{/if}} tasks, starting IDs from {{nextId}}:{{#if research}}\n\nRemember to thoroughly research current best practices and technologies before task breakdown to provide specific, actionable implementation details.{{/if}}\n\n{{prdContent}}\n\n\n\t\tReturn your response in this format:\n{\n \"tasks\": [\n {\n \"id\": 1,\n \"title\": \"Setup Project Repository\",\n \"description\": \"...\",\n ...\n },\n ...\n ],\n \"metadata\": {\n \"projectName\": \"PRD Implementation\",\n \"totalTasks\": {{#if (gt numTasks 0)}}{{numTasks}}{{else}}{number of tasks}{{/if}},\n \"sourceFile\": \"{{prdPath}}\",\n \"generatedAt\": \"YYYY-MM-DD\"\n }\n}"
|
"user": "{{#if isClaudeCode}}## IMPORTANT: Codebase Analysis Required\n\nYou have access to powerful codebase analysis tools. Before generating tasks:\n\n1. Use the Glob tool to explore the project structure (e.g., \"**/*.js\", \"**/*.json\", \"**/README.md\")\n2. Use the Grep tool to search for existing implementations, patterns, and technologies\n3. Use the Read tool to examine key files like package.json, README.md, and main entry points\n4. Analyze the current state of implementation to understand what already exists\n\nBased on your analysis:\n- Identify what components/features are already implemented\n- Understand the technology stack, frameworks, and patterns in use\n- Generate tasks that build upon the existing codebase rather than duplicating work\n- Ensure tasks align with the project's current architecture and conventions\n\nProject Root: {{projectRoot}}\n\n{{/if}}Here's the Product Requirements Document (PRD) to break down into {{#if (gt numTasks 0)}}approximately {{numTasks}}{{else}}an appropriate number of{{/if}} tasks, starting IDs from {{nextId}}:{{#if research}}\n\nRemember to thoroughly research current best practices and technologies before task breakdown to provide specific, actionable implementation details.{{/if}}\n\n{{prdContent}}\n\n\n\t\tReturn your response in this format:\n{\n \"tasks\": [\n {\n \"id\": 1,\n \"title\": \"Setup Project Repository\",\n \"description\": \"...\",\n ...\n },\n ...\n ],\n \"metadata\": {\n \"projectName\": \"PRD Implementation\",\n \"totalTasks\": {{#if (gt numTasks 0)}}{{numTasks}}{{else}}{number of tasks}{{/if}},\n \"sourceFile\": \"{{prdPath}}\",\n \"generatedAt\": \"YYYY-MM-DD\"\n }\n}"
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
238
tests/unit/ai-providers/openai.test.js
Normal file
238
tests/unit/ai-providers/openai.test.js
Normal file
@@ -0,0 +1,238 @@
|
|||||||
|
/**
|
||||||
|
* Tests for OpenAI Provider - Token parameter handling for GPT-5
|
||||||
|
*
|
||||||
|
* This test suite covers:
|
||||||
|
* 1. Correct identification of GPT-5 models requiring max_completion_tokens
|
||||||
|
* 2. Token parameter preparation for different model types
|
||||||
|
* 3. Validation of maxTokens parameter
|
||||||
|
* 4. Integer coercion of token values
|
||||||
|
*/
|
||||||
|
|
||||||
|
import { jest } from '@jest/globals';
|
||||||
|
|
||||||
|
// Mock the utils module to prevent logging during tests
|
||||||
|
jest.mock('../../../scripts/modules/utils.js', () => ({
|
||||||
|
log: jest.fn()
|
||||||
|
}));
|
||||||
|
|
||||||
|
// Import the provider
|
||||||
|
import { OpenAIProvider } from '../../../src/ai-providers/openai.js';
|
||||||
|
|
||||||
|
describe('OpenAIProvider', () => {
|
||||||
|
let provider;
|
||||||
|
|
||||||
|
beforeEach(() => {
|
||||||
|
provider = new OpenAIProvider();
|
||||||
|
jest.clearAllMocks();
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('requiresMaxCompletionTokens', () => {
|
||||||
|
it('should return true for GPT-5 models', () => {
|
||||||
|
expect(provider.requiresMaxCompletionTokens('gpt-5')).toBe(true);
|
||||||
|
expect(provider.requiresMaxCompletionTokens('gpt-5-mini')).toBe(true);
|
||||||
|
expect(provider.requiresMaxCompletionTokens('gpt-5-nano')).toBe(true);
|
||||||
|
expect(provider.requiresMaxCompletionTokens('gpt-5-turbo')).toBe(true);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should return false for non-GPT-5 models', () => {
|
||||||
|
expect(provider.requiresMaxCompletionTokens('gpt-4')).toBe(false);
|
||||||
|
expect(provider.requiresMaxCompletionTokens('gpt-4o')).toBe(false);
|
||||||
|
expect(provider.requiresMaxCompletionTokens('gpt-3.5-turbo')).toBe(false);
|
||||||
|
expect(provider.requiresMaxCompletionTokens('o1')).toBe(false);
|
||||||
|
expect(provider.requiresMaxCompletionTokens('o1-mini')).toBe(false);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should handle null/undefined modelId', () => {
|
||||||
|
expect(provider.requiresMaxCompletionTokens(null)).toBeFalsy();
|
||||||
|
expect(provider.requiresMaxCompletionTokens(undefined)).toBeFalsy();
|
||||||
|
expect(provider.requiresMaxCompletionTokens('')).toBeFalsy();
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('prepareTokenParam', () => {
|
||||||
|
it('should return max_completion_tokens for GPT-5 models', () => {
|
||||||
|
const result = provider.prepareTokenParam('gpt-5', 1000);
|
||||||
|
expect(result).toEqual({ max_completion_tokens: 1000 });
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should return maxTokens for non-GPT-5 models', () => {
|
||||||
|
const result = provider.prepareTokenParam('gpt-4', 1000);
|
||||||
|
expect(result).toEqual({ maxTokens: 1000 });
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should coerce token value to integer', () => {
|
||||||
|
// Float values
|
||||||
|
const result1 = provider.prepareTokenParam('gpt-5', 1000.7);
|
||||||
|
expect(result1).toEqual({ max_completion_tokens: 1000 });
|
||||||
|
|
||||||
|
const result2 = provider.prepareTokenParam('gpt-4', 1000.7);
|
||||||
|
expect(result2).toEqual({ maxTokens: 1000 });
|
||||||
|
|
||||||
|
// String float
|
||||||
|
const result3 = provider.prepareTokenParam('gpt-5', '1000.7');
|
||||||
|
expect(result3).toEqual({ max_completion_tokens: 1000 });
|
||||||
|
|
||||||
|
// String integers (common CLI input path)
|
||||||
|
expect(provider.prepareTokenParam('gpt-5', '1000')).toEqual({
|
||||||
|
max_completion_tokens: 1000
|
||||||
|
});
|
||||||
|
expect(provider.prepareTokenParam('gpt-4', '1000')).toEqual({
|
||||||
|
maxTokens: 1000
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should return empty object for undefined maxTokens', () => {
|
||||||
|
const result = provider.prepareTokenParam('gpt-5', undefined);
|
||||||
|
expect(result).toEqual({});
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should handle edge cases', () => {
|
||||||
|
// Test with 0 (should still pass through as 0)
|
||||||
|
const result1 = provider.prepareTokenParam('gpt-5', 0);
|
||||||
|
expect(result1).toEqual({ max_completion_tokens: 0 });
|
||||||
|
|
||||||
|
// Test with string number
|
||||||
|
const result2 = provider.prepareTokenParam('gpt-5', '100');
|
||||||
|
expect(result2).toEqual({ max_completion_tokens: 100 });
|
||||||
|
|
||||||
|
// Test with negative number (will be floored, validation happens elsewhere)
|
||||||
|
const result3 = provider.prepareTokenParam('gpt-4', -10.5);
|
||||||
|
expect(result3).toEqual({ maxTokens: -11 });
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('validateOptionalParams', () => {
|
||||||
|
it('should accept valid maxTokens values', () => {
|
||||||
|
expect(() =>
|
||||||
|
provider.validateOptionalParams({ maxTokens: 1000 })
|
||||||
|
).not.toThrow();
|
||||||
|
expect(() =>
|
||||||
|
provider.validateOptionalParams({ maxTokens: 1 })
|
||||||
|
).not.toThrow();
|
||||||
|
expect(() =>
|
||||||
|
provider.validateOptionalParams({ maxTokens: '1000' })
|
||||||
|
).not.toThrow();
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should reject invalid maxTokens values', () => {
|
||||||
|
expect(() => provider.validateOptionalParams({ maxTokens: 0 })).toThrow(
|
||||||
|
Error
|
||||||
|
);
|
||||||
|
expect(() => provider.validateOptionalParams({ maxTokens: -1 })).toThrow(
|
||||||
|
Error
|
||||||
|
);
|
||||||
|
expect(() => provider.validateOptionalParams({ maxTokens: NaN })).toThrow(
|
||||||
|
Error
|
||||||
|
);
|
||||||
|
expect(() =>
|
||||||
|
provider.validateOptionalParams({ maxTokens: Infinity })
|
||||||
|
).toThrow(Error);
|
||||||
|
expect(() =>
|
||||||
|
provider.validateOptionalParams({ maxTokens: 'invalid' })
|
||||||
|
).toThrow(Error);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should accept valid temperature values', () => {
|
||||||
|
expect(() =>
|
||||||
|
provider.validateOptionalParams({ temperature: 0 })
|
||||||
|
).not.toThrow();
|
||||||
|
expect(() =>
|
||||||
|
provider.validateOptionalParams({ temperature: 0.5 })
|
||||||
|
).not.toThrow();
|
||||||
|
expect(() =>
|
||||||
|
provider.validateOptionalParams({ temperature: 1 })
|
||||||
|
).not.toThrow();
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should reject invalid temperature values', () => {
|
||||||
|
expect(() =>
|
||||||
|
provider.validateOptionalParams({ temperature: -0.1 })
|
||||||
|
).toThrow(Error);
|
||||||
|
expect(() =>
|
||||||
|
provider.validateOptionalParams({ temperature: 1.1 })
|
||||||
|
).toThrow(Error);
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('getRequiredApiKeyName', () => {
|
||||||
|
it('should return OPENAI_API_KEY', () => {
|
||||||
|
expect(provider.getRequiredApiKeyName()).toBe('OPENAI_API_KEY');
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('getClient', () => {
|
||||||
|
it('should throw error if API key is missing', () => {
|
||||||
|
expect(() => provider.getClient({})).toThrow(Error);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should create client with apiKey only', () => {
|
||||||
|
const params = {
|
||||||
|
apiKey: 'sk-test-123'
|
||||||
|
};
|
||||||
|
|
||||||
|
// The getClient method should return a function
|
||||||
|
const client = provider.getClient(params);
|
||||||
|
expect(typeof client).toBe('function');
|
||||||
|
|
||||||
|
// The client function should be callable and return a model object
|
||||||
|
const model = client('gpt-4');
|
||||||
|
expect(model).toBeDefined();
|
||||||
|
expect(model.modelId).toBe('gpt-4');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should create client with apiKey and baseURL', () => {
|
||||||
|
const params = {
|
||||||
|
apiKey: 'sk-test-456',
|
||||||
|
baseURL: 'https://api.openai.example'
|
||||||
|
};
|
||||||
|
|
||||||
|
// Should not throw when baseURL is provided
|
||||||
|
const client = provider.getClient(params);
|
||||||
|
expect(typeof client).toBe('function');
|
||||||
|
|
||||||
|
// The client function should be callable and return a model object
|
||||||
|
const model = client('gpt-5');
|
||||||
|
expect(model).toBeDefined();
|
||||||
|
expect(model.modelId).toBe('gpt-5');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should return the same client instance for the same parameters', () => {
|
||||||
|
const params = {
|
||||||
|
apiKey: 'sk-test-789'
|
||||||
|
};
|
||||||
|
|
||||||
|
// Multiple calls with same params should work
|
||||||
|
const client1 = provider.getClient(params);
|
||||||
|
const client2 = provider.getClient(params);
|
||||||
|
|
||||||
|
expect(typeof client1).toBe('function');
|
||||||
|
expect(typeof client2).toBe('function');
|
||||||
|
|
||||||
|
// Both clients should be able to create models
|
||||||
|
const model1 = client1('gpt-4');
|
||||||
|
const model2 = client2('gpt-4');
|
||||||
|
expect(model1.modelId).toBe('gpt-4');
|
||||||
|
expect(model2.modelId).toBe('gpt-4');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should handle different model IDs correctly', () => {
|
||||||
|
const client = provider.getClient({ apiKey: 'sk-test-models' });
|
||||||
|
|
||||||
|
// Test with different models
|
||||||
|
const gpt4 = client('gpt-4');
|
||||||
|
expect(gpt4.modelId).toBe('gpt-4');
|
||||||
|
|
||||||
|
const gpt5 = client('gpt-5');
|
||||||
|
expect(gpt5.modelId).toBe('gpt-5');
|
||||||
|
|
||||||
|
const gpt35 = client('gpt-3.5-turbo');
|
||||||
|
expect(gpt35.modelId).toBe('gpt-3.5-turbo');
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('name property', () => {
|
||||||
|
it('should have OpenAI as the provider name', () => {
|
||||||
|
expect(provider.name).toBe('OpenAI');
|
||||||
|
});
|
||||||
|
});
|
||||||
|
});
|
||||||
134
tests/unit/prompts/expand-task-prompt.test.js
Normal file
134
tests/unit/prompts/expand-task-prompt.test.js
Normal file
@@ -0,0 +1,134 @@
|
|||||||
|
import { jest } from '@jest/globals';
|
||||||
|
import { PromptManager } from '../../../scripts/modules/prompt-manager.js';
|
||||||
|
|
||||||
|
describe('expand-task prompt template', () => {
|
||||||
|
let promptManager;
|
||||||
|
|
||||||
|
beforeEach(() => {
|
||||||
|
promptManager = new PromptManager();
|
||||||
|
});
|
||||||
|
|
||||||
|
const testTask = {
|
||||||
|
id: 1,
|
||||||
|
title: 'Setup AWS Infrastructure',
|
||||||
|
description: 'Provision core AWS services',
|
||||||
|
details: 'Create VPC, subnets, and security groups'
|
||||||
|
};
|
||||||
|
|
||||||
|
const baseParams = {
|
||||||
|
task: testTask,
|
||||||
|
subtaskCount: 3,
|
||||||
|
nextSubtaskId: 1,
|
||||||
|
additionalContext: '',
|
||||||
|
complexityReasoningContext: '',
|
||||||
|
gatheredContext: '',
|
||||||
|
useResearch: false,
|
||||||
|
expansionPrompt: undefined
|
||||||
|
};
|
||||||
|
|
||||||
|
test('default variant includes task context', () => {
|
||||||
|
const { userPrompt } = promptManager.loadPrompt(
|
||||||
|
'expand-task',
|
||||||
|
baseParams,
|
||||||
|
'default'
|
||||||
|
);
|
||||||
|
|
||||||
|
expect(userPrompt).toContain(testTask.title);
|
||||||
|
expect(userPrompt).toContain(testTask.description);
|
||||||
|
expect(userPrompt).toContain(testTask.details);
|
||||||
|
expect(userPrompt).toContain('Task ID: 1');
|
||||||
|
});
|
||||||
|
|
||||||
|
test('research variant includes task context', () => {
|
||||||
|
const params = { ...baseParams, useResearch: true };
|
||||||
|
const { userPrompt } = promptManager.loadPrompt(
|
||||||
|
'expand-task',
|
||||||
|
params,
|
||||||
|
'research'
|
||||||
|
);
|
||||||
|
|
||||||
|
expect(userPrompt).toContain(testTask.title);
|
||||||
|
expect(userPrompt).toContain(testTask.description);
|
||||||
|
expect(userPrompt).toContain(testTask.details);
|
||||||
|
expect(userPrompt).toContain('Parent Task:');
|
||||||
|
expect(userPrompt).toContain('ID: 1');
|
||||||
|
});
|
||||||
|
|
||||||
|
test('complexity-report variant includes task context', () => {
|
||||||
|
const params = {
|
||||||
|
...baseParams,
|
||||||
|
expansionPrompt: 'Focus on security best practices',
|
||||||
|
complexityReasoningContext: 'High complexity due to security requirements'
|
||||||
|
};
|
||||||
|
const { userPrompt } = promptManager.loadPrompt(
|
||||||
|
'expand-task',
|
||||||
|
params,
|
||||||
|
'complexity-report'
|
||||||
|
);
|
||||||
|
|
||||||
|
// The fix ensures task context is included
|
||||||
|
expect(userPrompt).toContain('Parent Task:');
|
||||||
|
expect(userPrompt).toContain(`ID: ${testTask.id}`);
|
||||||
|
expect(userPrompt).toContain(`Title: ${testTask.title}`);
|
||||||
|
expect(userPrompt).toContain(`Description: ${testTask.description}`);
|
||||||
|
expect(userPrompt).toContain(`Current details: ${testTask.details}`);
|
||||||
|
|
||||||
|
// Also includes the expansion prompt
|
||||||
|
expect(userPrompt).toContain('Expansion Guidance:');
|
||||||
|
expect(userPrompt).toContain(params.expansionPrompt);
|
||||||
|
expect(userPrompt).toContain(params.complexityReasoningContext);
|
||||||
|
});
|
||||||
|
|
||||||
|
test('all variants request JSON format with subtasks array', () => {
|
||||||
|
const variants = ['default', 'research', 'complexity-report'];
|
||||||
|
|
||||||
|
variants.forEach((variant) => {
|
||||||
|
const params =
|
||||||
|
variant === 'complexity-report'
|
||||||
|
? { ...baseParams, expansionPrompt: 'test' }
|
||||||
|
: baseParams;
|
||||||
|
|
||||||
|
const { systemPrompt, userPrompt } = promptManager.loadPrompt(
|
||||||
|
'expand-task',
|
||||||
|
params,
|
||||||
|
variant
|
||||||
|
);
|
||||||
|
const combined = systemPrompt + userPrompt;
|
||||||
|
|
||||||
|
expect(combined.toLowerCase()).toContain('subtasks');
|
||||||
|
expect(combined).toContain('JSON');
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
test('complexity-report variant fails without task context regression test', () => {
|
||||||
|
// This test ensures we don't regress to the old behavior where
|
||||||
|
// complexity-report variant only used expansionPrompt without task context
|
||||||
|
const params = {
|
||||||
|
...baseParams,
|
||||||
|
expansionPrompt: 'Generic expansion prompt'
|
||||||
|
};
|
||||||
|
|
||||||
|
const { userPrompt } = promptManager.loadPrompt(
|
||||||
|
'expand-task',
|
||||||
|
params,
|
||||||
|
'complexity-report'
|
||||||
|
);
|
||||||
|
|
||||||
|
// Count occurrences of task-specific content
|
||||||
|
const titleOccurrences = (
|
||||||
|
userPrompt.match(new RegExp(testTask.title, 'g')) || []
|
||||||
|
).length;
|
||||||
|
const descriptionOccurrences = (
|
||||||
|
userPrompt.match(new RegExp(testTask.description, 'g')) || []
|
||||||
|
).length;
|
||||||
|
|
||||||
|
// Should have at least one occurrence of title and description
|
||||||
|
expect(titleOccurrences).toBeGreaterThanOrEqual(1);
|
||||||
|
expect(descriptionOccurrences).toBeGreaterThanOrEqual(1);
|
||||||
|
|
||||||
|
// Should not be ONLY the expansion prompt
|
||||||
|
expect(userPrompt.length).toBeGreaterThan(
|
||||||
|
params.expansionPrompt.length + 100
|
||||||
|
);
|
||||||
|
});
|
||||||
|
});
|
||||||
@@ -123,7 +123,9 @@ jest.unstable_mockModule(
|
|||||||
() => ({
|
() => ({
|
||||||
getDefaultSubtasks: jest.fn(() => 3),
|
getDefaultSubtasks: jest.fn(() => 3),
|
||||||
getDebugFlag: jest.fn(() => false),
|
getDebugFlag: jest.fn(() => false),
|
||||||
getDefaultNumTasks: jest.fn(() => 10)
|
getDefaultNumTasks: jest.fn(() => 10),
|
||||||
|
getMainProvider: jest.fn(() => 'openai'),
|
||||||
|
getResearchProvider: jest.fn(() => 'perplexity')
|
||||||
})
|
})
|
||||||
);
|
);
|
||||||
|
|
||||||
|
|||||||
@@ -49,7 +49,9 @@ jest.unstable_mockModule(
|
|||||||
() => ({
|
() => ({
|
||||||
getDebugFlag: jest.fn(() => false),
|
getDebugFlag: jest.fn(() => false),
|
||||||
getDefaultNumTasks: jest.fn(() => 10),
|
getDefaultNumTasks: jest.fn(() => 10),
|
||||||
getDefaultPriority: jest.fn(() => 'medium')
|
getDefaultPriority: jest.fn(() => 'medium'),
|
||||||
|
getMainProvider: jest.fn(() => 'openai'),
|
||||||
|
getResearchProvider: jest.fn(() => 'perplexity')
|
||||||
})
|
})
|
||||||
);
|
);
|
||||||
|
|
||||||
|
|||||||
Reference in New Issue
Block a user