Compare commits

266 Commits

v0.20.0...docs/auto-

| Author | SHA1 | Date |
|---|---|---|
| | 22fa529ce7 | |
| | 6bc75c0ac6 | |
| | d7fca1844f | |
| | a98d96ef04 | |
| | a69d8c91dc | |
| | 474a86cebb | |
| | 3283506444 | |
| | 9acb900153 | |
| | c4f5d89e72 | |
| | e308cf4f46 | |
| | 11b7354010 | |
| | 4c1ef2ca94 | |
| | 663aa2dfe9 | |
| | 8f60a0561e | |
| | 9a22622e9c | |
| | 8d3c7e4116 | |
| | 3010b90d98 | |
| | 90e6bdcf1c | |
| | 25a00dca67 | |
| | f263d4b2e0 | |
| | f12a16d096 | |
| | aaf903ff2f | |
| | 2a910a40ba | |
| | 0df6595245 | |
| | 33e3fbb20f | |
| | 5cb7ed557a | |
| | b9e644c556 | |
| | 7265a6cf53 | |
| | db6f405f23 | |
| | 7b5a7c4495 | |
| | caee040907 | |
| | 4b5473860b | |
| | b43b7ce201 | |
| | 86027f1ee4 | |
| | 4f984f8a69 | |
| | f7646f41b5 | |
| | 20004a39ea | |
| | f1393f47b1 | |
| | 738ec51c04 | |
| | c7418c4594 | |
| | 0747f1c772 | |
| | ffe24a2e35 | |
| | 604b94baa9 | |
| | 2ea4bb6a81 | |
| | 3e96387715 | |
| | 100c3dc47d | |
| | 986ac117ae | |
| | 18aa416035 | |
| | 3b3dbabed1 | |
| | af53525cbc | |
| | 0079b7defd | |
| | 0b2c6967c4 | |
| | c0682ac795 | |
| | 01a7faea8f | |
| | b7f32eac5a | |
| | 044a7bfc98 | |
| | 814265cd33 | |
| | 9b7b2ca7b2 | |
| | 949f091179 | |
| | 51a351760c | |
| | 732b2c61ad | |
| | 32c2b03c23 | |
| | 3bfd999d81 | |
| | 9fa79eb026 | |
| | 875134247a | |
| | 4c2801d5eb | |
| | c911608f60 | |
| | 8f1497407f | |
| | 10b64ec6f5 | |
| | 1a1879483b | |
| | d691cbb7ae | |
| | 1b7c9637a5 | |
| | 9ff5f158d5 | |
| | b2ff06e8c5 | |
| | c2fc61ddb3 | |
| | aaacc3dae3 | |
| | 46cd5dc186 | |
| | 49a31be416 | |
| | 2b69936ee7 | |
| | 6438f6c7c8 | |
| | 6bbd777552 | |
| | 100482722f | |
| | 7ff882bf23 | |
| | 6ab768f6ec | |
| | b5fe723f8e | |
| | f487736670 | |
| | d67b81d25d | |
| | 66c05053c0 | |
| | d7ab4609aa | |
| | 05f6242f7e | |
| | a58719cf50 | |
| | 674d1f6de7 | |
| | f106fb8e0b | |
| | fd9dd43ee0 | |
| | c395e93696 | |
| | a621ff05ea | |
| | 47ddb60231 | |
| | fce841490a | |
| | 4e126430a0 | |
| | a33abe6c21 | |
| | 2b0cbdbc84 | |
| | f1cdf78aa6 | |
| | e6de285cea | |
| | cf3339fa48 | |
| | 255b9f0334 | |
| | cb2c266b2d | |
| | 170d6f2f65 | |
| | 137ef36278 | |
| | 1a3a528bf7 | |
| | c164adc6ff | |
| | 9d61e0447d | |
| | ee11b735b3 | |
| | 6d978228d9 | |
| | ea9341e7af | |
| | 4296e383ea | |
| | 97b2781709 | |
| | 96553e4a5f | |
| | 7582219365 | |
| | 84baedc3d2 | |
| | 78da39edff | |
| | 4d1416b175 | |
| | dc811eb45e | |
| | 3c41a113fe | |
| | 0e8c42c7cb | |
| | 799d1d2cce | |
| | 83af314879 | |
| | dd03374496 | |
| | 4ab0affba7 | |
| | 77e1ddc237 | |
| | 3eeb19590a | |
| | 587745046f | |
| | c61c73f827 | |
| | 15900d9fd5 | |
| | 7cf4004038 | |
| | 0f3ab00f26 | |
| | e81040def5 | |
| | 597f6b03b4 | |
| | a7ad4c8e92 | |
| | 0d54747894 | |
| | df26c65632 | |
| | e80e5bb7cd | |
| | c4f92f6a0a | |
| | be0c0f267c | |
| | a983f75d4f | |
| | e743aaa8c2 | |
| | 16ffffaf68 | |
| | f254aed4a6 | |
| | dd3b47bb2b | |
| | 37af0f1912 | |
| | 8783708e5e | |
| | 4dad2fd613 | |
| | 4cae2991d4 | |
| | 0d7ff627c9 | |
| | db720a954d | |
| | 89335578ff | |
| | 781b8ef2af | |
| | 7d564920b5 | |
| | 2737fbaa67 | |
| | 9feb8d2dbf | |
| | 8a991587f1 | |
| | 7ceba2f572 | |
| | 10565f07d3 | |
| | f27ce34fe9 | |
| | 71be933a8d | |
| | 5d94f1b471 | |
| | 3dee60dc3d | |
| | f469515228 | |
| | 2fd0f026d3 | |
| | e3ed4d7c14 | |
| | fc47714340 | |
| | 30ae0e9a57 | |
| | 95640dcde8 | |
| | 311b2433e2 | |
| | 04e11b5e82 | |
| | 782728ff95 | |
| | 30ca144231 | |
| | 0220d0e994 | |
| | 41a8c2406a | |
| | a003041cd8 | |
| | 6b57ead106 | |
| | 7b6e117b1d | |
| | 03b045e9cd | |
| | 699afdae59 | |
| | 80c09802e8 | |
| | cf8f0f4b1c | |
| | 75c514cf5b | |
| | 41d1e671b1 | |
| | a464e550b8 | |
| | 3a852afdae | |
| | 4bb63706b8 | |
| | fcf14e09be | |
| | 4357af3f13 | |
| | 59f7676051 | |
| | 36468f3c93 | |
| | ca4d93ee6a | |
| | 37fb569a62 | |
| | ed0d4e6641 | |
| | 5184f8e7b2 | |
| | 587523a23b | |
| | 7a50f0c6ec | |
| | adeb76ee15 | |
| | d342070375 | |
| | 5e4dbac525 | |
| | fb15c2eaf7 | |
| | e8ceb08341 | |
| | e495b2b559 | |
| | e0d1d03f33 | |
| | 4a4bca905d | |
| | 9d5f50ac8e | |
| | bbeaa9163a | |
| | a4a172be94 | |
| | 028ed9c444 | |
| | 53903f1e8e | |
| | 36c56231cc | |
| | b82d858f81 | |
| | 9808967d6b | |
| | 3fee7515f3 | |
| | 82b17bdb57 | |
| | 72ca68edeb | |
| | 64302dc191 | |
| | 60c03c548d | |
| | 2ae6e7e6be | |
| | 45a14c323d | |
| | 29e67fafa4 | |
| | 43e4d7c9d3 | |
| | 1bd1e64cac | |
| | dc44ed9de8 | |
| | 31b8407dbc | |
| | 2df4f13f65 | |
| | a37017e5a5 | |
| | fb7d588137 | |
| | bdb11fb2db | |
| | 4423119a5e | |
| | 7b90568326 | |
| | 9b0630fdf1 | |
| | ced04bddd3 | |
| | 6ae66b2afb | |
| | 8781794c56 | |
| | fede909fe1 | |
| | 77cc5e4537 | |
| | d31ef7a39c | |
| | 66555099ca | |
| | 1e565eab53 | |
| | d87a7f1076 | |
| | 5b3dd3f29b | |
| | b7804302a1 | |
| | b2841c261f | |
| | 444aa5ae19 | |
| | 858d4a1c54 | |
| | fd005c4c54 | |
| | 0451ebcc32 | |
| | 9c58a92243 | |
| | f772a96d00 | |
| | 0886c83d0c | |
| | 806ec99939 | |
| | 36c4a7a869 | |
| | 88c434a939 | |
| | b0e09c76ed | |
| | 6c5e0f97f8 | |
| | 8774e7d5ae | |
| | 58a301c380 | |
| | 624922ca59 | |
| | 0a70ab6179 | |
| | 901eec1058 | |
| | 4629128943 | |
| | 6d69d02fe0 | |
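The same range can be inspected locally with plain git; a minimal sketch, assuming the repository is the `eyaltoledano/claude-task-master` repo referenced in the config diffs below and using `<branch>` as a stand-in for the truncated `docs/auto-` ref above:

```bash
# Clone the repository (URL assumed from the changeset/marketplace config shown later in this compare).
git clone https://github.com/eyaltoledano/claude-task-master
cd claude-task-master

# List the commits that are on the head branch but not in v0.20.0, one line each.
# <branch> is a placeholder for the truncated "docs/auto-" ref shown above.
git log --oneline v0.20.0..<branch>

# Show the file-level changes the rest of this page renders (three-dot merge-base diff).
git diff v0.20.0...<branch> --stat
```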
@@ -2,13 +2,16 @@
"$schema": "https://unpkg.com/@changesets/config@3.1.1/schema.json",
"changelog": [
"@changesets/changelog-github",
{ "repo": "eyaltoledano/claude-task-master" }
{
"repo": "eyaltoledano/claude-task-master"
}
],
"commit": false,
"fixed": [],
"linked": [],
"access": "public",
"baseBranch": "main",
"updateInternalDependencies": "patch",
"ignore": []
}
"ignore": [
"docs",
"@tm/claude-code-plugin"
]
}
5 .changeset/dirty-hairs-know.md Normal file
@@ -0,0 +1,5 @@
---
"task-master-ai": patch
---

Improve auth token refresh flow
7 .changeset/fix-parent-directory-traversal.md Normal file
@@ -0,0 +1,7 @@
---
"task-master-ai": patch
---

Enable Task Master commands to traverse parent directories to find project root from nested paths

Fixes #1301
5 .changeset/fix-warning-box-alignment.md Normal file
@@ -0,0 +1,5 @@
---
"@tm/cli": patch
---

Fix warning message box width to match dashboard box width for consistent UI alignment
35 .changeset/light-owls-stay.md Normal file
@@ -0,0 +1,35 @@
---
"task-master-ai": minor
---

Add configurable MCP tool loading to optimize LLM context usage

You can now control which Task Master MCP tools are loaded by setting the `TASK_MASTER_TOOLS` environment variable in your MCP configuration. This helps reduce context usage for LLMs by only loading the tools you need.

**Configuration Options:**

- `all` (default): Load all 36 tools
- `core` or `lean`: Load only 7 essential tools for daily development
  - Includes: `get_tasks`, `next_task`, `get_task`, `set_task_status`, `update_subtask`, `parse_prd`, `expand_task`
- `standard`: Load 15 commonly used tools (all core tools plus 8 more)
  - Additional tools: `initialize_project`, `analyze_project_complexity`, `expand_all`, `add_subtask`, `remove_task`, `generate`, `add_task`, `complexity_report`
- Custom list: Comma-separated tool names (e.g., `get_tasks,next_task,set_task_status`)

**Example .mcp.json configuration:**

```json
{
  "mcpServers": {
    "task-master-ai": {
      "command": "npx",
      "args": ["-y", "task-master-ai"],
      "env": {
        "TASK_MASTER_TOOLS": "standard",
        "ANTHROPIC_API_KEY": "your_key_here"
      }
    }
  }
}
```

For complete details on all available tools, configuration examples, and usage guidelines, see the [MCP Tools documentation](https://docs.task-master.dev/capabilities/mcp#configurable-tool-loading).
5 .changeset/metal-rocks-help.md Normal file
@@ -0,0 +1,5 @@
---
"task-master-ai": minor
---

Improve next command to work with remote
32 .claude-plugin/marketplace.json Normal file
@@ -0,0 +1,32 @@
{
  "name": "taskmaster",
  "owner": {
    "name": "Hamster",
    "email": "ralph@tryhamster.com"
  },
  "metadata": {
    "description": "Official marketplace for Taskmaster AI - AI-powered task management for ambitious development",
    "version": "1.0.0"
  },
  "plugins": [
    {
      "name": "taskmaster",
      "source": "./packages/claude-code-plugin",
      "description": "AI-powered task management system for ambitious development workflows with intelligent orchestration, complexity analysis, and automated coordination",
      "author": {
        "name": "Hamster"
      },
      "homepage": "https://github.com/eyaltoledano/claude-task-master",
      "repository": "https://github.com/eyaltoledano/claude-task-master",
      "keywords": [
        "task-management",
        "ai",
        "workflow",
        "orchestration",
        "automation",
        "mcp"
      ],
      "category": "productivity"
    }
  ]
}
38 .claude/commands/dedupe.md Normal file
@@ -0,0 +1,38 @@
---
allowed-tools: Bash(gh issue view:*), Bash(gh search:*), Bash(gh issue list:*), Bash(gh api:*), Bash(gh issue comment:*)
description: Find duplicate GitHub issues
---

Find up to 3 likely duplicate issues for a given GitHub issue.

To do this, follow these steps precisely:

1. Use an agent to check if the Github issue (a) is closed, (b) does not need to be deduped (eg. because it is broad product feedback without a specific solution, or positive feedback), or (c) already has a duplicates comment that you made earlier. If so, do not proceed.
2. Use an agent to view a Github issue, and ask the agent to return a summary of the issue
3. Then, launch 5 parallel agents to search Github for duplicates of this issue, using diverse keywords and search approaches, using the summary from #1
4. Next, feed the results from #1 and #2 into another agent, so that it can filter out false positives, that are likely not actually duplicates of the original issue. If there are no duplicates remaining, do not proceed.
5. Finally, comment back on the issue with a list of up to three duplicate issues (or zero, if there are no likely duplicates)

Notes (be sure to tell this to your agents, too):

- Use `gh` to interact with Github, rather than web fetch
- Do not use other tools, beyond `gh` (eg. don't use other MCP servers, file edit, etc.)
- Make a todo list first
- For your comment, follow the following format precisely (assuming for this example that you found 3 suspected duplicates):

---

Found 3 possible duplicate issues:

1. <link to issue>
2. <link to issue>
3. <link to issue>

This issue will be automatically closed as a duplicate in 3 days.

- If your issue is a duplicate, please close it and 👍 the existing issue instead
- To prevent auto-closure, add a comment or 👎 this comment

🤖 Generated with \[Task Master Bot\]

---
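The dedupe command above leans entirely on the `gh` CLI; a hypothetical sketch of the kinds of calls its steps describe — the issue number, repository, and search terms here are placeholders, not values taken from this diff:

```bash
# Steps 1-2: inspect the target issue and gather material for a summary (placeholders: 1234, OWNER/REPO).
gh issue view 1234 --repo OWNER/REPO --json state,title,body,comments

# Step 3: search for likely duplicates with varied keywords.
gh search issues "auth token refresh" --repo OWNER/REPO --state open --limit 10

# Step 5: report back on the original issue.
gh issue comment 1234 --repo OWNER/REPO --body "Found 3 possible duplicate issues: ..."
```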
@@ -1,55 +0,0 @@
Add a dependency between tasks.

Arguments: $ARGUMENTS

Parse the task IDs to establish dependency relationship.

## Adding Dependencies

Creates a dependency where one task must be completed before another can start.

## Argument Parsing

Parse natural language or IDs:
- "make 5 depend on 3" → task 5 depends on task 3
- "5 needs 3" → task 5 depends on task 3
- "5 3" → task 5 depends on task 3
- "5 after 3" → task 5 depends on task 3

## Execution

```bash
task-master add-dependency --id=<task-id> --depends-on=<dependency-id>
```

## Validation

Before adding:
1. **Verify both tasks exist**
2. **Check for circular dependencies**
3. **Ensure dependency makes logical sense**
4. **Warn if creating complex chains**

## Smart Features

- Detect if dependency already exists
- Suggest related dependencies
- Show impact on task flow
- Update task priorities if needed

## Post-Addition

After adding dependency:
1. Show updated dependency graph
2. Identify any newly blocked tasks
3. Suggest task order changes
4. Update project timeline

## Example Flows

```
/project:tm/add-dependency 5 needs 3
→ Task #5 now depends on Task #3
→ Task #5 is now blocked until #3 completes
→ Suggested: Also consider if #5 needs #4
```
@@ -1,71 +0,0 @@
Convert an existing task into a subtask.

Arguments: $ARGUMENTS

Parse parent ID and task ID to convert.

## Task Conversion

Converts an existing standalone task into a subtask of another task.

## Argument Parsing

- "move task 8 under 5"
- "make 8 a subtask of 5"
- "nest 8 in 5"
- "5 8" → make task 8 a subtask of task 5

## Execution

```bash
task-master add-subtask --parent=<parent-id> --task-id=<task-to-convert>
```

## Pre-Conversion Checks

1. **Validation**
   - Both tasks exist and are valid
   - No circular parent relationships
   - Task isn't already a subtask
   - Logical hierarchy makes sense

2. **Impact Analysis**
   - Dependencies that will be affected
   - Tasks that depend on converting task
   - Priority alignment needed
   - Status compatibility

## Conversion Process

1. Change task ID from "8" to "5.1" (next available)
2. Update all dependency references
3. Inherit parent's context where appropriate
4. Adjust priorities if needed
5. Update time estimates

## Smart Features

- Preserve task history
- Maintain dependencies
- Update all references
- Create conversion log

## Example

```
/project:tm/add-subtask/from-task 5 8
→ Converting: Task #8 becomes subtask #5.1
→ Updated: 3 dependency references
→ Parent task #5 now has 1 subtask
→ Note: Subtask inherits parent's priority

Before: #8 "Implement validation" (standalone)
After: #5.1 "Implement validation" (subtask of #5)
```

## Post-Conversion

- Show new task hierarchy
- List updated dependencies
- Verify project integrity
- Suggest related conversions
@@ -1,76 +0,0 @@
Add a subtask to a parent task.

Arguments: $ARGUMENTS

Parse arguments to create a new subtask or convert existing task.

## Adding Subtasks

Creates subtasks to break down complex parent tasks into manageable pieces.

## Argument Parsing

Flexible natural language:
- "add subtask to 5: implement login form"
- "break down 5 with: setup, implement, test"
- "subtask for 5: handle edge cases"
- "5: validate user input" → adds subtask to task 5

## Execution Modes

### 1. Create New Subtask
```bash
task-master add-subtask --parent=<id> --title="<title>" --description="<desc>"
```

### 2. Convert Existing Task
```bash
task-master add-subtask --parent=<id> --task-id=<existing-id>
```

## Smart Features

1. **Automatic Subtask Generation**
   - If title contains "and" or commas, create multiple
   - Suggest common subtask patterns
   - Inherit parent's context

2. **Intelligent Defaults**
   - Priority based on parent
   - Appropriate time estimates
   - Logical dependencies between subtasks

3. **Validation**
   - Check parent task complexity
   - Warn if too many subtasks
   - Ensure subtask makes sense

## Creation Process

1. Parse parent task context
2. Generate subtask with ID like "5.1"
3. Set appropriate defaults
4. Link to parent task
5. Update parent's time estimate

## Example Flows

```
/project:tm/add-subtask to 5: implement user authentication
→ Created subtask #5.1: "implement user authentication"
→ Parent task #5 now has 1 subtask
→ Suggested next subtasks: tests, documentation

/project:tm/add-subtask 5: setup, implement, test
→ Created 3 subtasks:
  #5.1: setup
  #5.2: implement
  #5.3: test
```

## Post-Creation

- Show updated task hierarchy
- Suggest logical next subtasks
- Update complexity estimates
- Recommend subtask order
@@ -1,78 +0,0 @@
Add new tasks with intelligent parsing and context awareness.

Arguments: $ARGUMENTS

## Smart Task Addition

Parse natural language to create well-structured tasks.

### 1. **Input Understanding**

I'll intelligently parse your request:
- Natural language → Structured task
- Detect priority from keywords (urgent, ASAP, important)
- Infer dependencies from context
- Suggest complexity based on description
- Determine task type (feature, bug, refactor, test, docs)

### 2. **Smart Parsing Examples**

**"Add urgent task to fix login bug"**
→ Title: Fix login bug
→ Priority: high
→ Type: bug
→ Suggested complexity: medium

**"Create task for API documentation after task 23 is done"**
→ Title: API documentation
→ Dependencies: [23]
→ Type: documentation
→ Priority: medium

**"Need to refactor auth module - depends on 12 and 15, high complexity"**
→ Title: Refactor auth module
→ Dependencies: [12, 15]
→ Complexity: high
→ Type: refactor

### 3. **Context Enhancement**

Based on current project state:
- Suggest related existing tasks
- Warn about potential conflicts
- Recommend dependencies
- Propose subtasks if complex

### 4. **Interactive Refinement**

```yaml
Task Preview:
─────────────
Title: [Extracted title]
Priority: [Inferred priority]
Dependencies: [Detected dependencies]
Complexity: [Estimated complexity]

Suggestions:
- Similar task #34 exists, consider as dependency?
- This seems complex, break into subtasks?
- Tasks #45-47 work on same module
```

### 5. **Validation & Creation**

Before creating:
- Validate dependencies exist
- Check for duplicates
- Ensure logical ordering
- Verify task completeness

### 6. **Smart Defaults**

Intelligent defaults based on:
- Task type patterns
- Team conventions
- Historical data
- Current sprint/phase

Result: High-quality tasks from minimal input.
@@ -1,121 +0,0 @@
Analyze task complexity and generate expansion recommendations.

Arguments: $ARGUMENTS

Perform deep analysis of task complexity across the project.

## Complexity Analysis

Uses AI to analyze tasks and recommend which ones need breakdown.

## Execution Options

```bash
task-master analyze-complexity [--research] [--threshold=5]
```

## Analysis Parameters

- `--research` → Use research AI for deeper analysis
- `--threshold=5` → Only flag tasks above complexity 5
- Default: Analyze all pending tasks

## Analysis Process

### 1. **Task Evaluation**
For each task, AI evaluates:
- Technical complexity
- Time requirements
- Dependency complexity
- Risk factors
- Knowledge requirements

### 2. **Complexity Scoring**
Assigns score 1-10 based on:
- Implementation difficulty
- Integration challenges
- Testing requirements
- Unknown factors
- Technical debt risk

### 3. **Recommendations**
For complex tasks:
- Suggest expansion approach
- Recommend subtask breakdown
- Identify risk areas
- Propose mitigation strategies

## Smart Analysis Features

1. **Pattern Recognition**
   - Similar task comparisons
   - Historical complexity accuracy
   - Team velocity consideration
   - Technology stack factors

2. **Contextual Factors**
   - Team expertise
   - Available resources
   - Timeline constraints
   - Business criticality

3. **Risk Assessment**
   - Technical risks
   - Timeline risks
   - Dependency risks
   - Knowledge gaps

## Output Format

```
Task Complexity Analysis Report
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━

High Complexity Tasks (>7):
📍 #5 "Implement real-time sync" - Score: 9/10
   Factors: WebSocket complexity, state management, conflict resolution
   Recommendation: Expand into 5-7 subtasks
   Risks: Performance, data consistency

📍 #12 "Migrate database schema" - Score: 8/10
   Factors: Data migration, zero downtime, rollback strategy
   Recommendation: Expand into 4-5 subtasks
   Risks: Data loss, downtime

Medium Complexity Tasks (5-7):
📍 #23 "Add export functionality" - Score: 6/10
   Consider expansion if timeline tight

Low Complexity Tasks (<5):
✅ 15 tasks - No expansion needed

Summary:
- Expand immediately: 2 tasks
- Consider expanding: 5 tasks
- Keep as-is: 15 tasks
```

## Actionable Output

For each high-complexity task:
1. Complexity score with reasoning
2. Specific expansion suggestions
3. Risk mitigation approaches
4. Recommended subtask structure

## Integration

Results are:
- Saved to `.taskmaster/reports/complexity-analysis.md`
- Used by expand command
- Inform sprint planning
- Guide resource allocation

## Next Steps

After analysis:
```
/project:tm/expand 5          # Expand specific task
/project:tm/expand/all        # Expand all recommended
/project:tm/complexity-report # View detailed report
```
@@ -1,86 +0,0 @@
Clear all subtasks from a specific task.

Arguments: $ARGUMENTS (task ID)

Remove all subtasks from a parent task at once.

## Clearing Subtasks

Bulk removal of all subtasks from a parent task.

## Execution

```bash
task-master clear-subtasks --id=<task-id>
```

## Pre-Clear Analysis

1. **Subtask Summary**
   - Number of subtasks
   - Completion status of each
   - Work already done
   - Dependencies affected

2. **Impact Assessment**
   - Data that will be lost
   - Dependencies to be removed
   - Effect on project timeline
   - Parent task implications

## Confirmation Required

```
Clear Subtasks Confirmation
━━━━━━━━━━━━━━━━━━━━━━━━━
Parent Task: #5 "Implement user authentication"
Subtasks to remove: 4
- #5.1 "Setup auth framework" (done)
- #5.2 "Create login form" (in-progress)
- #5.3 "Add validation" (pending)
- #5.4 "Write tests" (pending)

⚠️ This will permanently delete all subtask data
Continue? (y/n)
```

## Smart Features

- Option to convert to standalone tasks
- Backup task data before clearing
- Preserve completed work history
- Update parent task appropriately

## Process

1. List all subtasks for confirmation
2. Check for in-progress work
3. Remove all subtasks
4. Update parent task
5. Clean up dependencies

## Alternative Options

Suggest alternatives:
- Convert important subtasks to tasks
- Keep completed subtasks
- Archive instead of delete
- Export subtask data first

## Post-Clear

- Show updated parent task
- Recalculate time estimates
- Update task complexity
- Suggest next steps

## Example

```
/project:tm/clear-subtasks 5
→ Found 4 subtasks to remove
→ Warning: Subtask #5.2 is in-progress
→ Cleared all subtasks from task #5
→ Updated parent task estimates
→ Suggestion: Consider re-expanding with better breakdown
```
@@ -1,117 +0,0 @@
Display the task complexity analysis report.

Arguments: $ARGUMENTS

View the detailed complexity analysis generated by analyze-complexity command.

## Viewing Complexity Report

Shows comprehensive task complexity analysis with actionable insights.

## Execution

```bash
task-master complexity-report [--file=<path>]
```

## Report Location

Default: `.taskmaster/reports/complexity-analysis.md`
Custom: Specify with --file parameter

## Report Contents

### 1. **Executive Summary**
```
Complexity Analysis Summary
━━━━━━━━━━━━━━━━━━━━━━━━
Analysis Date: 2024-01-15
Tasks Analyzed: 32
High Complexity: 5 (16%)
Medium Complexity: 12 (37%)
Low Complexity: 15 (47%)

Critical Findings:
- 5 tasks need immediate expansion
- 3 tasks have high technical risk
- 2 tasks block critical path
```

### 2. **Detailed Task Analysis**
For each complex task:
- Complexity score breakdown
- Contributing factors
- Specific risks identified
- Expansion recommendations
- Similar completed tasks

### 3. **Risk Matrix**
Visual representation:
```
Risk vs Complexity Matrix
━━━━━━━━━━━━━━━━━━━━━━━
High Risk | #5(9)  #12(8) | #23(6)
Med Risk  | #34(7)        | #45(5) #67(5)
Low Risk  | #78(8)        | [15 tasks]
          | High Complex  | Med Complex
```

### 4. **Recommendations**

**Immediate Actions:**
1. Expand task #5 - Critical path + high complexity
2. Expand task #12 - High risk + dependencies
3. Review task #34 - Consider splitting

**Sprint Planning:**
- Don't schedule multiple high-complexity tasks together
- Ensure expertise available for complex tasks
- Build in buffer time for unknowns

## Interactive Features

When viewing report:
1. **Quick Actions**
   - Press 'e' to expand a task
   - Press 'd' for task details
   - Press 'r' to refresh analysis

2. **Filtering**
   - View by complexity level
   - Filter by risk factors
   - Show only actionable items

3. **Export Options**
   - Markdown format
   - CSV for spreadsheets
   - JSON for tools

## Report Intelligence

- Compares with historical data
- Shows complexity trends
- Identifies patterns
- Suggests process improvements

## Integration

Use report for:
- Sprint planning sessions
- Resource allocation
- Risk assessment
- Team discussions
- Client updates

## Example Usage

```
/project:tm/complexity-report
→ Opens latest analysis

/project:tm/complexity-report --file=archived/2024-01-01.md
→ View historical analysis

After viewing:
/project:tm/expand 5
→ Expand high-complexity task
```
@@ -1,51 +0,0 @@
Expand all pending tasks that need subtasks.

## Bulk Task Expansion

Intelligently expands all tasks that would benefit from breakdown.

## Execution

```bash
task-master expand --all
```

## Smart Selection

Only expands tasks that:
- Are marked as pending
- Have high complexity (>5)
- Lack existing subtasks
- Would benefit from breakdown

## Expansion Process

1. **Analysis Phase**
   - Identify expansion candidates
   - Group related tasks
   - Plan expansion strategy

2. **Batch Processing**
   - Expand tasks in logical order
   - Maintain consistency
   - Preserve relationships
   - Optimize for parallelism

3. **Quality Control**
   - Ensure subtask quality
   - Avoid over-decomposition
   - Maintain task coherence
   - Update dependencies

## Options

- Add `force` to expand all regardless of complexity
- Add `research` for enhanced AI analysis

## Results

After bulk expansion:
- Summary of tasks expanded
- New subtask count
- Updated complexity metrics
- Suggested task order
@@ -1,49 +0,0 @@
Break down a complex task into subtasks.

Arguments: $ARGUMENTS (task ID)

## Intelligent Task Expansion

Analyzes a task and creates detailed subtasks for better manageability.

## Execution

```bash
task-master expand --id=$ARGUMENTS
```

## Expansion Process

1. **Task Analysis**
   - Review task complexity
   - Identify components
   - Detect technical challenges
   - Estimate time requirements

2. **Subtask Generation**
   - Create 3-7 subtasks typically
   - Each subtask 1-4 hours
   - Logical implementation order
   - Clear acceptance criteria

3. **Smart Breakdown**
   - Setup/configuration tasks
   - Core implementation
   - Testing components
   - Integration steps
   - Documentation updates

## Enhanced Features

Based on task type:
- **Feature**: Setup → Implement → Test → Integrate
- **Bug Fix**: Reproduce → Diagnose → Fix → Verify
- **Refactor**: Analyze → Plan → Refactor → Validate

## Post-Expansion

After expansion:
1. Show subtask hierarchy
2. Update time estimates
3. Suggest implementation order
4. Highlight critical path
@@ -1,81 +0,0 @@
Automatically fix dependency issues found during validation.

## Automatic Dependency Repair

Intelligently fixes common dependency problems while preserving project logic.

## Execution

```bash
task-master fix-dependencies
```

## What Gets Fixed

### 1. **Auto-Fixable Issues**
- Remove references to deleted tasks
- Break simple circular dependencies
- Remove self-dependencies
- Clean up duplicate dependencies

### 2. **Smart Resolutions**
- Reorder dependencies to maintain logic
- Suggest task merging for over-dependent tasks
- Flatten unnecessary dependency chains
- Remove redundant transitive dependencies

### 3. **Manual Review Required**
- Complex circular dependencies
- Critical path modifications
- Business logic dependencies
- High-impact changes

## Fix Process

1. **Analysis Phase**
   - Run validation check
   - Categorize issues by type
   - Determine fix strategy

2. **Execution Phase**
   - Apply automatic fixes
   - Log all changes made
   - Preserve task relationships

3. **Verification Phase**
   - Re-validate after fixes
   - Show before/after comparison
   - Highlight manual fixes needed

## Smart Features

- Preserves intended task flow
- Minimal disruption approach
- Creates fix history/log
- Suggests manual interventions

## Output Example

```
Dependency Auto-Fix Report
━━━━━━━━━━━━━━━━━━━━━━━━
Fixed Automatically:
✅ Removed 2 references to deleted tasks
✅ Resolved 1 self-dependency
✅ Cleaned 3 redundant dependencies

Manual Review Needed:
⚠️ Complex circular dependency: #12 → #15 → #18 → #12
   Suggestion: Make #15 not depend on #12
⚠️ Task #45 has 8 dependencies
   Suggestion: Break into subtasks

Run '/project:tm/validate-dependencies' to verify fixes
```

## Safety

- Preview mode available
- Rollback capability
- Change logging
- No data loss
@@ -1,121 +0,0 @@
Generate individual task files from tasks.json.

## Task File Generation

Creates separate markdown files for each task, perfect for AI agents or documentation.

## Execution

```bash
task-master generate
```

## What It Creates

For each task, generates a file like `task_001.txt`:

```
Task ID: 1
Title: Implement user authentication
Status: pending
Priority: high
Dependencies: []
Created: 2024-01-15
Complexity: 7

## Description
Create a secure user authentication system with login, logout, and session management.

## Details
- Use JWT tokens for session management
- Implement secure password hashing
- Add remember me functionality
- Include password reset flow

## Test Strategy
- Unit tests for auth functions
- Integration tests for login flow
- Security testing for vulnerabilities
- Performance tests for concurrent logins

## Subtasks
1.1 Setup authentication framework (pending)
1.2 Create login endpoints (pending)
1.3 Implement session management (pending)
1.4 Add password reset (pending)
```

## File Organization

Creates structure:
```
.taskmaster/
└── tasks/
    ├── task_001.txt
    ├── task_002.txt
    ├── task_003.txt
    └── ...
```

## Smart Features

1. **Consistent Formatting**
   - Standardized structure
   - Clear sections
   - AI-readable format
   - Markdown compatible

2. **Contextual Information**
   - Full task details
   - Related task references
   - Progress indicators
   - Implementation notes

3. **Incremental Updates**
   - Only regenerate changed tasks
   - Preserve custom additions
   - Track generation timestamp
   - Version control friendly

## Use Cases

- **AI Context**: Provide task context to AI assistants
- **Documentation**: Standalone task documentation
- **Archival**: Task history preservation
- **Sharing**: Send specific tasks to team members
- **Review**: Easier task review process

## Generation Options

Based on arguments:
- Filter by status
- Include/exclude completed
- Custom templates
- Different formats

## Post-Generation

```
Task File Generation Complete
━━━━━━━━━━━━━━━━━━━━━━━━━━
Generated: 45 task files
Location: .taskmaster/tasks/
Total size: 156 KB

New files: 5
Updated files: 12
Unchanged: 28

Ready for:
- AI agent consumption
- Version control
- Team distribution
```

## Integration Benefits

- Git-trackable task history
- Easy task sharing
- AI tool compatibility
- Offline task access
- Backup redundancy
@@ -1,81 +0,0 @@
Show help for Task Master commands.

Arguments: $ARGUMENTS

Display help for Task Master commands. If arguments provided, show specific command help.

## Task Master Command Help

### Quick Navigation

Type `/project:tm/` and use tab completion to explore all commands.

### Command Categories

#### 🚀 Setup & Installation
- `/project:tm/setup/install` - Comprehensive installation guide
- `/project:tm/setup/quick-install` - One-line global install

#### 📋 Project Setup
- `/project:tm/init` - Initialize new project
- `/project:tm/init/quick` - Quick setup with auto-confirm
- `/project:tm/models` - View AI configuration
- `/project:tm/models/setup` - Configure AI providers

#### 🎯 Task Generation
- `/project:tm/parse-prd` - Generate tasks from PRD
- `/project:tm/parse-prd/with-research` - Enhanced parsing
- `/project:tm/generate` - Create task files

#### 📝 Task Management
- `/project:tm/list` - List tasks (natural language filters)
- `/project:tm/show <id>` - Display task details
- `/project:tm/add-task` - Create new task
- `/project:tm/update` - Update tasks naturally
- `/project:tm/next` - Get next task recommendation

#### 🔄 Status Management
- `/project:tm/set-status/to-pending <id>`
- `/project:tm/set-status/to-in-progress <id>`
- `/project:tm/set-status/to-done <id>`
- `/project:tm/set-status/to-review <id>`
- `/project:tm/set-status/to-deferred <id>`
- `/project:tm/set-status/to-cancelled <id>`

#### 🔍 Analysis & Breakdown
- `/project:tm/analyze-complexity` - Analyze task complexity
- `/project:tm/expand <id>` - Break down complex task
- `/project:tm/expand/all` - Expand all eligible tasks

#### 🔗 Dependencies
- `/project:tm/add-dependency` - Add task dependency
- `/project:tm/remove-dependency` - Remove dependency
- `/project:tm/validate-dependencies` - Check for issues

#### 🤖 Workflows
- `/project:tm/workflows/smart-flow` - Intelligent workflows
- `/project:tm/workflows/pipeline` - Command chaining
- `/project:tm/workflows/auto-implement` - Auto-implementation

#### 📊 Utilities
- `/project:tm/utils/analyze` - Project analysis
- `/project:tm/status` - Project dashboard
- `/project:tm/learn` - Interactive learning

### Natural Language Examples

```
/project:tm/list pending high priority
/project:tm/update mark all API tasks as done
/project:tm/add-task create login system with OAuth
/project:tm/show current
```

### Getting Started

1. Install: `/project:tm/setup/quick-install`
2. Initialize: `/project:tm/init/quick`
3. Learn: `/project:tm/learn start`
4. Work: `/project:tm/workflows/smart-flow`

For detailed command info: `/project:tm/help <command-name>`
@@ -1,130 +0,0 @@
# Task Master Command Reference

Comprehensive command structure for Task Master integration with Claude Code.

## Command Organization

Commands are organized hierarchically to match Task Master's CLI structure while providing enhanced Claude Code integration.

## Project Setup & Configuration

### `/project:tm/init`
- `index` - Initialize new project (handles PRD files intelligently)
- `quick` - Quick setup with auto-confirmation (-y flag)

### `/project:tm/models`
- `index` - View current AI model configuration
- `setup` - Interactive model configuration
- `set-main` - Set primary generation model
- `set-research` - Set research model
- `set-fallback` - Set fallback model

## Task Generation

### `/project:tm/parse-prd`
- `index` - Generate tasks from PRD document
- `with-research` - Enhanced parsing with research mode

### `/project:tm/generate`
- Create individual task files from tasks.json

## Task Management

### `/project:tm/list`
- `index` - Smart listing with natural language filters
- `with-subtasks` - Include subtasks in hierarchical view
- `by-status` - Filter by specific status

### `/project:tm/set-status`
- `to-pending` - Reset task to pending
- `to-in-progress` - Start working on task
- `to-done` - Mark task complete
- `to-review` - Submit for review
- `to-deferred` - Defer task
- `to-cancelled` - Cancel task

### `/project:tm/sync-readme`
- Export tasks to README.md with formatting

### `/project:tm/update`
- `index` - Update tasks with natural language
- `from-id` - Update multiple tasks from a starting point
- `single` - Update specific task

### `/project:tm/add-task`
- `index` - Add new task with AI assistance

### `/project:tm/remove-task`
- `index` - Remove task with confirmation

## Subtask Management

### `/project:tm/add-subtask`
- `index` - Add new subtask to parent
- `from-task` - Convert existing task to subtask

### `/project:tm/remove-subtask`
- Remove subtask (with optional conversion)

### `/project:tm/clear-subtasks`
- `index` - Clear subtasks from specific task
- `all` - Clear all subtasks globally

## Task Analysis & Breakdown

### `/project:tm/analyze-complexity`
- Analyze and generate expansion recommendations

### `/project:tm/complexity-report`
- Display complexity analysis report

### `/project:tm/expand`
- `index` - Break down specific task
- `all` - Expand all eligible tasks
- `with-research` - Enhanced expansion

## Task Navigation

### `/project:tm/next`
- Intelligent next task recommendation

### `/project:tm/show`
- Display detailed task information

### `/project:tm/status`
- Comprehensive project dashboard

## Dependency Management

### `/project:tm/add-dependency`
- Add task dependency

### `/project:tm/remove-dependency`
- Remove task dependency

### `/project:tm/validate-dependencies`
- Check for dependency issues

### `/project:tm/fix-dependencies`
- Automatically fix dependency problems

## Usage Patterns

### Natural Language
Most commands accept natural language arguments:
```
/project:tm/add-task create user authentication system
/project:tm/update mark all API tasks as high priority
/project:tm/list show blocked tasks
```

### ID-Based Commands
Commands requiring IDs intelligently parse from $ARGUMENTS:
```
/project:tm/show 45
/project:tm/expand 23
/project:tm/set-status/to-done 67
```

### Smart Defaults
Commands provide intelligent defaults and suggestions based on context.
@@ -1,50 +0,0 @@
Initialize a new Task Master project.

Arguments: $ARGUMENTS

Parse arguments to determine initialization preferences.

## Initialization Process

1. **Parse Arguments**
   - PRD file path (if provided)
   - Project name
   - Auto-confirm flag (-y)

2. **Project Setup**
   ```bash
   task-master init
   ```

3. **Smart Initialization**
   - Detect existing project files
   - Suggest project name from directory
   - Check for git repository
   - Verify AI provider configuration

## Configuration Options

Based on arguments:
- `quick` / `-y` → Skip confirmations
- `<file.md>` → Use as PRD after init
- `--name=<name>` → Set project name
- `--description=<desc>` → Set description

## Post-Initialization

After successful init:
1. Show project structure created
2. Verify AI models configured
3. Suggest next steps:
   - Parse PRD if available
   - Configure AI providers
   - Set up git hooks
   - Create first tasks

## Integration

If PRD file provided:
```
/project:tm/init my-prd.md
→ Automatically runs parse-prd after init
```
@@ -1,46 +0,0 @@
Quick initialization with auto-confirmation.

Arguments: $ARGUMENTS

Initialize a Task Master project without prompts, accepting all defaults.

## Quick Setup

```bash
task-master init -y
```

## What It Does

1. Creates `.taskmaster/` directory structure
2. Initializes empty `tasks.json`
3. Sets up default configuration
4. Uses directory name as project name
5. Skips all confirmation prompts

## Smart Defaults

- Project name: Current directory name
- Description: "Task Master Project"
- Model config: Existing environment vars
- Task structure: Standard format

## Next Steps

After quick init:
1. Configure AI models if needed:
   ```
   /project:tm/models/setup
   ```

2. Parse PRD if available:
   ```
   /project:tm/parse-prd <file>
   ```

3. Or create first task:
   ```
   /project:tm/add-task create initial setup
   ```

Perfect for rapid project setup!
@@ -1,39 +0,0 @@
List tasks filtered by a specific status.

Arguments: $ARGUMENTS

Parse the status from arguments and list only tasks matching that status.

## Status Options
- `pending` - Not yet started
- `in-progress` - Currently being worked on
- `done` - Completed
- `review` - Awaiting review
- `deferred` - Postponed
- `cancelled` - Cancelled

## Execution

Based on $ARGUMENTS, run:
```bash
task-master list --status=$ARGUMENTS
```

## Enhanced Display

For the filtered results:
- Group by priority within the status
- Show time in current status
- Highlight tasks approaching deadlines
- Display blockers and dependencies
- Suggest next actions for each status group

## Intelligent Insights

Based on the status filter:
- **Pending**: Show recommended start order
- **In-Progress**: Display idle time warnings
- **Done**: Show newly unblocked tasks
- **Review**: Indicate review duration
- **Deferred**: Show reactivation criteria
- **Cancelled**: Display impact analysis
@@ -1,43 +0,0 @@
List tasks with intelligent argument parsing.

Parse arguments to determine filters and display options:
- Status: pending, in-progress, done, review, deferred, cancelled
- Priority: high, medium, low (or priority:high)
- Special: subtasks, tree, dependencies, blocked
- IDs: Direct numbers (e.g., "1,3,5" or "1-5")
- Complex: "pending high" = pending AND high priority

Arguments: $ARGUMENTS

Let me parse your request intelligently:

1. **Detect Filter Intent**
   - If arguments contain status keywords → filter by status
   - If arguments contain priority → filter by priority
   - If arguments contain "subtasks" → include subtasks
   - If arguments contain "tree" → hierarchical view
   - If arguments contain numbers → show specific tasks
   - If arguments contain "blocked" → show blocked tasks only

2. **Smart Combinations**
   Examples of what I understand:
   - "pending high" → pending tasks with high priority
   - "done today" → tasks completed today
   - "blocked" → tasks with unmet dependencies
   - "1-5" → tasks 1 through 5
   - "subtasks tree" → hierarchical view with subtasks

3. **Execute Appropriate Query**
   Based on parsed intent, run the most specific task-master command

4. **Enhanced Display**
   - Group by relevant criteria
   - Show most important information first
   - Use visual indicators for quick scanning
   - Include relevant metrics

5. **Intelligent Suggestions**
   Based on what you're viewing, suggest next actions:
   - Many pending? → Suggest priority order
   - Many blocked? → Show dependency resolution
   - Looking at specific tasks? → Show related tasks
@@ -1,29 +0,0 @@
List all tasks including their subtasks in a hierarchical view.

This command shows all tasks with their nested subtasks, providing a complete project overview.

## Execution

Run the Task Master list command with subtasks flag:
```bash
task-master list --with-subtasks
```

## Enhanced Display

I'll organize the output to show:
- Parent tasks with clear indicators
- Nested subtasks with proper indentation
- Status badges for quick scanning
- Dependencies and blockers highlighted
- Progress indicators for tasks with subtasks

## Smart Filtering

Based on the task hierarchy:
- Show completion percentage for parent tasks
- Highlight blocked subtask chains
- Group by functional areas
- Indicate critical path items

This gives you a complete tree view of your project structure.
@@ -1,51 +0,0 @@
View current AI model configuration.

## Model Configuration Display

Shows the currently configured AI providers and models for Task Master.

## Execution

```bash
task-master models
```

## Information Displayed

1. **Main Provider**
   - Model ID and name
   - API key status (configured/missing)
   - Usage: Primary task generation

2. **Research Provider**
   - Model ID and name
   - API key status
   - Usage: Enhanced research mode

3. **Fallback Provider**
   - Model ID and name
   - API key status
   - Usage: Backup when main fails

## Visual Status

```
Task Master AI Model Configuration
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
Main:     ✅ claude-3-5-sonnet (configured)
Research: ✅ perplexity-sonar (configured)
Fallback: ⚠️ Not configured (optional)

Available Models:
- claude-3-5-sonnet
- gpt-4-turbo
- gpt-3.5-turbo
- perplexity-sonar
```

## Next Actions

Based on configuration:
- If missing API keys → Suggest setup
- If no research model → Explain benefits
- If all configured → Show usage tips
@@ -1,51 +0,0 @@
Run interactive setup to configure AI models.

## Interactive Model Configuration

Guides you through setting up AI providers for Task Master.

## Execution

```bash
task-master models --setup
```

## Setup Process

1. **Environment Check**
   - Detect existing API keys
   - Show current configuration
   - Identify missing providers

2. **Provider Selection**
   - Choose main provider (required)
   - Select research provider (recommended)
   - Configure fallback (optional)

3. **API Key Configuration**
   - Prompt for missing keys
   - Validate key format
   - Test connectivity
   - Save configuration

## Smart Recommendations

Based on your needs:
- **For best results**: Claude + Perplexity
- **Budget conscious**: GPT-3.5 + Perplexity
- **Maximum capability**: GPT-4 + Perplexity + Claude fallback

## Configuration Storage

Keys can be stored in:
1. Environment variables (recommended)
2. `.env` file in project
3. Global `.taskmaster/config`

## Post-Setup

After configuration:
- Test each provider
- Show usage examples
- Suggest next steps
- Verify parse-prd works
@@ -1,66 +0,0 @@
Intelligently determine and prepare the next action based on comprehensive context.

This enhanced version of 'next' considers:
- Current task states
- Recent activity
- Time constraints
- Dependencies
- Your working patterns

Arguments: $ARGUMENTS

## Intelligent Next Action

### 1. **Context Gathering**
Let me analyze the current situation:
- Active tasks (in-progress)
- Recently completed tasks
- Blocked tasks
- Time since last activity
- Arguments provided: $ARGUMENTS

### 2. **Smart Decision Tree**

**If you have an in-progress task:**
- Has it been idle > 2 hours? → Suggest resuming or switching
- Near completion? → Show remaining steps
- Blocked? → Find alternative task

**If no in-progress tasks:**
- Unblocked high-priority tasks? → Start highest
- Complex tasks need breakdown? → Suggest expansion
- All tasks blocked? → Show dependency resolution

**Special arguments handling:**
- "quick" → Find task < 2 hours
- "easy" → Find low complexity task
- "important" → Find high priority regardless of complexity
- "continue" → Resume last worked task

### 3. **Preparation Workflow**

Based on selected task:
1. Show full context and history
2. Set up development environment
3. Run relevant tests
4. Open related files
5. Show similar completed tasks
6. Estimate completion time

### 4. **Alternative Suggestions**

Always provide options:
- Primary recommendation
- Quick alternative (< 1 hour)
- Strategic option (unblocks most tasks)
- Learning option (new technology/skill)

### 5. **Workflow Integration**

Seamlessly connect to:
- `/project:task-master:start [selected]`
- `/project:workflows:auto-implement`
- `/project:task-master:expand` (if complex)
- `/project:utils:complexity-report` (if unsure)

The goal: Zero friction from decision to implementation.
@@ -1,49 +0,0 @@
|
||||
Parse a PRD document to generate tasks.
|
||||
|
||||
Arguments: $ARGUMENTS (PRD file path)
|
||||
|
||||
## Intelligent PRD Parsing
|
||||
|
||||
Analyzes your requirements document and generates a complete task breakdown.
|
||||
|
||||
## Execution
|
||||
|
||||
```bash
|
||||
task-master parse-prd --input=$ARGUMENTS
|
||||
```
|
||||
|
||||
## Parsing Process
|
||||
|
||||
1. **Document Analysis**
|
||||
- Extract key requirements
|
||||
- Identify technical components
|
||||
- Detect dependencies
|
||||
- Estimate complexity
|
||||
|
||||
2. **Task Generation**
|
||||
- Create 10-15 tasks by default
|
||||
- Include implementation tasks
|
||||
- Add testing tasks
|
||||
- Include documentation tasks
|
||||
- Set logical dependencies
|
||||
|
||||
3. **Smart Enhancements**
|
||||
- Group related functionality
|
||||
- Set appropriate priorities
|
||||
- Add acceptance criteria
|
||||
- Include test strategies
|
||||
|
||||
## Options
|
||||
|
||||
Parse arguments for modifiers (example invocations follow the list):
|
||||
- Number after filename → `--num-tasks`
|
||||
- `research` → Use research mode
|
||||
- `comprehensive` → Generate more tasks
|
||||
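Illustrative mappings, using only the flags shown in this document (`--input`, `--num-tasks`, `--research`); the argument parsing itself is handled by the command above:

```bash
# "prd.txt 20"        → generate 20 tasks
task-master parse-prd --input=prd.txt --num-tasks=20

# "prd.txt research"  → use the research provider
task-master parse-prd --input=prd.txt --research
```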
|
||||
## Post-Generation
|
||||
|
||||
After parsing:
|
||||
1. Display task summary
|
||||
2. Show dependency graph
|
||||
3. Suggest task expansion for complex items
|
||||
4. Recommend sprint planning
|
||||
@@ -1,48 +0,0 @@
|
||||
Parse PRD with enhanced research mode for better task generation.
|
||||
|
||||
Arguments: $ARGUMENTS (PRD file path)
|
||||
|
||||
## Research-Enhanced Parsing
|
||||
|
||||
Uses the research AI provider (typically Perplexity) for more comprehensive task generation with current best practices.
|
||||
|
||||
## Execution
|
||||
|
||||
```bash
|
||||
task-master parse-prd --input=$ARGUMENTS --research
|
||||
```
|
||||
|
||||
## Research Benefits
|
||||
|
||||
1. **Current Best Practices**
|
||||
- Latest framework patterns
|
||||
- Security considerations
|
||||
- Performance optimizations
|
||||
- Accessibility requirements
|
||||
|
||||
2. **Technical Deep Dive**
|
||||
- Implementation approaches
|
||||
- Library recommendations
|
||||
- Architecture patterns
|
||||
- Testing strategies
|
||||
|
||||
3. **Comprehensive Coverage**
|
||||
- Edge cases consideration
|
||||
- Error handling tasks
|
||||
- Monitoring setup
|
||||
- Deployment tasks
|
||||
|
||||
## Enhanced Output
|
||||
|
||||
Research mode typically:
|
||||
- Generates more detailed tasks
|
||||
- Includes industry standards
|
||||
- Adds compliance considerations
|
||||
- Suggests modern tooling
|
||||
|
||||
## When to Use
|
||||
|
||||
- New technology domains
|
||||
- Complex requirements
|
||||
- Regulatory compliance needed
|
||||
- Best practices crucial
|
||||
@@ -1,62 +0,0 @@
|
||||
Remove a dependency between tasks.
|
||||
|
||||
Arguments: $ARGUMENTS
|
||||
|
||||
Parse the task IDs to remove dependency relationship.
|
||||
|
||||
## Removing Dependencies
|
||||
|
||||
Removes a dependency relationship, potentially unblocking tasks.
|
||||
|
||||
## Argument Parsing
|
||||
|
||||
Parse natural language or IDs:
|
||||
- "remove dependency between 5 and 3"
|
||||
- "5 no longer needs 3"
|
||||
- "unblock 5 from 3"
|
||||
- "5 3" → remove dependency of 5 on 3
|
||||
|
||||
## Execution
|
||||
|
||||
```bash
|
||||
task-master remove-dependency --id=<task-id> --depends-on=<dependency-id>
|
||||
```
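For example, the parsed request "5 no longer needs 3" would run (IDs are illustrative):

```bash
task-master remove-dependency --id=5 --depends-on=3
```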
|
||||
|
||||
## Pre-Removal Checks
|
||||
|
||||
1. **Verify dependency exists**
|
||||
2. **Check impact on task flow**
|
||||
3. **Warn if it breaks logical sequence**
|
||||
4. **Show what will be unblocked**
|
||||
|
||||
## Smart Analysis
|
||||
|
||||
Before removing:
|
||||
- Show why dependency might have existed
|
||||
- Check if removal makes tasks executable
|
||||
- Verify no critical path disruption
|
||||
- Suggest alternative dependencies
|
||||
|
||||
## Post-Removal
|
||||
|
||||
After removing:
|
||||
1. Show updated task status
|
||||
2. List newly unblocked tasks
|
||||
3. Update project timeline
|
||||
4. Suggest next actions
|
||||
|
||||
## Safety Features
|
||||
|
||||
- Confirm if removing critical dependency
|
||||
- Show tasks that become immediately actionable
|
||||
- Warn about potential issues
|
||||
- Keep removal history
|
||||
|
||||
## Example
|
||||
|
||||
```
|
||||
/project:tm/remove-dependency 5 from 3
|
||||
→ Removed: Task #5 no longer depends on #3
|
||||
→ Task #5 is now UNBLOCKED and ready to start
|
||||
→ Warning: Consider if #5 still needs #2 completed first
|
||||
```
|
||||
@@ -1,84 +0,0 @@
|
||||
Remove a subtask from its parent task.
|
||||
|
||||
Arguments: $ARGUMENTS
|
||||
|
||||
Parse subtask ID to remove, with option to convert to standalone task.
|
||||
|
||||
## Removing Subtasks
|
||||
|
||||
Remove a subtask and optionally convert it back to a standalone task.
|
||||
|
||||
## Argument Parsing
|
||||
|
||||
- "remove subtask 5.1"
|
||||
- "delete 5.1"
|
||||
- "convert 5.1 to task" → remove and convert
|
||||
- "5.1 standalone" → convert to standalone
|
||||
|
||||
## Execution Options
|
||||
|
||||
### 1. Delete Subtask
|
||||
```bash
|
||||
task-master remove-subtask --id=<parentId.subtaskId>
|
||||
```
|
||||
|
||||
### 2. Convert to Standalone
|
||||
```bash
|
||||
task-master remove-subtask --id=<parentId.subtaskId> --convert
|
||||
```
|
||||
|
||||
## Pre-Removal Checks
|
||||
|
||||
1. **Validate Subtask**
|
||||
- Verify subtask exists
|
||||
- Check completion status
|
||||
- Review dependencies
|
||||
|
||||
2. **Impact Analysis**
|
||||
- Other subtasks that depend on it
|
||||
- Parent task implications
|
||||
- Data that will be lost
|
||||
|
||||
## Removal Process
|
||||
|
||||
### For Deletion:
|
||||
1. Confirm if subtask has work done
|
||||
2. Update parent task estimates
|
||||
3. Remove subtask and its data
|
||||
4. Clean up dependencies
|
||||
|
||||
### For Conversion:
|
||||
1. Assign new standalone task ID
|
||||
2. Preserve all task data
|
||||
3. Update dependency references
|
||||
4. Maintain task history
|
||||
|
||||
## Smart Features
|
||||
|
||||
- Warn if subtask is in-progress
|
||||
- Show impact on parent task
|
||||
- Preserve important data
|
||||
- Update related estimates
|
||||
|
||||
## Example Flows
|
||||
|
||||
```
|
||||
/project:tm/remove-subtask 5.1
|
||||
→ Warning: Subtask #5.1 is in-progress
|
||||
→ This will delete all subtask data
|
||||
→ Parent task #5 will be updated
|
||||
Confirm deletion? (y/n)
|
||||
|
||||
/project:tm/remove-subtask 5.1 convert
|
||||
→ Converting subtask #5.1 to standalone task #89
|
||||
→ Preserved: All task data and history
|
||||
→ Updated: 2 dependency references
|
||||
→ New task #89 is now independent
|
||||
```
|
||||
|
||||
## Post-Removal
|
||||
|
||||
- Update parent task status
|
||||
- Recalculate estimates
|
||||
- Show updated hierarchy
|
||||
- Suggest next actions
|
||||
@@ -1,107 +0,0 @@
|
||||
Remove a task permanently from the project.
|
||||
|
||||
Arguments: $ARGUMENTS (task ID)
|
||||
|
||||
Delete a task and handle all its relationships properly.
|
||||
|
||||
## Task Removal
|
||||
|
||||
Permanently removes a task while maintaining project integrity.
|
||||
|
||||
## Argument Parsing
|
||||
|
||||
- "remove task 5"
|
||||
- "delete 5"
|
||||
- "5" → remove task 5
|
||||
- Can include "-y" for auto-confirm
|
||||
|
||||
## Execution
|
||||
|
||||
```bash
|
||||
task-master remove-task --id=<id> [-y]
|
||||
```
|
||||
|
||||
## Pre-Removal Analysis
|
||||
|
||||
1. **Task Details**
|
||||
- Current status
|
||||
- Work completed
|
||||
- Time invested
|
||||
- Associated data
|
||||
|
||||
2. **Relationship Check**
|
||||
- Tasks that depend on this
|
||||
- Dependencies this task has
|
||||
- Subtasks that will be removed
|
||||
- Blocking implications
|
||||
|
||||
3. **Impact Assessment**
|
||||
```
|
||||
Task Removal Impact
|
||||
━━━━━━━━━━━━━━━━━━
|
||||
Task: #5 "Implement authentication" (in-progress)
|
||||
Status: 60% complete (~8 hours work)
|
||||
|
||||
Will affect:
|
||||
- 3 tasks depend on this (will be blocked)
|
||||
- Has 4 subtasks (will be deleted)
|
||||
- Part of critical path
|
||||
|
||||
⚠️ This action cannot be undone
|
||||
```
|
||||
|
||||
## Smart Warnings
|
||||
|
||||
- Warn if task is in-progress
|
||||
- Show dependent tasks that will be blocked
|
||||
- Highlight if part of critical path
|
||||
- Note any completed work being lost
|
||||
|
||||
## Removal Process
|
||||
|
||||
1. Show comprehensive impact
|
||||
2. Require confirmation (unless -y)
|
||||
3. Update dependent task references
|
||||
4. Remove task and subtasks
|
||||
5. Clean up orphaned dependencies
|
||||
6. Log removal with timestamp
|
||||
|
||||
## Alternative Actions
|
||||
|
||||
Suggest before deletion:
|
||||
- Mark as cancelled instead
|
||||
- Convert to documentation
|
||||
- Archive task data
|
||||
- Transfer work to another task
|
||||
|
||||
## Post-Removal
|
||||
|
||||
- List affected tasks
|
||||
- Show broken dependencies
|
||||
- Update project statistics
|
||||
- Suggest dependency fixes
|
||||
- Recalculate timeline
|
||||
|
||||
## Example Flows
|
||||
|
||||
```
|
||||
/project:tm/remove-task 5
|
||||
→ Task #5 is in-progress with 8 hours logged
|
||||
→ 3 other tasks depend on this
|
||||
→ Suggestion: Mark as cancelled instead?
|
||||
Remove anyway? (y/n)
|
||||
|
||||
/project:tm/remove-task 5 -y
|
||||
→ Removed: Task #5 and 4 subtasks
|
||||
→ Updated: 3 task dependencies
|
||||
→ Warning: Tasks #7, #8, #9 now have missing dependency
|
||||
→ Run /project:tm/fix-dependencies to resolve
|
||||
```
|
||||
|
||||
## Safety Features
|
||||
|
||||
- Confirmation required
|
||||
- Impact preview
|
||||
- Removal logging
|
||||
- Suggest alternatives
|
||||
- No cascade delete of dependents
|
||||
@@ -1,117 +0,0 @@
|
||||
Check if Task Master is installed and install it if needed.
|
||||
|
||||
This command helps you get Task Master set up globally on your system.
|
||||
|
||||
## Detection and Installation Process
|
||||
|
||||
1. **Check Current Installation**
|
||||
```bash
|
||||
# Check if task-master command exists
|
||||
which task-master || echo "Task Master not found"
|
||||
|
||||
# Check npm global packages
|
||||
npm list -g task-master-ai
|
||||
```
|
||||
|
||||
2. **System Requirements Check**
|
||||
```bash
|
||||
# Verify Node.js is installed
|
||||
node --version
|
||||
|
||||
# Verify npm is installed
|
||||
npm --version
|
||||
|
||||
# Check Node version (need 16+)
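# One possible check (assumes a POSIX shell with awk available):
node -p "process.versions.node" | awk -F. '{ exit ($1 >= 16) ? 0 : 1 }' \
  && echo "Node version OK" || echo "Node 16+ required"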
|
||||
```
|
||||
|
||||
3. **Install Task Master Globally**
|
||||
If not installed, run:
|
||||
```bash
|
||||
npm install -g task-master-ai
|
||||
```
|
||||
|
||||
4. **Verify Installation**
|
||||
```bash
|
||||
# Check version
|
||||
task-master --version
|
||||
|
||||
# Verify command is available
|
||||
which task-master
|
||||
```
|
||||
|
||||
5. **Initial Setup**
|
||||
```bash
|
||||
# Initialize in current directory
|
||||
task-master init
|
||||
```
|
||||
|
||||
6. **Configure AI Provider**
|
||||
Ensure you have at least one AI provider API key set:
|
||||
```bash
|
||||
# Check current configuration
|
||||
task-master models --status
|
||||
|
||||
# If no API keys found, guide setup
|
||||
echo "You'll need at least one API key:"
|
||||
echo "- ANTHROPIC_API_KEY for Claude"
|
||||
echo "- OPENAI_API_KEY for GPT models"
|
||||
echo "- PERPLEXITY_API_KEY for research"
|
||||
echo ""
|
||||
echo "Set them in your shell profile or .env file"
|
||||
```
|
||||
|
||||
7. **Quick Test**
|
||||
```bash
|
||||
# Create a test PRD
|
||||
echo "Build a simple hello world API" > test-prd.txt
|
||||
|
||||
# Try parsing it
|
||||
task-master parse-prd test-prd.txt -n 3
|
||||
```
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
If installation fails:
|
||||
|
||||
**Permission Errors:**
|
||||
```bash
|
||||
# Try with sudo (macOS/Linux)
|
||||
sudo npm install -g task-master-ai
|
||||
|
||||
# Or fix npm permissions
|
||||
npm config set prefix ~/.npm-global
|
||||
export PATH=~/.npm-global/bin:$PATH
|
||||
```
|
||||
|
||||
**Network Issues:**
|
||||
```bash
|
||||
# Use different registry
|
||||
npm install -g task-master-ai --registry https://registry.npmjs.org/
|
||||
```
|
||||
|
||||
**Node Version Issues:**
|
||||
```bash
|
||||
# Install Node 18+ via nvm
|
||||
curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.39.0/install.sh | bash
|
||||
nvm install 18
|
||||
nvm use 18
|
||||
```
|
||||
|
||||
## Success Confirmation
|
||||
|
||||
Once installed, you should see:
|
||||
```
|
||||
✅ Task Master v0.16.2 (or higher) installed
|
||||
✅ Command 'task-master' available globally
|
||||
✅ AI provider configured
|
||||
✅ Ready to use slash commands!
|
||||
|
||||
Try: /project:task-master:init your-prd.md
|
||||
```
|
||||
|
||||
## Next Steps
|
||||
|
||||
After installation:
|
||||
1. Run `/project:utils:check-health` to verify setup
|
||||
2. Configure AI providers with `/project:task-master:models`
|
||||
3. Start using Task Master commands!
|
||||
@@ -1,22 +0,0 @@
|
||||
Quick install Task Master globally if not already installed.
|
||||
|
||||
Execute this streamlined installation:
|
||||
|
||||
```bash
|
||||
# Check and install in one command
|
||||
task-master --version 2>/dev/null || npm install -g task-master-ai
|
||||
|
||||
# Verify installation
|
||||
task-master --version
|
||||
|
||||
# Quick setup check
|
||||
task-master models --status || echo "Note: You'll need to set up an AI provider API key"
|
||||
```
|
||||
|
||||
If you see "command not found" after installation, you may need to:
|
||||
1. Restart your terminal
|
||||
2. Or add npm's global bin directory to PATH: `export PATH="$(npm config get prefix)/bin:$PATH"` (newer npm versions no longer ship `npm bin -g`)
|
||||
|
||||
Once installed, you can use all the Task Master commands!
|
||||
|
||||
Quick test: Run `/project:help` to see all available commands.
|
||||
@@ -1,82 +0,0 @@
|
||||
Show detailed task information with rich context and insights.
|
||||
|
||||
Arguments: $ARGUMENTS
|
||||
|
||||
## Enhanced Task Display
|
||||
|
||||
Parse arguments to determine what to show and how.
|
||||
|
||||
### 1. **Smart Task Selection**
|
||||
|
||||
Based on $ARGUMENTS:
|
||||
- Number → Show specific task with full context
|
||||
- "current" → Show active in-progress task(s)
|
||||
- "next" → Show recommended next task
|
||||
- "blocked" → Show all blocked tasks with reasons
|
||||
- "critical" → Show critical path tasks
|
||||
- Multiple IDs → Comparative view
|
||||
|
||||
### 2. **Contextual Information**
|
||||
|
||||
For each task, intelligently include:
|
||||
|
||||
**Core Details**
|
||||
- Full task information (id, title, description, details)
|
||||
- Current status with history
|
||||
- Test strategy and acceptance criteria
|
||||
- Priority and complexity analysis
|
||||
|
||||
**Relationships**
|
||||
- Dependencies (what it needs)
|
||||
- Dependents (what needs it)
|
||||
- Parent/subtask hierarchy
|
||||
- Related tasks (similar work)
|
||||
|
||||
**Time Intelligence**
|
||||
- Created/updated timestamps
|
||||
- Time in current status
|
||||
- Estimated vs actual time
|
||||
- Historical completion patterns
|
||||
|
||||
### 3. **Visual Enhancements**
|
||||
|
||||
```
|
||||
📋 Task #45: Implement User Authentication
|
||||
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
|
||||
Status: 🟡 in-progress (2 hours)
|
||||
Priority: 🔴 High | Complexity: 73/100
|
||||
|
||||
Dependencies: ✅ #41, ✅ #42, ⏳ #43 (blocked)
|
||||
Blocks: #46, #47, #52
|
||||
|
||||
Progress: ████████░░ 80% complete
|
||||
|
||||
Recent Activity:
|
||||
- 2h ago: Status changed to in-progress
|
||||
- 4h ago: Dependency #42 completed
|
||||
- Yesterday: Task expanded with 3 subtasks
|
||||
```
|
||||
|
||||
### 4. **Intelligent Insights**
|
||||
|
||||
Based on task analysis:
|
||||
- **Risk Assessment**: Complexity vs time remaining
|
||||
- **Bottleneck Analysis**: Is this blocking critical work?
|
||||
- **Recommendation**: Suggested approach or concerns
|
||||
- **Similar Tasks**: How others completed similar work
|
||||
|
||||
### 5. **Action Suggestions**
|
||||
|
||||
Context-aware next steps:
|
||||
- If blocked → Show how to unblock
|
||||
- If complex → Suggest expansion
|
||||
- If in-progress → Show completion checklist
|
||||
- If done → Show dependent tasks ready to start
|
||||
|
||||
### 6. **Multi-Task View**
|
||||
|
||||
When showing multiple tasks:
|
||||
- Common dependencies
|
||||
- Optimal completion order
|
||||
- Parallel work opportunities
|
||||
- Combined complexity analysis
|
||||
@@ -1,64 +0,0 @@
|
||||
Enhanced status command with comprehensive project insights.
|
||||
|
||||
Arguments: $ARGUMENTS
|
||||
|
||||
## Intelligent Status Overview
|
||||
|
||||
### 1. **Executive Summary**
|
||||
Quick dashboard view:
|
||||
- 🏃 Active work (in-progress tasks)
|
||||
- 📊 Progress metrics (% complete, velocity)
|
||||
- 🚧 Blockers and risks
|
||||
- ⏱️ Time analysis (estimated vs actual)
|
||||
- 🎯 Sprint/milestone progress
|
||||
|
||||
### 2. **Contextual Analysis**
|
||||
|
||||
Based on $ARGUMENTS, focus on:
|
||||
- "sprint" → Current sprint progress and burndown
|
||||
- "blocked" → Dependency chains and resolution paths
|
||||
- "team" → Task distribution and workload
|
||||
- "timeline" → Schedule adherence and projections
|
||||
- "risk" → High complexity or overdue items
|
||||
|
||||
### 3. **Smart Insights**
|
||||
|
||||
**Workflow Health:**
|
||||
- Idle tasks (in-progress > 24h without updates)
|
||||
- Bottlenecks (multiple tasks waiting on same dependency)
|
||||
- Quick wins (low complexity, high impact)
|
||||
|
||||
**Predictive Analytics:**
|
||||
- Completion projections based on velocity
|
||||
- Risk of missing deadlines
|
||||
- Recommended task order for optimal flow
|
||||
|
||||
### 4. **Visual Intelligence**
|
||||
|
||||
Dynamic visualization based on data:
|
||||
```
|
||||
Sprint Progress: ████████░░ 80% (16/20 tasks)
|
||||
Velocity Trend: ↗️ +15% this week
|
||||
Blocked Tasks: 🔴 3 critical path items
|
||||
|
||||
Priority Distribution:
|
||||
High: ████████ 8 tasks (2 blocked)
|
||||
Medium: ████░░░░ 4 tasks
|
||||
Low: ██░░░░░░ 2 tasks
|
||||
```
|
||||
|
||||
### 5. **Actionable Recommendations**
|
||||
|
||||
Based on analysis:
|
||||
1. **Immediate actions** (unblock critical path)
|
||||
2. **Today's focus** (optimal task sequence)
|
||||
3. **Process improvements** (recurring patterns)
|
||||
4. **Resource needs** (skills, time, dependencies)
|
||||
|
||||
### 6. **Historical Context**
|
||||
|
||||
Compare to previous periods:
|
||||
- Velocity changes
|
||||
- Pattern recognition
|
||||
- Improvement areas
|
||||
- Success patterns to repeat
|
||||
@@ -1,117 +0,0 @@
|
||||
Export tasks to README.md with professional formatting.
|
||||
|
||||
Arguments: $ARGUMENTS
|
||||
|
||||
Generate a well-formatted README with current task information.
|
||||
|
||||
## README Synchronization
|
||||
|
||||
Creates or updates README.md with beautifully formatted task information.
|
||||
|
||||
## Argument Parsing
|
||||
|
||||
Optional filters:
|
||||
- "pending" → Only pending tasks
|
||||
- "with-subtasks" → Include subtask details
|
||||
- "by-priority" → Group by priority
|
||||
- "sprint" → Current sprint only
|
||||
|
||||
## Execution
|
||||
|
||||
```bash
|
||||
task-master sync-readme [--with-subtasks] [--status=<status>]
|
||||
```
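For example, the filters "pending" and "with-subtasks" together would translate to (flags as shown above):

```bash
task-master sync-readme --with-subtasks --status=pending
```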
|
||||
|
||||
## README Generation
|
||||
|
||||
### 1. **Project Header**
|
||||
```markdown
|
||||
# Project Name
|
||||
|
||||
## 📋 Task Progress
|
||||
|
||||
Last Updated: 2024-01-15 10:30 AM
|
||||
|
||||
### Summary
|
||||
- Total Tasks: 45
|
||||
- Completed: 15 (33%)
|
||||
- In Progress: 5 (11%)
|
||||
- Pending: 25 (56%)
|
||||
```
|
||||
|
||||
### 2. **Task Sections**
|
||||
Organized by status or priority:
|
||||
- Progress indicators
|
||||
- Task descriptions
|
||||
- Dependencies noted
|
||||
- Time estimates
|
||||
|
||||
### 3. **Visual Elements**
|
||||
- Progress bars
|
||||
- Status badges
|
||||
- Priority indicators
|
||||
- Completion checkmarks
|
||||
|
||||
## Smart Features
|
||||
|
||||
1. **Intelligent Grouping**
|
||||
- By feature area
|
||||
- By sprint/milestone
|
||||
- By assigned developer
|
||||
- By priority
|
||||
|
||||
2. **Progress Tracking**
|
||||
- Overall completion
|
||||
- Sprint velocity
|
||||
- Burndown indication
|
||||
- Time tracking
|
||||
|
||||
3. **Formatting Options**
|
||||
- GitHub-flavored markdown
|
||||
- Task checkboxes
|
||||
- Collapsible sections
|
||||
- Table format available
|
||||
|
||||
## Example Output
|
||||
|
||||
```markdown
|
||||
## 🚀 Current Sprint
|
||||
|
||||
### In Progress
|
||||
- [ ] 🔄 #5 **Implement user authentication** (60% complete)
|
||||
- Dependencies: API design (#3 ✅)
|
||||
- Subtasks: 4 (2 completed)
|
||||
- Est: 8h / Spent: 5h
|
||||
|
||||
### Pending (High Priority)
|
||||
- [ ] ⚡ #8 **Create dashboard UI**
|
||||
- Blocked by: #5
|
||||
- Complexity: High
|
||||
- Est: 12h
|
||||
```
|
||||
|
||||
## Customization
|
||||
|
||||
Based on arguments:
|
||||
- Include/exclude sections
|
||||
- Detail level control
|
||||
- Custom grouping
|
||||
- Filter by criteria
|
||||
|
||||
## Post-Sync
|
||||
|
||||
After generation:
|
||||
1. Show diff preview
|
||||
2. Backup existing README
|
||||
3. Write new content
|
||||
4. Commit reminder
|
||||
5. Update timestamp
|
||||
|
||||
## Integration
|
||||
|
||||
Works well with:
|
||||
- Git workflows
|
||||
- CI/CD pipelines
|
||||
- Project documentation
|
||||
- Team updates
|
||||
- Client reports
|
||||
@@ -1,108 +0,0 @@
|
||||
Update multiple tasks starting from a specific ID.
|
||||
|
||||
Arguments: $ARGUMENTS
|
||||
|
||||
Parse starting task ID and update context.
|
||||
|
||||
## Bulk Task Updates
|
||||
|
||||
Update multiple related tasks based on new requirements or context changes.
|
||||
|
||||
## Argument Parsing
|
||||
|
||||
- "from 5: add security requirements"
|
||||
- "5 onwards: update API endpoints"
|
||||
- "starting at 5: change to use new framework"
|
||||
|
||||
## Execution
|
||||
|
||||
```bash
|
||||
task-master update --from=<id> --prompt="<context>"
|
||||
```
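For instance, the parsed request "from 5: add security requirements" would become (values illustrative):

```bash
task-master update --from=5 --prompt="add security requirements"
```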
|
||||
|
||||
## Update Process
|
||||
|
||||
### 1. **Task Selection**
|
||||
Starting from specified ID:
|
||||
- Include the task itself
|
||||
- Include all dependent tasks
|
||||
- Include related subtasks
|
||||
- Smart boundary detection
|
||||
|
||||
### 2. **Context Application**
|
||||
AI analyzes the update context and:
|
||||
- Identifies what needs changing
|
||||
- Maintains consistency
|
||||
- Preserves completed work
|
||||
- Updates related information
|
||||
|
||||
### 3. **Intelligent Updates**
|
||||
- Modify descriptions appropriately
|
||||
- Update test strategies
|
||||
- Adjust time estimates
|
||||
- Revise dependencies if needed
|
||||
|
||||
## Smart Features
|
||||
|
||||
1. **Scope Detection**
|
||||
- Find natural task groupings
|
||||
- Identify related features
|
||||
- Stop at logical boundaries
|
||||
- Avoid over-updating
|
||||
|
||||
2. **Consistency Maintenance**
|
||||
- Keep naming conventions
|
||||
- Preserve relationships
|
||||
- Update cross-references
|
||||
- Maintain task flow
|
||||
|
||||
3. **Change Preview**
|
||||
```
|
||||
Bulk Update Preview
|
||||
━━━━━━━━━━━━━━━━━━
|
||||
Starting from: Task #5
|
||||
Tasks to update: 8 tasks + 12 subtasks
|
||||
|
||||
Context: "add security requirements"
|
||||
|
||||
Changes will include:
|
||||
- Add security sections to descriptions
|
||||
- Update test strategies for security
|
||||
- Add security-related subtasks where needed
|
||||
- Adjust time estimates (+20% average)
|
||||
|
||||
Continue? (y/n)
|
||||
```
|
||||
|
||||
## Example Updates
|
||||
|
||||
```
|
||||
/project:tm/update/from-id 5: change database to PostgreSQL
|
||||
→ Analyzing impact starting from task #5
|
||||
→ Found 6 related tasks to update
|
||||
→ Updates will maintain consistency
|
||||
→ Preview changes? (y/n)
|
||||
|
||||
Applied updates:
|
||||
✓ Task #5: Updated connection logic references
|
||||
✓ Task #6: Changed migration approach
|
||||
✓ Task #7: Updated query syntax notes
|
||||
✓ Task #8: Revised testing strategy
|
||||
✓ Task #9: Updated deployment steps
|
||||
✓ Task #12: Changed backup procedures
|
||||
```
|
||||
|
||||
## Safety Features
|
||||
|
||||
- Preview all changes
|
||||
- Selective confirmation
|
||||
- Rollback capability
|
||||
- Change logging
|
||||
- Validation checks
|
||||
|
||||
## Post-Update
|
||||
|
||||
- Summary of changes
|
||||
- Consistency verification
|
||||
- Suggest review tasks
|
||||
- Update timeline if needed
|
||||
@@ -1,72 +0,0 @@
|
||||
Update tasks with intelligent field detection and bulk operations.
|
||||
|
||||
Arguments: $ARGUMENTS
|
||||
|
||||
## Intelligent Task Updates
|
||||
|
||||
Parse arguments to determine update intent and execute smartly.
|
||||
|
||||
### 1. **Natural Language Processing**
|
||||
|
||||
Understand update requests like the following (see the sketch after this list):
|
||||
- "mark 23 as done" → Update status to done
|
||||
- "increase priority of 45" → Set priority to high
|
||||
- "add dependency on 12 to task 34" → Add dependency
|
||||
- "tasks 20-25 need review" → Bulk status update
|
||||
- "all API tasks high priority" → Pattern-based update
|
||||
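Under the hood, status requests map onto the `set-status` form used elsewhere in this document (a sketch with illustrative IDs; other field updates go through their respective commands):

```bash
# "mark 23 as done"
task-master set-status --id=23 --status=done

# "tasks 20-25 need review" — one call per task in the range
for id in 20 21 22 23 24 25; do
  task-master set-status --id=$id --status=review
done
```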
|
||||
### 2. **Smart Field Detection**
|
||||
|
||||
Automatically detect what to update:
|
||||
- Status keywords: done, complete, start, pause, review
|
||||
- Priority changes: urgent, high, low, deprioritize
|
||||
- Dependency updates: depends on, blocks, after
|
||||
- Assignment: assign to, owner, responsible
|
||||
- Time: estimate, spent, deadline
|
||||
|
||||
### 3. **Bulk Operations**
|
||||
|
||||
Support for multiple task updates:
|
||||
```
|
||||
Examples:
|
||||
- "complete tasks 12, 15, 18"
|
||||
- "all pending auth tasks to in-progress"
|
||||
- "increase priority for tasks blocking 45"
|
||||
- "defer all documentation tasks"
|
||||
```
|
||||
|
||||
### 4. **Contextual Validation**
|
||||
|
||||
Before updating, check:
|
||||
- Status transitions are valid
|
||||
- Dependencies don't create cycles
|
||||
- Priority changes make sense
|
||||
- Bulk updates won't break project flow
|
||||
|
||||
Show preview:
|
||||
```
|
||||
Update Preview:
|
||||
─────────────────
|
||||
Tasks to update: #23, #24, #25
|
||||
Change: status → in-progress
|
||||
Impact: Will unblock tasks #30, #31
|
||||
Warning: Task #24 has unmet dependencies
|
||||
```
|
||||
|
||||
### 5. **Smart Suggestions**
|
||||
|
||||
Based on update:
|
||||
- Completing task? → Show newly unblocked tasks
|
||||
- Changing priority? → Show impact on sprint
|
||||
- Adding dependency? → Check for conflicts
|
||||
- Bulk update? → Show summary of changes
|
||||
|
||||
### 6. **Workflow Integration**
|
||||
|
||||
After updates:
|
||||
- Auto-update dependent task states
|
||||
- Trigger status recalculation
|
||||
- Update sprint/milestone progress
|
||||
- Log changes with context
|
||||
|
||||
Result: Flexible, intelligent task updates with safety checks.
|
||||
@@ -1,119 +0,0 @@
|
||||
Update a single specific task with new information.
|
||||
|
||||
Arguments: $ARGUMENTS
|
||||
|
||||
Parse task ID and update details.
|
||||
|
||||
## Single Task Update
|
||||
|
||||
Precisely update one task with AI assistance to maintain consistency.
|
||||
|
||||
## Argument Parsing
|
||||
|
||||
Natural language updates:
|
||||
- "5: add caching requirement"
|
||||
- "update 5 to include error handling"
|
||||
- "task 5 needs rate limiting"
|
||||
- "5 change priority to high"
|
||||
|
||||
## Execution
|
||||
|
||||
```bash
|
||||
task-master update-task --id=<id> --prompt="<context>"
|
||||
```
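For example, "5: add caching requirement" would run (values illustrative):

```bash
task-master update-task --id=5 --prompt="add caching requirement"
```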
|
||||
|
||||
## Update Types
|
||||
|
||||
### 1. **Content Updates**
|
||||
- Enhance description
|
||||
- Add requirements
|
||||
- Clarify details
|
||||
- Update acceptance criteria
|
||||
|
||||
### 2. **Metadata Updates**
|
||||
- Change priority
|
||||
- Adjust time estimates
|
||||
- Update complexity
|
||||
- Modify dependencies
|
||||
|
||||
### 3. **Strategic Updates**
|
||||
- Revise approach
|
||||
- Change test strategy
|
||||
- Update implementation notes
|
||||
- Adjust subtask needs
|
||||
|
||||
## AI-Powered Updates
|
||||
|
||||
The AI:
|
||||
1. **Understands Context**
|
||||
- Reads current task state
|
||||
- Identifies update intent
|
||||
- Maintains consistency
|
||||
- Preserves important info
|
||||
|
||||
2. **Applies Changes**
|
||||
- Updates relevant fields
|
||||
- Keeps style consistent
|
||||
- Adds without removing
|
||||
- Enhances clarity
|
||||
|
||||
3. **Validates Results**
|
||||
- Checks coherence
|
||||
- Verifies completeness
|
||||
- Maintains relationships
|
||||
- Suggests related updates
|
||||
|
||||
## Example Updates
|
||||
|
||||
```
|
||||
/project:tm/update/single 5: add rate limiting
|
||||
→ Updating Task #5: "Implement API endpoints"
|
||||
|
||||
Current: Basic CRUD endpoints
|
||||
Adding: Rate limiting requirements
|
||||
|
||||
Updated sections:
|
||||
✓ Description: Added rate limiting mention
|
||||
✓ Details: Added specific limits (100/min)
|
||||
✓ Test Strategy: Added rate limit tests
|
||||
✓ Complexity: Increased from 5 to 6
|
||||
✓ Time Estimate: Increased by 2 hours
|
||||
|
||||
Suggestion: Also update task #6 (API Gateway) for consistency?
|
||||
```
|
||||
|
||||
## Smart Features
|
||||
|
||||
1. **Incremental Updates**
|
||||
- Adds without overwriting
|
||||
- Preserves work history
|
||||
- Tracks what changed
|
||||
- Shows diff view
|
||||
|
||||
2. **Consistency Checks**
|
||||
- Related task alignment
|
||||
- Subtask compatibility
|
||||
- Dependency validity
|
||||
- Timeline impact
|
||||
|
||||
3. **Update History**
|
||||
- Timestamp changes
|
||||
- Track who/what updated
|
||||
- Reason for update
|
||||
- Previous versions
|
||||
|
||||
## Field-Specific Updates
|
||||
|
||||
Quick syntax for specific fields:
|
||||
- "5 priority:high" → Update priority only
|
||||
- "5 add-time:4h" → Add to time estimate
|
||||
- "5 status:review" → Change status
|
||||
- "5 depends:3,4" → Add dependencies
|
||||
|
||||
## Post-Update
|
||||
|
||||
- Show updated task
|
||||
- Highlight changes
|
||||
- Check related tasks
|
||||
- Update suggestions
|
||||
- Timeline adjustments
|
||||
@@ -1,97 +0,0 @@
|
||||
Advanced project analysis with actionable insights and recommendations.
|
||||
|
||||
Arguments: $ARGUMENTS
|
||||
|
||||
## Comprehensive Project Analysis
|
||||
|
||||
Multi-dimensional analysis based on requested focus area.
|
||||
|
||||
### 1. **Analysis Modes**
|
||||
|
||||
Based on $ARGUMENTS:
|
||||
- "velocity" → Sprint velocity and trends
|
||||
- "quality" → Code quality metrics
|
||||
- "risk" → Risk assessment and mitigation
|
||||
- "dependencies" → Dependency graph analysis
|
||||
- "team" → Workload and skill distribution
|
||||
- "architecture" → System design coherence
|
||||
- Default → Full spectrum analysis
|
||||
|
||||
### 2. **Velocity Analytics**
|
||||
|
||||
```
|
||||
📊 Velocity Analysis
|
||||
━━━━━━━━━━━━━━━━━━━
|
||||
Current Sprint: 24 points/week ↗️ +20%
|
||||
Rolling Average: 20 points/week
|
||||
Efficiency: 85% (17/20 tasks on time)
|
||||
|
||||
Bottlenecks Detected:
|
||||
- Code review delays (avg 4h wait)
|
||||
- Test environment availability
|
||||
- Dependency on external team
|
||||
|
||||
Recommendations:
|
||||
1. Implement parallel review process
|
||||
2. Add staging environment
|
||||
3. Mock external dependencies
|
||||
```
|
||||
|
||||
### 3. **Risk Assessment**
|
||||
|
||||
**Technical Risks**
|
||||
- High complexity tasks without backup assignee
|
||||
- Single points of failure in architecture
|
||||
- Insufficient test coverage in critical paths
|
||||
- Technical debt accumulation rate
|
||||
|
||||
**Project Risks**
|
||||
- Critical path dependencies
|
||||
- Resource availability gaps
|
||||
- Deadline feasibility analysis
|
||||
- Scope creep indicators
|
||||
|
||||
### 4. **Dependency Intelligence**
|
||||
|
||||
Visual dependency analysis:
|
||||
```
|
||||
Critical Path:
|
||||
#12 → #15 → #23 → #45 → #50 (20 days)
|
||||
↘ #24 → #46 ↗
|
||||
|
||||
Optimization: Parallelize #15 and #24
|
||||
Time Saved: 3 days
|
||||
```
|
||||
|
||||
### 5. **Quality Metrics**
|
||||
|
||||
**Code Quality**
|
||||
- Test coverage trends
|
||||
- Complexity scores
|
||||
- Technical debt ratio
|
||||
- Review feedback patterns
|
||||
|
||||
**Process Quality**
|
||||
- Rework frequency
|
||||
- Bug introduction rate
|
||||
- Time to resolution
|
||||
- Knowledge distribution
|
||||
|
||||
### 6. **Predictive Insights**
|
||||
|
||||
Based on patterns:
|
||||
- Completion probability by deadline
|
||||
- Resource needs projection
|
||||
- Risk materialization likelihood
|
||||
- Suggested interventions
|
||||
|
||||
### 7. **Executive Dashboard**
|
||||
|
||||
High-level summary with:
|
||||
- Health score (0-100)
|
||||
- Top 3 risks
|
||||
- Top 3 opportunities
|
||||
- Recommended actions
|
||||
- Success probability
|
||||
|
||||
Result: Data-driven decisions with clear action paths.
|
||||
@@ -1,71 +0,0 @@
|
||||
Validate all task dependencies for issues.
|
||||
|
||||
## Dependency Validation
|
||||
|
||||
Comprehensive check for dependency problems across the entire project.
|
||||
|
||||
## Execution
|
||||
|
||||
```bash
|
||||
task-master validate-dependencies
|
||||
```
|
||||
|
||||
## Validation Checks
|
||||
|
||||
1. **Circular Dependencies**
|
||||
- A depends on B, B depends on A
|
||||
- Complex circular chains
|
||||
- Self-dependencies
|
||||
|
||||
2. **Missing Dependencies**
|
||||
- References to non-existent tasks
|
||||
- Deleted task references
|
||||
- Invalid task IDs
|
||||
|
||||
3. **Logical Issues**
|
||||
- Completed tasks depending on pending
|
||||
- Cancelled tasks in dependency chains
|
||||
- Impossible sequences
|
||||
|
||||
4. **Complexity Warnings**
|
||||
- Over-complex dependency chains
|
||||
- Too many dependencies per task
|
||||
- Bottleneck tasks
|
||||
|
||||
## Smart Analysis
|
||||
|
||||
The validation provides:
|
||||
- Visual dependency graph
|
||||
- Critical path analysis
|
||||
- Bottleneck identification
|
||||
- Suggested optimizations
|
||||
|
||||
## Report Format
|
||||
|
||||
```
|
||||
Dependency Validation Report
|
||||
━━━━━━━━━━━━━━━━━━━━━━━━━━
|
||||
✅ No circular dependencies found
|
||||
⚠️ 2 warnings found:
|
||||
- Task #23 has 7 dependencies (consider breaking down)
|
||||
- Task #45 blocks 5 other tasks (potential bottleneck)
|
||||
❌ 1 error found:
|
||||
- Task #67 depends on deleted task #66
|
||||
|
||||
Critical Path: #1 → #5 → #23 → #45 → #50 (15 days)
|
||||
```
|
||||
|
||||
## Actionable Output
|
||||
|
||||
For each issue found:
|
||||
- Clear description
|
||||
- Impact assessment
|
||||
- Suggested fix
|
||||
- Command to resolve
|
||||
|
||||
## Next Steps
|
||||
|
||||
After validation:
|
||||
- Run `/project:tm/fix-dependencies` to auto-fix
|
||||
- Manually adjust problematic dependencies
|
||||
- Rerun to verify fixes
|
||||
@@ -1,97 +0,0 @@
|
||||
Enhanced auto-implementation with intelligent code generation and testing.
|
||||
|
||||
Arguments: $ARGUMENTS
|
||||
|
||||
## Intelligent Auto-Implementation
|
||||
|
||||
Advanced implementation with context awareness and quality checks.
|
||||
|
||||
### 1. **Pre-Implementation Analysis**
|
||||
|
||||
Before starting:
|
||||
- Analyze task complexity and requirements
|
||||
- Check codebase patterns and conventions
|
||||
- Identify similar completed tasks
|
||||
- Assess test coverage needs
|
||||
- Detect potential risks
|
||||
|
||||
### 2. **Smart Implementation Strategy**
|
||||
|
||||
Based on task type and context:
|
||||
|
||||
**Feature Tasks**
|
||||
1. Research existing patterns
|
||||
2. Design component architecture
|
||||
3. Implement with tests
|
||||
4. Integrate with system
|
||||
5. Update documentation
|
||||
|
||||
**Bug Fix Tasks**
|
||||
1. Reproduce issue
|
||||
2. Identify root cause
|
||||
3. Implement minimal fix
|
||||
4. Add regression tests
|
||||
5. Verify side effects
|
||||
|
||||
**Refactoring Tasks**
|
||||
1. Analyze current structure
|
||||
2. Plan incremental changes
|
||||
3. Maintain test coverage
|
||||
4. Refactor step-by-step
|
||||
5. Verify behavior unchanged
|
||||
|
||||
### 3. **Code Intelligence**
|
||||
|
||||
**Pattern Recognition**
|
||||
- Learn from existing code
|
||||
- Follow team conventions
|
||||
- Use preferred libraries
|
||||
- Match style guidelines
|
||||
|
||||
**Test-Driven Approach**
|
||||
- Write tests first when possible
|
||||
- Ensure comprehensive coverage
|
||||
- Include edge cases
|
||||
- Performance considerations
|
||||
|
||||
### 4. **Progressive Implementation**
|
||||
|
||||
Step-by-step with validation:
|
||||
```
|
||||
Step 1/5: Setting up component structure ✓
|
||||
Step 2/5: Implementing core logic ✓
|
||||
Step 3/5: Adding error handling ⚡ (in progress)
|
||||
Step 4/5: Writing tests ⏳
|
||||
Step 5/5: Integration testing ⏳
|
||||
|
||||
Current: Adding try-catch blocks and validation...
|
||||
```
|
||||
|
||||
### 5. **Quality Assurance**
|
||||
|
||||
Automated checks:
|
||||
- Linting and formatting
|
||||
- Test execution
|
||||
- Type checking
|
||||
- Dependency validation
|
||||
- Performance analysis
|
||||
|
||||
### 6. **Smart Recovery**
|
||||
|
||||
If issues arise:
|
||||
- Diagnostic analysis
|
||||
- Suggestion generation
|
||||
- Fallback strategies
|
||||
- Manual intervention points
|
||||
- Learning from failures
|
||||
|
||||
### 7. **Post-Implementation**
|
||||
|
||||
After completion:
|
||||
- Generate PR description
|
||||
- Update documentation
|
||||
- Log lessons learned
|
||||
- Suggest follow-up tasks
|
||||
- Update task relationships
|
||||
|
||||
Result: High-quality, production-ready implementations.
|
||||
@@ -1,77 +0,0 @@
|
||||
Execute a pipeline of commands based on a specification.
|
||||
|
||||
Arguments: $ARGUMENTS
|
||||
|
||||
## Command Pipeline Execution
|
||||
|
||||
Parse pipeline specification from arguments. Supported formats:
|
||||
|
||||
### Simple Pipeline
|
||||
`init → expand-all → sprint-plan`
|
||||
|
||||
### Conditional Pipeline
|
||||
`status → if:pending>10 → sprint-plan → else → next`
|
||||
|
||||
### Iterative Pipeline
|
||||
`for:pending-tasks → expand → complexity-check`
|
||||
|
||||
### Smart Pipeline Patterns
|
||||
|
||||
**1. Project Setup Pipeline**
|
||||
```
|
||||
init [prd] →
|
||||
expand-all →
|
||||
complexity-report →
|
||||
sprint-plan →
|
||||
show first-sprint
|
||||
```
|
||||
|
||||
**2. Daily Work Pipeline**
|
||||
```
|
||||
standup →
|
||||
if:in-progress → continue →
|
||||
else → next → start
|
||||
```
|
||||
|
||||
**3. Task Completion Pipeline**
|
||||
```
|
||||
complete [id] →
|
||||
git-commit →
|
||||
if:blocked-tasks-freed → show-freed →
|
||||
next
|
||||
```
|
||||
|
||||
**4. Quality Check Pipeline**
|
||||
```
|
||||
list in-progress →
|
||||
for:each → check-idle-time →
|
||||
if:idle>1day → prompt-update
|
||||
```
|
||||
|
||||
### Pipeline Features
|
||||
|
||||
**Variables**
|
||||
- Store results: `status → $count=pending-count`
|
||||
- Use in conditions: `if:$count>10`
|
||||
- Pass between commands: `expand $high-priority-tasks`
|
||||
|
||||
**Error Handling**
|
||||
- On failure: `try:complete → catch:show-blockers`
|
||||
- Skip on error: `optional:test-run`
|
||||
- Retry logic: `retry:3:commit`
|
||||
|
||||
**Parallel Execution**
|
||||
- Parallel branches: `[analyze | test | lint]`
|
||||
- Join results: `parallel → join:report`
|
||||
|
||||
### Execution Flow
|
||||
|
||||
1. Parse pipeline specification
|
||||
2. Validate command sequence
|
||||
3. Execute with state passing
|
||||
4. Handle conditions and loops
|
||||
5. Aggregate results
|
||||
6. Show summary
|
||||
|
||||
This enables complex workflows like:
|
||||
`parse-prd → expand-all → filter:complex>70 → assign:senior → sprint-plan:weighted`
|
||||
@@ -1,55 +0,0 @@
|
||||
Execute an intelligent workflow based on current project state and recent commands.
|
||||
|
||||
This command analyzes:
|
||||
1. Recent commands you've run
|
||||
2. Current project state
|
||||
3. Time of day / day of week
|
||||
4. Your working patterns
|
||||
|
||||
Arguments: $ARGUMENTS
|
||||
|
||||
## Intelligent Workflow Selection
|
||||
|
||||
Based on context, I'll determine the best workflow:
|
||||
|
||||
### Context Analysis
|
||||
- Previous command executed
|
||||
- Current task states
|
||||
- Unfinished work from last session
|
||||
- Your typical patterns
|
||||
|
||||
### Smart Execution
|
||||
|
||||
If last command was:
|
||||
- `status` → Likely starting work → Run daily standup
|
||||
- `complete` → Task finished → Find next task
|
||||
- `list pending` → Planning → Suggest sprint planning
|
||||
- `expand` → Breaking down work → Show complexity analysis
|
||||
- `init` → New project → Show onboarding workflow
|
||||
|
||||
If no recent commands:
|
||||
- Morning? → Daily standup workflow
|
||||
- Many pending tasks? → Sprint planning
|
||||
- Tasks blocked? → Dependency resolution
|
||||
- Friday? → Weekly review
|
||||
|
||||
### Workflow Composition
|
||||
|
||||
I'll chain appropriate commands:
|
||||
1. Analyze current state
|
||||
2. Execute primary workflow
|
||||
3. Suggest follow-up actions
|
||||
4. Prepare environment for coding
|
||||
|
||||
### Learning Mode
|
||||
|
||||
This command learns from your patterns:
|
||||
- Track command sequences
|
||||
- Note time preferences
|
||||
- Remember common workflows
|
||||
- Adapt to your style
|
||||
|
||||
Example flows detected:
|
||||
- Morning: standup → next → start
|
||||
- After lunch: status → continue task
|
||||
- End of day: complete → commit → status
|
||||
10
.coderabbit.yaml
Normal file
@@ -0,0 +1,10 @@
|
||||
reviews:
|
||||
profile: assertive
|
||||
poem: false
|
||||
auto_review:
|
||||
base_branches:
|
||||
- rc
|
||||
- beta
|
||||
- alpha
|
||||
- production
|
||||
- next
|
||||
@@ -2,7 +2,7 @@
|
||||
"mcpServers": {
|
||||
"task-master-ai": {
|
||||
"command": "node",
|
||||
"args": ["./mcp-server/server.js"],
|
||||
"args": ["./dist/mcp-server.js"],
|
||||
"env": {
|
||||
"ANTHROPIC_API_KEY": "ANTHROPIC_API_KEY_HERE",
|
||||
"PERPLEXITY_API_KEY": "PERPLEXITY_API_KEY_HERE",
|
||||
|
||||
@@ -523,7 +523,7 @@ For AI-powered commands that benefit from project context, follow the research c
|
||||
.option('--details <details>', 'Implementation details for the new subtask, optional')
|
||||
.option('--dependencies <ids>', 'Comma-separated list of subtask IDs this subtask depends on')
|
||||
.option('--status <status>', 'Initial status for the subtask', 'pending')
|
||||
.option('--skip-generate', 'Skip regenerating task files')
|
||||
.option('--generate', 'Regenerate task files after adding subtask')
|
||||
.action(async (options) => {
|
||||
// Validate required parameters
|
||||
if (!options.parent) {
|
||||
@@ -545,7 +545,7 @@ For AI-powered commands that benefit from project context, follow the research c
|
||||
.option('-f, --file <path>', 'Path to the tasks file', 'tasks/tasks.json')
|
||||
.option('-i, --id <id>', 'ID of the subtask to remove in format parentId.subtaskId, required')
|
||||
.option('-c, --convert', 'Convert the subtask to a standalone task instead of deleting')
|
||||
.option('--skip-generate', 'Skip regenerating task files')
|
||||
.option('--generate', 'Regenerate task files after removing subtask')
|
||||
.action(async (options) => {
|
||||
// Implementation with detailed error handling
|
||||
})
|
||||
@@ -633,11 +633,11 @@ function showAddSubtaskHelp() {
|
||||
' --dependencies <ids> Comma-separated list of dependency IDs\n' +
|
||||
' -s, --status <status> Status for the new subtask (default: "pending")\n' +
|
||||
' -f, --file <file> Path to the tasks file (default: "tasks/tasks.json")\n' +
|
||||
' --skip-generate Skip regenerating task files\n\n' +
|
||||
' --generate Regenerate task files after adding subtask\n\n' +
|
||||
chalk.cyan('Examples:') + '\n' +
|
||||
' task-master add-subtask --parent=\'5\' --task-id=\'8\'\n' +
|
||||
' task-master add-subtask -p \'5\' -t \'Implement login UI\' -d \'Create the login form\'\n' +
|
||||
' task-master add-subtask -p \'5\' -t \'Handle API Errors\' --details $\'Handle 401 Unauthorized.\nHandle 500 Server Error.\'',
|
||||
' task-master add-subtask -p \'5\' -t \'Handle API Errors\' --details "Handle 401 Unauthorized.\\nHandle 500 Server Error." --generate',
|
||||
{ padding: 1, borderColor: 'blue', borderStyle: 'round' }
|
||||
));
|
||||
}
|
||||
@@ -652,7 +652,7 @@ function showRemoveSubtaskHelp() {
|
||||
' -i, --id <id> Subtask ID(s) to remove in format "parentId.subtaskId" (can be comma-separated, required)\n' +
|
||||
' -c, --convert Convert the subtask to a standalone task instead of deleting it\n' +
|
||||
' -f, --file <file> Path to the tasks file (default: "tasks/tasks.json")\n' +
|
||||
' --skip-generate Skip regenerating task files\n\n' +
|
||||
' --generate Regenerate task files after removing subtask\n\n' +
|
||||
chalk.cyan('Examples:') + '\n' +
|
||||
' task-master remove-subtask --id=\'5.2\'\n' +
|
||||
' task-master remove-subtask --id=\'5.2,6.3,7.1\'\n' +
|
||||
|
||||
@@ -158,7 +158,7 @@ This document provides a detailed reference for interacting with Taskmaster, cov
|
||||
* `details`: `Provide implementation notes or details for the new subtask.` (CLI: `--details <text>`)
|
||||
* `dependencies`: `Specify IDs of other tasks or subtasks, e.g., '15' or '16.1', that must be done before this new subtask.` (CLI: `--dependencies <ids>`)
|
||||
* `status`: `Set the initial status for the new subtask. Default is 'pending'.` (CLI: `-s, --status <status>`)
|
||||
* `skipGenerate`: `Prevent Taskmaster from automatically regenerating markdown task files after adding the subtask.` (CLI: `--skip-generate`)
|
||||
* `generate`: `Enable Taskmaster to regenerate markdown task files after adding the subtask.` (CLI: `--generate`)
|
||||
* `tag`: `Specify which tag context to operate on. Defaults to the current active tag.` (CLI: `--tag <name>`)
|
||||
* `file`: `Path to your Taskmaster 'tasks.json' file. Default relies on auto-detection.` (CLI: `-f, --file <file>`)
|
||||
* **Usage:** Break down tasks manually or reorganize existing tasks.
|
||||
@@ -286,7 +286,7 @@ This document provides a detailed reference for interacting with Taskmaster, cov
|
||||
* **Key Parameters/Options:**
|
||||
* `id`: `Required. The ID(s) of the Taskmaster subtask(s) to remove, e.g., '15.2' or '16.1,16.3'.` (CLI: `-i, --id <id>`)
|
||||
* `convert`: `If used, Taskmaster will turn the subtask into a regular top-level task instead of deleting it.` (CLI: `-c, --convert`)
|
||||
* `skipGenerate`: `Prevent Taskmaster from automatically regenerating markdown task files after removing the subtask.` (CLI: `--skip-generate`)
|
||||
* `generate`: `Enable Taskmaster to regenerate markdown task files after removing the subtask.` (CLI: `--generate`)
|
||||
* `tag`: `Specify which tag context to operate on. Defaults to the current active tag.` (CLI: `--tag <name>`)
|
||||
* `file`: `Path to your Taskmaster 'tasks.json' file. Default relies on auto-detection.` (CLI: `-f, --file <file>`)
|
||||
* **Usage:** Delete unnecessary subtasks or promote a subtask to a top-level task.
|
||||
|
||||
803
.cursor/rules/test_workflow.mdc
Normal file
@@ -0,0 +1,803 @@
|
||||
---
|
||||
description:
|
||||
globs:
|
||||
alwaysApply: true
|
||||
---
|
||||
# Test Workflow & Development Process
|
||||
|
||||
## **Initial Testing Framework Setup**
|
||||
|
||||
Before implementing the TDD workflow, ensure your project has a proper testing framework configured. This section covers setup for different technology stacks.
|
||||
|
||||
### **Detecting Project Type & Framework Needs**
|
||||
|
||||
**AI Agent Assessment Checklist** (a shell sketch follows the list):
|
||||
1. **Language Detection**: Check for `package.json` (Node.js/JavaScript), `requirements.txt` (Python), `Cargo.toml` (Rust), etc.
|
||||
2. **Existing Tests**: Look for test files (`.test.`, `.spec.`, `_test.`) or test directories
|
||||
3. **Framework Detection**: Check for existing test runners in dependencies
|
||||
4. **Project Structure**: Analyze directory structure for testing patterns
|
||||
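A rough shell sketch of this detection (manifest file names only; `pyproject.toml` is added here as a common alternative to `requirements.txt`, and real projects may need deeper inspection):

```bash
# Detect language/ecosystem by manifest files
[ -f package.json ] && echo "Node.js/JavaScript project"
{ [ -f requirements.txt ] || [ -f pyproject.toml ]; } && echo "Python project"
[ -f Cargo.toml ] && echo "Rust project"
[ -f go.mod ] && echo "Go project"

# Look for existing tests and configured test runners
find . -path ./node_modules -prune -o \
  \( -name "*.test.*" -o -name "*_test.*" -o -name "*.spec.*" \) -print | head -n 5
grep -E '"(jest|vitest|mocha)"' package.json 2>/dev/null
```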
|
||||
### **JavaScript/Node.js Projects (Jest Setup)**
|
||||
|
||||
#### **Prerequisites Check**
|
||||
```bash
|
||||
# Verify Node.js project
|
||||
ls package.json # Should exist
|
||||
|
||||
# Check for existing testing setup
|
||||
ls jest.config.js jest.config.ts # Check for Jest config
|
||||
grep -E "(jest|vitest|mocha)" package.json # Check for test runners
|
||||
```
|
||||
|
||||
#### **Jest Installation & Configuration**
|
||||
|
||||
**Step 1: Install Dependencies**
|
||||
```bash
|
||||
# Core Jest dependencies
|
||||
npm install --save-dev jest
|
||||
|
||||
# TypeScript support (if using TypeScript)
|
||||
npm install --save-dev ts-jest @types/jest
|
||||
|
||||
# Additional useful packages
|
||||
npm install --save-dev supertest @types/supertest # For API testing
|
||||
npm install --save-dev jest-watch-typeahead # Enhanced watch mode
|
||||
```
|
||||
|
||||
**Step 2: Create Jest Configuration**
|
||||
|
||||
Create `jest.config.js` with the following production-ready configuration:
|
||||
|
||||
```javascript
|
||||
/** @type {import('jest').Config} */
|
||||
module.exports = {
|
||||
// Use ts-jest preset for TypeScript support
|
||||
preset: 'ts-jest',
|
||||
|
||||
// Test environment
|
||||
testEnvironment: 'node',
|
||||
|
||||
// Roots for test discovery
|
||||
roots: ['<rootDir>/src', '<rootDir>/tests'],
|
||||
|
||||
// Test file patterns
|
||||
testMatch: ['**/__tests__/**/*.ts', '**/?(*.)+(spec|test).ts'],
|
||||
|
||||
// Transform files
|
||||
transform: {
|
||||
'^.+\\.ts$': [
|
||||
'ts-jest',
|
||||
{
|
||||
tsconfig: {
|
||||
target: 'es2020',
|
||||
module: 'commonjs',
|
||||
esModuleInterop: true,
|
||||
allowSyntheticDefaultImports: true,
|
||||
skipLibCheck: true,
|
||||
strict: false,
|
||||
noImplicitAny: false,
|
||||
},
|
||||
},
|
||||
],
|
||||
'^.+\\.js$': [
|
||||
'ts-jest',
|
||||
{
|
||||
useESM: false,
|
||||
tsconfig: {
|
||||
target: 'es2020',
|
||||
module: 'commonjs',
|
||||
esModuleInterop: true,
|
||||
allowSyntheticDefaultImports: true,
|
||||
allowJs: true,
|
||||
},
|
||||
},
|
||||
],
|
||||
},
|
||||
|
||||
// Module file extensions
|
||||
moduleFileExtensions: ['ts', 'tsx', 'js', 'jsx', 'json', 'node'],
|
||||
|
||||
// Transform ignore patterns - adjust for ES modules
|
||||
transformIgnorePatterns: ['node_modules/(?!(your-es-module-deps|.*\\.mjs$))'],
|
||||
|
||||
// Coverage configuration
|
||||
collectCoverage: true,
|
||||
coverageDirectory: 'coverage',
|
||||
coverageReporters: [
|
||||
'text', // Console output
|
||||
'text-summary', // Brief summary
|
||||
'lcov', // For IDE integration
|
||||
'html', // Detailed HTML report
|
||||
],
|
||||
|
||||
// Files to collect coverage from
|
||||
collectCoverageFrom: [
|
||||
'src/**/*.ts',
|
||||
'!src/**/*.d.ts',
|
||||
'!src/**/*.test.ts',
|
||||
'!src/**/index.ts', // Often just exports
|
||||
'!src/generated/**', // Generated code
|
||||
'!src/config/database.ts', // Database config (tested via integration)
|
||||
],
|
||||
|
||||
// Coverage thresholds - TaskMaster standards
|
||||
coverageThreshold: {
|
||||
global: {
|
||||
branches: 70,
|
||||
functions: 80,
|
||||
lines: 80,
|
||||
statements: 80,
|
||||
},
|
||||
// Higher standards for critical business logic
|
||||
'./src/utils/': {
|
||||
branches: 85,
|
||||
functions: 90,
|
||||
lines: 90,
|
||||
statements: 90,
|
||||
},
|
||||
'./src/middleware/': {
|
||||
branches: 80,
|
||||
functions: 85,
|
||||
lines: 85,
|
||||
statements: 85,
|
||||
},
|
||||
},
|
||||
|
||||
// Setup files
|
||||
setupFilesAfterEnv: ['<rootDir>/tests/setup.ts'],
|
||||
|
||||
// Global teardown to prevent worker process leaks
|
||||
globalTeardown: '<rootDir>/tests/teardown.ts',
|
||||
|
||||
// Module path mapping (if needed)
|
||||
moduleNameMapper: {
|
||||
'^@/(.*)$': '<rootDir>/src/$1',
|
||||
},
|
||||
|
||||
// Clear mocks between tests
|
||||
clearMocks: true,
|
||||
|
||||
// Restore mocks after each test
|
||||
restoreMocks: true,
|
||||
|
||||
// Global test timeout
|
||||
testTimeout: 10000,
|
||||
|
||||
// Projects for different test types
|
||||
projects: [
|
||||
// Unit tests - for pure functions only
|
||||
{
|
||||
displayName: 'unit',
|
||||
testMatch: ['<rootDir>/src/**/*.test.ts'],
|
||||
testPathIgnorePatterns: ['.*\\.integration\\.test\\.ts$', '/tests/'],
|
||||
preset: 'ts-jest',
|
||||
testEnvironment: 'node',
|
||||
collectCoverageFrom: [
|
||||
'src/**/*.ts',
|
||||
'!src/**/*.d.ts',
|
||||
'!src/**/*.test.ts',
|
||||
'!src/**/*.integration.test.ts',
|
||||
],
|
||||
coverageThreshold: {
|
||||
global: {
|
||||
branches: 70,
|
||||
functions: 80,
|
||||
lines: 80,
|
||||
statements: 80,
|
||||
},
|
||||
},
|
||||
},
|
||||
// Integration tests - real database/services
|
||||
{
|
||||
displayName: 'integration',
|
||||
testMatch: [
|
||||
'<rootDir>/src/**/*.integration.test.ts',
|
||||
'<rootDir>/tests/integration/**/*.test.ts',
|
||||
],
|
||||
preset: 'ts-jest',
|
||||
testEnvironment: 'node',
|
||||
setupFilesAfterEnv: ['<rootDir>/tests/setup/integration.ts'],
|
||||
testTimeout: 10000,
|
||||
},
|
||||
// E2E tests - full workflows
|
||||
{
|
||||
displayName: 'e2e',
|
||||
testMatch: ['<rootDir>/tests/e2e/**/*.test.ts'],
|
||||
preset: 'ts-jest',
|
||||
testEnvironment: 'node',
|
||||
setupFilesAfterEnv: ['<rootDir>/tests/setup/e2e.ts'],
|
||||
testTimeout: 30000,
|
||||
},
|
||||
],
|
||||
|
||||
// Verbose output for better debugging
|
||||
verbose: true,
|
||||
|
||||
// Run projects sequentially to avoid conflicts
|
||||
maxWorkers: 1,
|
||||
|
||||
// Enable watch mode plugins
|
||||
watchPlugins: ['jest-watch-typeahead/filename', 'jest-watch-typeahead/testname'],
|
||||
};
|
||||
```
|
||||
|
||||
**Step 3: Update package.json Scripts**
|
||||
|
||||
Add these scripts to your `package.json`:
|
||||
|
||||
```json
|
||||
{
|
||||
"scripts": {
|
||||
"test": "jest",
|
||||
"test:watch": "jest --watch",
|
||||
"test:coverage": "jest --coverage",
|
||||
"test:unit": "jest --selectProjects unit",
|
||||
"test:integration": "jest --selectProjects integration",
|
||||
"test:e2e": "jest --selectProjects e2e",
|
||||
"test:ci": "jest --ci --coverage --watchAll=false"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Step 4: Create Test Setup Files**
|
||||
|
||||
Create essential test setup files:
|
||||
|
||||
```typescript
|
||||
// tests/setup.ts - Global setup
|
||||
import { jest } from '@jest/globals';
|
||||
|
||||
// Global test configuration
|
||||
beforeAll(() => {
|
||||
// Set test timeout
|
||||
jest.setTimeout(10000);
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
// Clean up mocks after each test
|
||||
jest.clearAllMocks();
|
||||
});
|
||||
```
|
||||
|
||||
```typescript
|
||||
// tests/setup/integration.ts - Integration test setup
|
||||
import { PrismaClient } from '@prisma/client';
|
||||
|
||||
const prisma = new PrismaClient();
|
||||
|
||||
beforeAll(async () => {
|
||||
// Connect to test database
|
||||
await prisma.$connect();
|
||||
});
|
||||
|
||||
afterAll(async () => {
|
||||
// Cleanup and disconnect
|
||||
await prisma.$disconnect();
|
||||
});
|
||||
|
||||
beforeEach(async () => {
|
||||
// Clean test data before each test
|
||||
// Add your cleanup logic here
|
||||
});
|
||||
```
|
||||
|
||||
```typescript
|
||||
// tests/teardown.ts - Global teardown
|
||||
export default async () => {
|
||||
// Global cleanup after all tests
|
||||
console.log('Global test teardown complete');
|
||||
};
|
||||
```
|
||||
|
||||
**Step 5: Create Initial Test Structure**
|
||||
|
||||
```bash
|
||||
# Create test directories
|
||||
mkdir -p tests/{setup,fixtures,unit,integration,e2e}
|
||||
mkdir -p tests/unit/src/{utils,services,middleware}
|
||||
|
||||
# Create sample test fixtures
|
||||
mkdir -p tests/fixtures   # -p: this directory already exists from the brace expansion above
|
||||
```
|
||||
|
||||
### **Generic Testing Framework Setup (Any Language)**
|
||||
|
||||
#### **Framework Selection Guide**
|
||||
|
||||
**Python Projects:**
|
||||
- **pytest**: Recommended for most Python projects
|
||||
- **unittest**: Built-in, suitable for simple projects
|
||||
- **Coverage**: Use `coverage.py` for code coverage
|
||||
|
||||
```bash
|
||||
# Python setup example
|
||||
pip install pytest pytest-cov
|
||||
echo "[tool:pytest]" > pytest.ini
|
||||
echo "testpaths = tests" >> pytest.ini
|
||||
echo "addopts = --cov=src --cov-report=html --cov-report=term" >> pytest.ini
|
||||
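# Run the suite; the addopts line above adds coverage automatically
pytest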
```
|
||||
|
||||
**Go Projects:**
|
||||
- **Built-in testing**: Use Go's built-in `testing` package
|
||||
- **Coverage**: Built-in with `go test -cover`
|
||||
|
||||
```bash
|
||||
# Go setup example
|
||||
go mod init your-project
|
||||
mkdir -p tests
|
||||
# Tests are typically *_test.go files alongside source
|
||||
```
|
||||
|
||||
**Rust Projects:**
|
||||
- **Built-in testing**: Use Rust's built-in test framework
|
||||
- **cargo-tarpaulin**: For coverage analysis
|
||||
|
||||
```bash
|
||||
# Rust setup example
|
||||
cargo new your-project
|
||||
cd your-project
|
||||
cargo install cargo-tarpaulin # For coverage
|
||||
```
|
||||
|
||||
**Java Projects:**
|
||||
- **JUnit 5**: Modern testing framework
|
||||
- **Maven/Gradle**: Build tools with testing integration
|
||||
|
||||
```xml
|
||||
<!-- Maven pom.xml example -->
|
||||
<dependency>
|
||||
<groupId>org.junit.jupiter</groupId>
|
||||
<artifactId>junit-jupiter</artifactId>
|
||||
<version>5.9.2</version>
|
||||
<scope>test</scope>
|
||||
</dependency>
|
||||
```
|
||||
|
||||
#### **Universal Testing Principles**

**Coverage Standards (Adapt to Your Language):**
- **Global Minimum**: 70-80% line coverage
- **Critical Code**: 85-90% coverage
- **New Features**: Must meet or exceed standards
- **Legacy Code**: Gradual improvement strategy

**Test Organization:**
- **Unit Tests**: Fast, isolated, no external dependencies
- **Integration Tests**: Test component interactions
- **E2E Tests**: Test complete user workflows
- **Performance Tests**: Load and stress testing (if applicable)

**Naming Conventions:**
- **Test Files**: `*.test.*`, `*_test.*`, or language-specific patterns
- **Test Functions**: Descriptive names (e.g., `should_return_error_for_invalid_input`)
- **Test Directories**: Organized by test type and mirroring source structure

#### **TaskMaster Integration for Any Framework**

**Document Testing Setup in Subtasks:**
```bash
# Update subtask with testing framework setup
task-master update-subtask --id=X.Y --prompt="Testing framework setup:
- Installed [Framework Name] with coverage support
- Configured [Coverage Tool] with thresholds: 80% lines, 70% branches
- Created test directory structure: unit/, integration/, e2e/
- Added test scripts to build configuration
- All setup tests passing"
```

**Testing Framework Verification:**
```bash
# Verify setup works
[test-command]      # e.g., npm test, pytest, go test, cargo test

# Check coverage reporting
[coverage-command]  # e.g., npm run test:coverage

# Update task with verification
task-master update-subtask --id=X.Y --prompt="Testing framework verified:
- Sample tests running successfully
- Coverage reporting functional
- CI/CD integration ready
- Ready to begin TDD workflow"
```

## **Test-Driven Development (TDD) Integration**

### **Core TDD Cycle with Jest**
```bash
# 1. Start development with watch mode
npm run test:watch

# 2. Write failing test first
# Create test file: src/utils/newFeature.test.ts
# Write test that describes expected behavior

# 3. Implement minimum code to make test pass
# 4. Refactor while keeping tests green
# 5. Add edge cases and error scenarios
```

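For illustration, here is what one pass through that red/green cycle might look like for a hypothetical `slugify` utility (both the test and the implementation are examples, not part of this codebase):

```typescript
// src/utils/slugify.test.ts - written first, fails until slugify exists
import { slugify } from './slugify';

describe('slugify', () => {
  it('should lowercase text and replace spaces with dashes', () => {
    expect(slugify('Hello World')).toBe('hello-world');
  });

  it('should strip characters that are not alphanumeric, whitespace, or dashes', () => {
    expect(slugify('Hello, World!')).toBe('hello-world');
  });
});

// src/utils/slugify.ts - minimum implementation that makes the tests pass
export function slugify(input: string): string {
  return input
    .toLowerCase()
    .replace(/[^a-z0-9\s-]/g, '') // drop punctuation
    .trim()
    .replace(/\s+/g, '-'); // collapse whitespace into single dashes
}
```
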
### **TDD Workflow Per Subtask**
```bash
# When starting a new subtask:
task-master set-status --id=4.1 --status=in-progress

# Begin TDD cycle:
npm run test:watch  # Keep running during development

# Document TDD progress in subtask:
task-master update-subtask --id=4.1 --prompt="TDD Progress:
- Written 3 failing tests for core functionality
- Implemented basic feature, tests now passing
- Adding edge case tests for error handling"

# Complete subtask with test summary:
task-master update-subtask --id=4.1 --prompt="Implementation complete:
- Feature implemented with 8 unit tests
- Coverage: 95% statements, 88% branches
- All tests passing, TDD cycle complete"
```

## **Testing Commands & Usage**

### **Development Commands**
```bash
# Primary development command - use during coding
npm run test:watch                               # Watch mode with Jest
npm run test:watch -- --testNamePattern="auth"   # Watch specific tests

# Targeted testing during development
npm run test:unit                # Run only unit tests
npm run test:unit -- --coverage  # Unit tests with coverage

# Integration testing when APIs are ready
npm run test:integration                         # Run integration tests
npm run test:integration -- --detectOpenHandles  # Debug hanging tests

# End-to-end testing for workflows
npm run test:e2e                     # Run E2E tests
npm run test:e2e -- --timeout=30000  # Extended timeout for E2E
```

### **Quality Assurance Commands**
```bash
# Full test suite with coverage (before commits)
npm run test:coverage  # Complete coverage analysis

# All tests (CI/CD pipeline)
npm test               # Run all test projects

# Specific test file execution
npm test -- auth.test.ts                               # Run specific test file
npm test -- --testNamePattern="should handle errors"   # Run specific tests
```

## **Test Implementation Patterns**

### **Unit Test Development**
```typescript
// ✅ DO: Follow established patterns from auth.test.ts
describe('FeatureName', () => {
  beforeEach(() => {
    jest.clearAllMocks();
    // Setup mocks with proper typing
  });

  describe('functionName', () => {
    it('should handle normal case', () => {
      // Test implementation with specific assertions
    });

    it('should throw error for invalid input', async () => {
      // Error scenario testing
      await expect(functionName(invalidInput))
        .rejects.toThrow('Specific error message');
    });
  });
});
```

### **Integration Test Development**
```typescript
// ✅ DO: Use supertest for API endpoint testing
import request from 'supertest';
import { app } from '../../src/app';

describe('POST /api/auth/register', () => {
  beforeEach(async () => {
    await integrationTestUtils.cleanupTestData();
  });

  it('should register user successfully', async () => {
    const userData = createTestUser();

    const response = await request(app)
      .post('/api/auth/register')
      .send(userData)
      .expect(201);

    expect(response.body).toMatchObject({
      id: expect.any(String),
      email: userData.email
    });

    // Verify database state
    const user = await prisma.user.findUnique({
      where: { email: userData.email }
    });
    expect(user).toBeTruthy();
  });
});
```

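The `integrationTestUtils.cleanupTestData()` helper used above is not defined in this document; a minimal sketch of what it could look like, reusing the Prisma models assumed in the setup examples (file name, model names, and deletion order are assumptions):

```typescript
// tests/setup/integration-utils.ts (sketch - names are assumptions)
import { PrismaClient } from '@prisma/client';

const prisma = new PrismaClient();

export const integrationTestUtils = {
  async cleanupTestData(): Promise<void> {
    // Delete dependent rows first so foreign-key constraints are not violated
    await prisma.session.deleteMany();
    await prisma.user.deleteMany();
  }
};
```
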
### **E2E Test Development**
```typescript
// ✅ DO: Test complete user workflows
describe('User Authentication Flow', () => {
  it('should complete registration → login → protected access', async () => {
    // Step 1: Register
    const userData = createTestUser();
    await request(app)
      .post('/api/auth/register')
      .send(userData)
      .expect(201);

    // Step 2: Login
    const loginResponse = await request(app)
      .post('/api/auth/login')
      .send({ email: userData.email, password: userData.password })
      .expect(200);

    const { token } = loginResponse.body;

    // Step 3: Access protected resource
    await request(app)
      .get('/api/profile')
      .set('Authorization', `Bearer ${token}`)
      .expect(200);
  }, 30000); // Extended timeout for E2E
});
```

## **Mocking & Test Utilities**

### **Established Mocking Patterns**
```typescript
// ✅ DO: Use established bcrypt mocking pattern
jest.mock('bcrypt');
import bcrypt from 'bcrypt';
const mockHash = bcrypt.hash as jest.MockedFunction<typeof bcrypt.hash>;
const mockCompare = bcrypt.compare as jest.MockedFunction<typeof bcrypt.compare>;

// ✅ DO: Use Prisma mocking for unit tests
jest.mock('@prisma/client', () => ({
  PrismaClient: jest.fn().mockImplementation(() => ({
    user: {
      create: jest.fn(),
      findUnique: jest.fn(),
    },
    $connect: jest.fn(),
    $disconnect: jest.fn(),
  })),
}));
```

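As a rough sketch of how the typed bcrypt mocks above are then driven inside a test (the `registerUser` function, hash value, and salt rounds are stand-ins, not part of this codebase):

```typescript
// Sketch: driving the mocked bcrypt functions in a unit test
async function registerUser(email: string, password: string) {
  // Stand-in for whatever service you are actually testing
  const passwordHash = await bcrypt.hash(password, 10);
  return { email, passwordHash };
}

it('should hash the password before saving', async () => {
  // The cast keeps TypeScript happy regardless of which bcrypt overload is picked up
  mockHash.mockResolvedValue('hashed-password' as never);

  const result = await registerUser('user@example.com', 'plain-password');

  expect(mockHash).toHaveBeenCalledWith('plain-password', 10);
  expect(result.passwordHash).toBe('hashed-password');
});
```
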
### **Test Fixtures Usage**
```typescript
// ✅ DO: Use centralized test fixtures
import { createTestUser, adminUser, invalidUser } from '../fixtures/users';

describe('User Service', () => {
  it('should handle admin user creation', async () => {
    const userData = createTestUser(adminUser);
    // Test implementation
  });

  it('should reject invalid user data', async () => {
    const userData = createTestUser(invalidUser);
    // Error testing
  });
});
```

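The fixture module itself is not shown in this document; a minimal sketch of `tests/fixtures/users.ts` that would support the imports above (the field names and values are assumptions; adapt them to your user model):

```typescript
// tests/fixtures/users.ts (sketch - the user shape is an assumption)
export interface TestUser {
  email: string;
  password: string;
  role?: 'user' | 'admin';
}

export const adminUser: Partial<TestUser> = { role: 'admin' };
export const invalidUser: Partial<TestUser> = { email: 'not-an-email', password: '' };

let counter = 0;

// Returns a unique, valid user by default; overrides let tests opt into edge cases
export function createTestUser(overrides: Partial<TestUser> = {}): TestUser {
  counter += 1;
  return {
    email: `test-user-${counter}@example.com`,
    password: 'Sup3r-Secret!',
    role: 'user',
    ...overrides
  };
}
```
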
## **Coverage Standards & Monitoring**

### **Coverage Thresholds**
- **Global Standards**: 80% lines/functions, 70% branches
- **Critical Code**: 90% utils, 85% middleware
- **New Features**: Must meet or exceed global thresholds
- **Legacy Code**: Gradual improvement with each change

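These numbers map onto Jest's `coverageThreshold` option; a sketch of how the per-directory figures above could be expressed (your `jest.config.js` from the earlier setup steps may already contain an equivalent block, so merge rather than duplicate):

```js
// jest.config.js (sketch - merge into your existing config)
module.exports = {
  // ...existing options...
  coverageThreshold: {
    global: { lines: 80, functions: 80, branches: 70 },
    './src/utils/': { lines: 90, functions: 90 },
    './src/middleware/': { lines: 85, functions: 85 }
  }
};
```
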
### **Coverage Reporting & Analysis**
```bash
# Generate coverage reports
npm run test:coverage

# View detailed HTML report
open coverage/lcov-report/index.html

# Coverage files generated:
# - coverage/lcov-report/index.html   # Detailed HTML report
# - coverage/lcov.info                # LCOV format for IDE integration
# - coverage/coverage-final.json      # JSON format for tooling
```

### **Coverage Quality Checks**
```typescript
// ✅ DO: Test all code paths
describe('validateInput', () => {
  it('should return true for valid input', () => {
    expect(validateInput('valid')).toBe(true);
  });

  it('should return false for various invalid inputs', () => {
    expect(validateInput('')).toBe(false);        // Empty string
    expect(validateInput(null)).toBe(false);      // Null value
    expect(validateInput(undefined)).toBe(false); // Undefined
  });

  it('should throw for unexpected input types', () => {
    expect(() => validateInput(123)).toThrow('Invalid input type');
  });
});
```

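For reference, an implementation that would satisfy every path exercised above might look like this (a sketch; the real `validateInput` in your codebase may differ):

```typescript
// Sketch of a validateInput covering all the branches tested above
export function validateInput(input: unknown): boolean {
  if (input === null || input === undefined) {
    return false; // missing values are invalid, but not an error
  }
  if (typeof input !== 'string') {
    throw new Error('Invalid input type'); // unexpected types are a programming error
  }
  return input.length > 0;
}
```
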
## **Testing During Development Phases**

### **Feature Development Phase**
```bash
# 1. Start feature development
task-master set-status --id=X.Y --status=in-progress

# 2. Begin TDD cycle
npm run test:watch

# 3. Document test progress in subtask
task-master update-subtask --id=X.Y --prompt="Test development:
- Created test file with 5 failing tests
- Implemented core functionality
- Tests passing, adding error scenarios"

# 4. Verify coverage before completion
npm run test:coverage

# 5. Update subtask with final test status
task-master update-subtask --id=X.Y --prompt="Testing complete:
- 12 unit tests with full coverage
- All edge cases and error scenarios covered
- Ready for integration testing"
```

### **Integration Testing Phase**
```bash
# After API endpoints are implemented
npm run test:integration

# Update integration test templates
# Replace placeholder tests with real endpoint calls

# Document integration test results
task-master update-subtask --id=X.Y --prompt="Integration tests:
- Updated auth endpoint tests
- Database integration verified
- All HTTP status codes and responses tested"
```

### **Pre-Commit Testing Phase**
```bash
# Before committing code
npm run test:coverage     # Verify all tests pass with coverage
npm run test:unit         # Quick unit test verification
npm run test:integration  # Integration test verification (if applicable)

# Commit pattern for test updates
git add tests/ src/**/*.test.ts
git commit -m "test(task-X): Add comprehensive tests for Feature Y

- Unit tests with 95% coverage (exceeds 90% threshold)
- Integration tests for API endpoints
- Test fixtures for data generation
- Proper mocking patterns established

Task X: Feature Y - Testing complete"
```

## **Error Handling & Debugging**

### **Test Debugging Techniques**
```typescript
// ✅ DO: Use test utilities for debugging
import { testUtils } from '../setup';

it('should debug complex operation', () => {
  testUtils.withConsole(() => {
    // Console output visible only for this test
    console.log('Debug info:', complexData);
    service.complexOperation();
  });
});

// ✅ DO: Use proper async debugging
it('should handle async operations', async () => {
  const promise = service.asyncOperation();

  // Test intermediate state
  expect(service.isProcessing()).toBe(true);

  const result = await promise;
  expect(result).toBe('expected');
  expect(service.isProcessing()).toBe(false);
});
```

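The `testUtils.withConsole` helper is assumed to live in `tests/setup.ts`; if your setup file does not define it, a minimal sketch (this presumes the global setup silences `console.log` by default, which is itself an assumption):

```typescript
// tests/setup.ts (sketch - only relevant if console output is silenced globally)
const originalLog = console.log;

beforeAll(() => {
  console.log = () => {}; // keep test output quiet by default
});

afterAll(() => {
  console.log = originalLog;
});

export const testUtils = {
  // Run fn with real console output, then silence it again
  withConsole(fn: () => void): void {
    console.log = originalLog;
    try {
      fn();
    } finally {
      console.log = () => {};
    }
  }
};
```
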
### **Common Test Issues & Solutions**
```bash
# Hanging tests (common with database connections)
npm run test:integration -- --detectOpenHandles

# Memory leaks in tests
npm run test:unit -- --logHeapUsage

# Slow tests identification
npm run test:coverage -- --verbose

# Mock not working properly
# Check: mock is declared before imports
# Check: jest.clearAllMocks() in beforeEach
# Check: TypeScript typing is correct
```

## **Continuous Integration**

### **CI/CD Pipeline Testing**
```yaml
# Example GitHub Actions integration
- name: Run tests
  run: |
    npm ci
    npm run test:coverage

- name: Upload coverage reports
  uses: codecov/codecov-action@v3
  with:
    file: ./coverage/lcov.info
```

### **Pre-commit Hooks**
```bash
# Setup pre-commit testing (recommended)
# In package.json scripts:
"pre-commit": "npm run test:unit && npm run test:integration"

# Husky integration example:
npx husky add .husky/pre-commit "npm run test:unit"
```

## **Test Maintenance & Evolution**

### **Adding Tests for New Features**
1. **Create test file** alongside source code or in `tests/unit/`
2. **Follow established patterns** from `src/utils/auth.test.ts`
3. **Use existing fixtures** from `tests/fixtures/`
4. **Apply proper mocking** patterns for dependencies
5. **Meet coverage thresholds** for the module

### **Updating Integration/E2E Tests**
1. **Update templates** in `tests/integration/` when APIs change
2. **Modify E2E workflows** in `tests/e2e/` for new user journeys
3. **Update test fixtures** for new data requirements
4. **Maintain database cleanup** utilities

### **Test Performance Optimization**
- **Parallel execution**: Jest runs tests in parallel by default (this config sets `maxWorkers: 1` to avoid database conflicts)
- **Test isolation**: Use proper setup/teardown for independence
- **Mock optimization**: Mock heavy dependencies appropriately
- **Database efficiency**: Use transaction rollbacks where possible

---

**Key References:**
- [Testing Standards](mdc:.cursor/rules/tests.mdc)
- [Git Workflow](mdc:.cursor/rules/git_workflow.mdc)
- [Development Workflow](mdc:.cursor/rules/dev_workflow.mdc)
- [Jest Configuration](mdc:jest.config.js)

@@ -8,9 +8,10 @@ GROQ_API_KEY=YOUR_GROQ_KEY_HERE
OPENROUTER_API_KEY=YOUR_OPENROUTER_KEY_HERE
XAI_API_KEY=YOUR_XAI_KEY_HERE
AZURE_OPENAI_API_KEY=YOUR_AZURE_KEY_HERE
OLLAMA_API_KEY=YOUR_OLLAMA_API_KEY_HERE

# Google Vertex AI Configuration
VERTEX_PROJECT_ID=your-gcp-project-id
VERTEX_LOCATION=us-central1
# Optional: Path to service account credentials JSON file (alternative to API key)
GOOGLE_APPLICATION_CREDENTIALS=/path/to/service-account-credentials.json

.github/PULL_REQUEST_TEMPLATE.md: 45 lines (vendored, normal file)
@@ -0,0 +1,45 @@
|
||||
# What type of PR is this?
|
||||
<!-- Check one -->
|
||||
|
||||
- [ ] 🐛 Bug fix
|
||||
- [ ] ✨ Feature
|
||||
- [ ] 🔌 Integration
|
||||
- [ ] 📝 Docs
|
||||
- [ ] 🧹 Refactor
|
||||
- [ ] Other:
|
||||
## Description
|
||||
<!-- What does this PR do? -->
|
||||
|
||||
## Related Issues
|
||||
<!-- Link issues: Fixes #123 -->
|
||||
|
||||
## How to Test This
|
||||
<!-- Quick steps to verify the changes work -->
|
||||
```bash
|
||||
# Example commands or steps
|
||||
```
|
||||
|
||||
**Expected result:**
|
||||
<!-- What should happen? -->
|
||||
|
||||
## Contributor Checklist
|
||||
|
||||
- [ ] Created changeset: `npm run changeset`
|
||||
- [ ] Tests pass: `npm test`
|
||||
- [ ] Format check passes: `npm run format-check` (or `npm run format` to fix)
|
||||
- [ ] Addressed CodeRabbit comments (if any)
|
||||
- [ ] Linked related issues (if any)
|
||||
- [ ] Manually tested the changes
|
||||
|
||||
## Changelog Entry
|
||||
<!-- One line describing the change for users -->
|
||||
<!-- Example: "Added Kiro IDE integration with automatic task status updates" -->
|
||||
|
||||
---
|
||||
|
||||
### For Maintainers
|
||||
|
||||
- [ ] PR title follows conventional commits
|
||||
- [ ] Target branch correct
|
||||
- [ ] Labels added
|
||||
- [ ] Milestone assigned (if applicable)
|
||||
.github/PULL_REQUEST_TEMPLATE/bugfix.md: 39 lines (vendored, normal file)
@@ -0,0 +1,39 @@
|
||||
## 🐛 Bug Fix
|
||||
|
||||
### 🔍 Bug Description
|
||||
<!-- Describe the bug -->
|
||||
|
||||
### 🔗 Related Issues
|
||||
<!-- Fixes #123 -->
|
||||
|
||||
### ✨ Solution
|
||||
<!-- How does this PR fix the bug? -->
|
||||
|
||||
## How to Test
|
||||
|
||||
### Steps that caused the bug:
|
||||
1.
|
||||
2.
|
||||
|
||||
**Before fix:**
|
||||
**After fix:**
|
||||
|
||||
### Quick verification:
|
||||
```bash
|
||||
# Commands to verify the fix
|
||||
```
|
||||
|
||||
## Contributor Checklist
|
||||
- [ ] Created changeset: `npm run changeset`
|
||||
- [ ] Tests pass: `npm test`
|
||||
- [ ] Format check passes: `npm run format-check`
|
||||
- [ ] Addressed CodeRabbit comments
|
||||
- [ ] Added unit tests (if applicable)
|
||||
- [ ] Manually verified the fix works
|
||||
|
||||
---
|
||||
|
||||
### For Maintainers
|
||||
- [ ] Root cause identified
|
||||
- [ ] Fix doesn't introduce new issues
|
||||
- [ ] CI passes
|
||||
.github/PULL_REQUEST_TEMPLATE/config.yml: 11 lines (vendored, normal file)
@@ -0,0 +1,11 @@
|
||||
blank_issues_enabled: false
|
||||
contact_links:
|
||||
- name: 🐛 Bug Fix
|
||||
url: https://github.com/eyaltoledano/claude-task-master/compare/next...HEAD?template=bugfix.md
|
||||
about: Fix a bug in Task Master
|
||||
- name: ✨ New Feature
|
||||
url: https://github.com/eyaltoledano/claude-task-master/compare/next...HEAD?template=feature.md
|
||||
about: Add a new feature to Task Master
|
||||
- name: 🔌 New Integration
|
||||
url: https://github.com/eyaltoledano/claude-task-master/compare/next...HEAD?template=integration.md
|
||||
about: Add support for a new tool, IDE, or platform
|
||||
.github/PULL_REQUEST_TEMPLATE/feature.md: 49 lines (vendored, normal file)
@@ -0,0 +1,49 @@
|
||||
## ✨ New Feature
|
||||
|
||||
### 📋 Feature Description
|
||||
<!-- Brief description -->
|
||||
|
||||
### 🎯 Problem Statement
|
||||
<!-- What problem does this feature solve? Why is it needed? -->
|
||||
|
||||
### 💡 Solution
|
||||
<!-- How does this feature solve the problem? What's the approach? -->
|
||||
|
||||
### 🔗 Related Issues
|
||||
<!-- Link related issues: Fixes #123, Part of #456 -->
|
||||
|
||||
## How to Use It
|
||||
|
||||
### Quick Start
|
||||
```bash
|
||||
# Basic usage example
|
||||
```
|
||||
|
||||
### Example
|
||||
<!-- Show a real use case -->
|
||||
```bash
|
||||
# Practical example
|
||||
```
|
||||
|
||||
**What you should see:**
|
||||
<!-- Expected behavior -->
|
||||
|
||||
## Contributor Checklist
|
||||
- [ ] Created changeset: `npm run changeset`
|
||||
- [ ] Tests pass: `npm test`
|
||||
- [ ] Format check passes: `npm run format-check`
|
||||
- [ ] Addressed CodeRabbit comments
|
||||
- [ ] Added tests for new functionality
|
||||
- [ ] Manually tested in CLI mode
|
||||
- [ ] Manually tested in MCP mode (if applicable)
|
||||
|
||||
## Changelog Entry
|
||||
<!-- One-liner for release notes -->
|
||||
|
||||
---
|
||||
|
||||
### For Maintainers
|
||||
|
||||
- [ ] Feature aligns with project vision
|
||||
- [ ] CIs pass
|
||||
- [ ] Changeset file exists
|
||||
.github/PULL_REQUEST_TEMPLATE/integration.md: 53 lines (vendored, normal file)
@@ -0,0 +1,53 @@
|
||||
# 🔌 New Integration
|
||||
|
||||
## What tool/IDE is being integrated?
|
||||
|
||||
<!-- Name and brief description -->
|
||||
|
||||
## What can users do with it?
|
||||
|
||||
<!-- Key benefits -->
|
||||
|
||||
## How to Enable
|
||||
|
||||
### Setup
|
||||
|
||||
```bash
|
||||
task-master rules add [name]
|
||||
# Any other setup steps
|
||||
```
|
||||
|
||||
### Example Usage
|
||||
|
||||
<!-- Show it in action -->
|
||||
|
||||
```bash
|
||||
# Real example
|
||||
```
|
||||
|
||||
### Natural Language Hooks (if applicable)
|
||||
|
||||
```
|
||||
"When tests pass, mark task as done"
|
||||
# Other examples
|
||||
```
|
||||
|
||||
## Contributor Checklist
|
||||
|
||||
- [ ] Created changeset: `npm run changeset`
|
||||
- [ ] Tests pass: `npm test`
|
||||
- [ ] Format check passes: `npm run format-check`
|
||||
- [ ] Addressed CodeRabbit comments
|
||||
- [ ] Integration fully tested with target tool/IDE
|
||||
- [ ] Error scenarios tested
|
||||
- [ ] Added integration tests
|
||||
- [ ] Documentation includes setup guide
|
||||
- [ ] Examples are working and clear
|
||||
|
||||
---
|
||||
|
||||
## For Maintainers
|
||||
|
||||
- [ ] Integration stability verified
|
||||
- [ ] Documentation comprehensive
|
||||
- [ ] Examples working
|
||||
.github/scripts/auto-close-duplicates.mjs: 259 lines (vendored, normal file)
@@ -0,0 +1,259 @@
|
||||
#!/usr/bin/env node
|
||||
|
||||
async function githubRequest(endpoint, token, method = 'GET', body) {
|
||||
const response = await fetch(`https://api.github.com${endpoint}`, {
|
||||
method,
|
||||
headers: {
|
||||
Authorization: `Bearer ${token}`,
|
||||
Accept: 'application/vnd.github.v3+json',
|
||||
'User-Agent': 'auto-close-duplicates-script',
|
||||
...(body && { 'Content-Type': 'application/json' })
|
||||
},
|
||||
...(body && { body: JSON.stringify(body) })
|
||||
});
|
||||
|
||||
if (!response.ok) {
|
||||
throw new Error(
|
||||
`GitHub API request failed: ${response.status} ${response.statusText}`
|
||||
);
|
||||
}
|
||||
|
||||
return response.json();
|
||||
}
|
||||
|
||||
function extractDuplicateIssueNumber(commentBody) {
|
||||
const match = commentBody.match(/#(\d+)/);
|
||||
return match ? parseInt(match[1], 10) : null;
|
||||
}
|
||||
|
||||
async function closeIssueAsDuplicate(
|
||||
owner,
|
||||
repo,
|
||||
issueNumber,
|
||||
duplicateOfNumber,
|
||||
token
|
||||
) {
|
||||
await githubRequest(
|
||||
`/repos/${owner}/${repo}/issues/${issueNumber}`,
|
||||
token,
|
||||
'PATCH',
|
||||
{
|
||||
state: 'closed',
|
||||
state_reason: 'not_planned',
|
||||
labels: ['duplicate']
|
||||
}
|
||||
);
|
||||
|
||||
await githubRequest(
|
||||
`/repos/${owner}/${repo}/issues/${issueNumber}/comments`,
|
||||
token,
|
||||
'POST',
|
||||
{
|
||||
body: `This issue has been automatically closed as a duplicate of #${duplicateOfNumber}.
|
||||
|
||||
If this is incorrect, please re-open this issue or create a new one.
|
||||
|
||||
🤖 Generated with [Task Master Bot]`
|
||||
}
|
||||
);
|
||||
}
|
||||
|
||||
async function autoCloseDuplicates() {
|
||||
console.log('[DEBUG] Starting auto-close duplicates script');
|
||||
|
||||
const token = process.env.GITHUB_TOKEN;
|
||||
if (!token) {
|
||||
throw new Error('GITHUB_TOKEN environment variable is required');
|
||||
}
|
||||
console.log('[DEBUG] GitHub token found');
|
||||
|
||||
const owner = process.env.GITHUB_REPOSITORY_OWNER || 'eyaltoledano';
|
||||
const repo = process.env.GITHUB_REPOSITORY_NAME || 'claude-task-master';
|
||||
console.log(`[DEBUG] Repository: ${owner}/${repo}`);
|
||||
|
||||
const threeDaysAgo = new Date();
|
||||
threeDaysAgo.setDate(threeDaysAgo.getDate() - 3);
|
||||
console.log(
|
||||
`[DEBUG] Checking for duplicate comments older than: ${threeDaysAgo.toISOString()}`
|
||||
);
|
||||
|
||||
console.log('[DEBUG] Fetching open issues created more than 3 days ago...');
|
||||
const allIssues = [];
|
||||
let page = 1;
|
||||
const perPage = 100;
|
||||
|
||||
const MAX_PAGES = 50; // Increase limit for larger repos
|
||||
let foundRecentIssue = false;
|
||||
|
||||
while (true) {
|
||||
const pageIssues = await githubRequest(
|
||||
`/repos/${owner}/${repo}/issues?state=open&per_page=${perPage}&page=${page}&sort=created&direction=desc`,
|
||||
token
|
||||
);
|
||||
|
||||
if (pageIssues.length === 0) break;
|
||||
|
||||
// Filter for issues created more than 3 days ago
|
||||
const oldEnoughIssues = pageIssues.filter(
|
||||
(issue) => new Date(issue.created_at) <= threeDaysAgo
|
||||
);
|
||||
|
||||
allIssues.push(...oldEnoughIssues);
|
||||
|
||||
// If all issues on this page are newer than 3 days, we can stop
|
||||
if (oldEnoughIssues.length === 0 && page === 1) {
|
||||
foundRecentIssue = true;
|
||||
break;
|
||||
}
|
||||
|
||||
// If we found some old issues but not all, continue to next page
|
||||
// as there might be more old issues
|
||||
page++;
|
||||
|
||||
// Safety limit to avoid infinite loops
|
||||
if (page > MAX_PAGES) {
|
||||
console.log(`[WARNING] Reached maximum page limit of ${MAX_PAGES}`);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
const issues = allIssues;
|
||||
console.log(`[DEBUG] Found ${issues.length} open issues`);
|
||||
|
||||
let processedCount = 0;
|
||||
let candidateCount = 0;
|
||||
|
||||
for (const issue of issues) {
|
||||
processedCount++;
|
||||
console.log(
|
||||
`[DEBUG] Processing issue #${issue.number} (${processedCount}/${issues.length}): ${issue.title}`
|
||||
);
|
||||
|
||||
console.log(`[DEBUG] Fetching comments for issue #${issue.number}...`);
|
||||
const comments = await githubRequest(
|
||||
`/repos/${owner}/${repo}/issues/${issue.number}/comments`,
|
||||
token
|
||||
);
|
||||
console.log(
|
||||
`[DEBUG] Issue #${issue.number} has ${comments.length} comments`
|
||||
);
|
||||
|
||||
const dupeComments = comments.filter(
|
||||
(comment) =>
|
||||
comment.body.includes('Found') &&
|
||||
comment.body.includes('possible duplicate') &&
|
||||
comment.user.type === 'Bot'
|
||||
);
|
||||
console.log(
|
||||
`[DEBUG] Issue #${issue.number} has ${dupeComments.length} duplicate detection comments`
|
||||
);
|
||||
|
||||
if (dupeComments.length === 0) {
|
||||
console.log(
|
||||
`[DEBUG] Issue #${issue.number} - no duplicate comments found, skipping`
|
||||
);
|
||||
continue;
|
||||
}
|
||||
|
||||
const lastDupeComment = dupeComments[dupeComments.length - 1];
|
||||
const dupeCommentDate = new Date(lastDupeComment.created_at);
|
||||
console.log(
|
||||
`[DEBUG] Issue #${
|
||||
issue.number
|
||||
} - most recent duplicate comment from: ${dupeCommentDate.toISOString()}`
|
||||
);
|
||||
|
||||
if (dupeCommentDate > threeDaysAgo) {
|
||||
console.log(
|
||||
`[DEBUG] Issue #${issue.number} - duplicate comment is too recent, skipping`
|
||||
);
|
||||
continue;
|
||||
}
|
||||
console.log(
|
||||
`[DEBUG] Issue #${
|
||||
issue.number
|
||||
} - duplicate comment is old enough (${Math.floor(
|
||||
(Date.now() - dupeCommentDate.getTime()) / (1000 * 60 * 60 * 24)
|
||||
)} days)`
|
||||
);
|
||||
|
||||
const commentsAfterDupe = comments.filter(
|
||||
(comment) => new Date(comment.created_at) > dupeCommentDate
|
||||
);
|
||||
console.log(
|
||||
`[DEBUG] Issue #${issue.number} - ${commentsAfterDupe.length} comments after duplicate detection`
|
||||
);
|
||||
|
||||
if (commentsAfterDupe.length > 0) {
|
||||
console.log(
|
||||
`[DEBUG] Issue #${issue.number} - has activity after duplicate comment, skipping`
|
||||
);
|
||||
continue;
|
||||
}
|
||||
|
||||
console.log(
|
||||
`[DEBUG] Issue #${issue.number} - checking reactions on duplicate comment...`
|
||||
);
|
||||
const reactions = await githubRequest(
|
||||
`/repos/${owner}/${repo}/issues/comments/${lastDupeComment.id}/reactions`,
|
||||
token
|
||||
);
|
||||
console.log(
|
||||
`[DEBUG] Issue #${issue.number} - duplicate comment has ${reactions.length} reactions`
|
||||
);
|
||||
|
||||
const authorThumbsDown = reactions.some(
|
||||
(reaction) =>
|
||||
reaction.user.id === issue.user.id && reaction.content === '-1'
|
||||
);
|
||||
console.log(
|
||||
`[DEBUG] Issue #${issue.number} - author thumbs down reaction: ${authorThumbsDown}`
|
||||
);
|
||||
|
||||
if (authorThumbsDown) {
|
||||
console.log(
|
||||
`[DEBUG] Issue #${issue.number} - author disagreed with duplicate detection, skipping`
|
||||
);
|
||||
continue;
|
||||
}
|
||||
|
||||
const duplicateIssueNumber = extractDuplicateIssueNumber(
|
||||
lastDupeComment.body
|
||||
);
|
||||
if (!duplicateIssueNumber) {
|
||||
console.log(
|
||||
`[DEBUG] Issue #${issue.number} - could not extract duplicate issue number from comment, skipping`
|
||||
);
|
||||
continue;
|
||||
}
|
||||
|
||||
candidateCount++;
|
||||
const issueUrl = `https://github.com/${owner}/${repo}/issues/${issue.number}`;
|
||||
|
||||
try {
|
||||
console.log(
|
||||
`[INFO] Auto-closing issue #${issue.number} as duplicate of #${duplicateIssueNumber}: ${issueUrl}`
|
||||
);
|
||||
await closeIssueAsDuplicate(
|
||||
owner,
|
||||
repo,
|
||||
issue.number,
|
||||
duplicateIssueNumber,
|
||||
token
|
||||
);
|
||||
console.log(
|
||||
`[SUCCESS] Successfully closed issue #${issue.number} as duplicate of #${duplicateIssueNumber}`
|
||||
);
|
||||
} catch (error) {
|
||||
console.error(
|
||||
`[ERROR] Failed to close issue #${issue.number} as duplicate: ${error}`
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
console.log(
|
||||
`[DEBUG] Script completed. Processed ${processedCount} issues, found ${candidateCount} candidates for auto-close`
|
||||
);
|
||||
}
|
||||
|
||||
autoCloseDuplicates().catch(console.error);
|
||||
.github/scripts/backfill-duplicate-comments.mjs: 178 lines (vendored, normal file)
@@ -0,0 +1,178 @@
|
||||
#!/usr/bin/env node
|
||||
|
||||
async function githubRequest(endpoint, token, method = 'GET', body) {
|
||||
const response = await fetch(`https://api.github.com${endpoint}`, {
|
||||
method,
|
||||
headers: {
|
||||
Authorization: `Bearer ${token}`,
|
||||
Accept: 'application/vnd.github.v3+json',
|
||||
'User-Agent': 'backfill-duplicate-comments-script',
|
||||
...(body && { 'Content-Type': 'application/json' })
|
||||
},
|
||||
...(body && { body: JSON.stringify(body) })
|
||||
});
|
||||
|
||||
if (!response.ok) {
|
||||
throw new Error(
|
||||
`GitHub API request failed: ${response.status} ${response.statusText}`
|
||||
);
|
||||
}
|
||||
|
||||
return response.json();
|
||||
}
|
||||
|
||||
async function triggerDedupeWorkflow(
|
||||
owner,
|
||||
repo,
|
||||
issueNumber,
|
||||
token,
|
||||
dryRun = true
|
||||
) {
|
||||
if (dryRun) {
|
||||
console.log(
|
||||
`[DRY RUN] Would trigger dedupe workflow for issue #${issueNumber}`
|
||||
);
|
||||
return;
|
||||
}
|
||||
|
||||
await githubRequest(
|
||||
`/repos/${owner}/${repo}/actions/workflows/claude-dedupe-issues.yml/dispatches`,
|
||||
token,
|
||||
'POST',
|
||||
{
|
||||
ref: 'main',
|
||||
inputs: {
|
||||
issue_number: issueNumber.toString()
|
||||
}
|
||||
}
|
||||
);
|
||||
}
|
||||
|
||||
async function backfillDuplicateComments() {
|
||||
console.log('[DEBUG] Starting backfill duplicate comments script');
|
||||
|
||||
const token = process.env.GITHUB_TOKEN;
|
||||
if (!token) {
|
||||
throw new Error(`GITHUB_TOKEN environment variable is required
|
||||
|
||||
Usage:
|
||||
node .github/scripts/backfill-duplicate-comments.mjs
|
||||
|
||||
Environment Variables:
|
||||
GITHUB_TOKEN - GitHub personal access token with repo and actions permissions (required)
|
||||
DRY_RUN - Set to "false" to actually trigger workflows (default: true for safety)
|
||||
DAYS_BACK - How many days back to look for old issues (default: 90)`);
|
||||
}
|
||||
console.log('[DEBUG] GitHub token found');
|
||||
|
||||
const owner = process.env.GITHUB_REPOSITORY_OWNER || 'eyaltoledano';
|
||||
const repo = process.env.GITHUB_REPOSITORY_NAME || 'claude-task-master';
|
||||
const dryRun = process.env.DRY_RUN !== 'false';
|
||||
const daysBack = parseInt(process.env.DAYS_BACK || '90', 10);
|
||||
|
||||
console.log(`[DEBUG] Repository: ${owner}/${repo}`);
|
||||
console.log(`[DEBUG] Dry run mode: ${dryRun}`);
|
||||
console.log(`[DEBUG] Looking back ${daysBack} days`);
|
||||
|
||||
const cutoffDate = new Date();
|
||||
cutoffDate.setDate(cutoffDate.getDate() - daysBack);
|
||||
|
||||
console.log(
|
||||
`[DEBUG] Fetching issues created since ${cutoffDate.toISOString()}...`
|
||||
);
|
||||
const allIssues = [];
|
||||
let page = 1;
|
||||
const perPage = 100;
|
||||
|
||||
while (true) {
|
||||
const pageIssues = await githubRequest(
|
||||
`/repos/${owner}/${repo}/issues?state=all&per_page=${perPage}&page=${page}&since=${cutoffDate.toISOString()}`,
|
||||
token
|
||||
);
|
||||
|
||||
if (pageIssues.length === 0) break;
|
||||
|
||||
allIssues.push(...pageIssues);
|
||||
page++;
|
||||
|
||||
// Safety limit to avoid infinite loops
|
||||
if (page > 100) {
|
||||
console.log('[DEBUG] Reached page limit, stopping pagination');
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
console.log(
|
||||
`[DEBUG] Found ${allIssues.length} issues from the last ${daysBack} days`
|
||||
);
|
||||
|
||||
let processedCount = 0;
|
||||
let candidateCount = 0;
|
||||
let triggeredCount = 0;
|
||||
|
||||
for (const issue of allIssues) {
|
||||
processedCount++;
|
||||
console.log(
|
||||
`[DEBUG] Processing issue #${issue.number} (${processedCount}/${allIssues.length}): ${issue.title}`
|
||||
);
|
||||
|
||||
console.log(`[DEBUG] Fetching comments for issue #${issue.number}...`);
|
||||
const comments = await githubRequest(
|
||||
`/repos/${owner}/${repo}/issues/${issue.number}/comments`,
|
||||
token
|
||||
);
|
||||
console.log(
|
||||
`[DEBUG] Issue #${issue.number} has ${comments.length} comments`
|
||||
);
|
||||
|
||||
// Look for existing duplicate detection comments (from the dedupe bot)
|
||||
const dupeDetectionComments = comments.filter(
|
||||
(comment) =>
|
||||
comment.body.includes('Found') &&
|
||||
comment.body.includes('possible duplicate') &&
|
||||
comment.user.type === 'Bot'
|
||||
);
|
||||
|
||||
console.log(
|
||||
`[DEBUG] Issue #${issue.number} has ${dupeDetectionComments.length} duplicate detection comments`
|
||||
);
|
||||
|
||||
// Skip if there's already a duplicate detection comment
|
||||
if (dupeDetectionComments.length > 0) {
|
||||
console.log(
|
||||
`[DEBUG] Issue #${issue.number} already has duplicate detection comment, skipping`
|
||||
);
|
||||
continue;
|
||||
}
|
||||
|
||||
candidateCount++;
|
||||
const issueUrl = `https://github.com/${owner}/${repo}/issues/${issue.number}`;
|
||||
|
||||
try {
|
||||
console.log(
|
||||
`[INFO] ${dryRun ? '[DRY RUN] ' : ''}Triggering dedupe workflow for issue #${issue.number}: ${issueUrl}`
|
||||
);
|
||||
await triggerDedupeWorkflow(owner, repo, issue.number, token, dryRun);
|
||||
|
||||
if (!dryRun) {
|
||||
console.log(
|
||||
`[SUCCESS] Successfully triggered dedupe workflow for issue #${issue.number}`
|
||||
);
|
||||
}
|
||||
triggeredCount++;
|
||||
} catch (error) {
|
||||
console.error(
|
||||
`[ERROR] Failed to trigger workflow for issue #${issue.number}: ${error}`
|
||||
);
|
||||
}
|
||||
|
||||
// Add a delay between workflow triggers to avoid overwhelming the system
|
||||
await new Promise((resolve) => setTimeout(resolve, 1000));
|
||||
}
|
||||
|
||||
console.log(
|
||||
`[DEBUG] Script completed. Processed ${processedCount} issues, found ${candidateCount} candidates without duplicate comments, ${dryRun ? 'would trigger' : 'triggered'} ${triggeredCount} workflows`
|
||||
);
|
||||
}
|
||||
|
||||
backfillDuplicateComments().catch(console.error);
|
||||
.github/scripts/check-pre-release-mode.mjs: 102 lines (vendored, executable file)
@@ -0,0 +1,102 @@
|
||||
#!/usr/bin/env node
|
||||
import { readFileSync, existsSync } from 'node:fs';
|
||||
import { join, dirname, resolve } from 'node:path';
|
||||
import { fileURLToPath } from 'node:url';
|
||||
|
||||
const __filename = fileURLToPath(import.meta.url);
|
||||
const __dirname = dirname(__filename);
|
||||
|
||||
// Get context from command line argument or environment
|
||||
const context = process.argv[2] || process.env.GITHUB_WORKFLOW || 'manual';
|
||||
|
||||
function findRootDir(startDir) {
|
||||
let currentDir = resolve(startDir);
|
||||
while (currentDir !== '/') {
|
||||
if (existsSync(join(currentDir, 'package.json'))) {
|
||||
try {
|
||||
const pkg = JSON.parse(
|
||||
readFileSync(join(currentDir, 'package.json'), 'utf8')
|
||||
);
|
||||
if (pkg.name === 'task-master-ai' || pkg.repository) {
|
||||
return currentDir;
|
||||
}
|
||||
} catch {}
|
||||
}
|
||||
currentDir = dirname(currentDir);
|
||||
}
|
||||
throw new Error('Could not find root directory');
|
||||
}
|
||||
|
||||
function checkPreReleaseMode() {
|
||||
console.log('🔍 Checking if branch is in pre-release mode...');
|
||||
|
||||
const rootDir = findRootDir(__dirname);
|
||||
const preJsonPath = join(rootDir, '.changeset', 'pre.json');
|
||||
|
||||
// Check if pre.json exists
|
||||
if (!existsSync(preJsonPath)) {
|
||||
console.log('✅ Not in active pre-release mode - safe to proceed');
|
||||
process.exit(0);
|
||||
}
|
||||
|
||||
try {
|
||||
// Read and parse pre.json
|
||||
const preJsonContent = readFileSync(preJsonPath, 'utf8');
|
||||
const preJson = JSON.parse(preJsonContent);
|
||||
|
||||
// Check if we're in active pre-release mode
|
||||
if (preJson.mode === 'pre') {
|
||||
console.error('❌ ERROR: This branch is in active pre-release mode!');
|
||||
console.error('');
|
||||
|
||||
// Provide context-specific error messages
|
||||
if (context === 'Release Check' || context === 'pull_request') {
|
||||
console.error(
|
||||
'Pre-release mode must be exited before merging to main.'
|
||||
);
|
||||
console.error('');
|
||||
console.error(
|
||||
'To fix this, run the following commands in your branch:'
|
||||
);
|
||||
console.error(' npx changeset pre exit');
|
||||
console.error(' git add -u');
|
||||
console.error(' git commit -m "chore: exit pre-release mode"');
|
||||
console.error(' git push');
|
||||
console.error('');
|
||||
console.error('Then update this pull request.');
|
||||
} else if (context === 'Release' || context === 'main') {
|
||||
console.error(
|
||||
'Pre-release mode should only be used on feature branches, not main.'
|
||||
);
|
||||
console.error('');
|
||||
console.error('To fix this, run the following commands locally:');
|
||||
console.error(' npx changeset pre exit');
|
||||
console.error(' git add -u');
|
||||
console.error(' git commit -m "chore: exit pre-release mode"');
|
||||
console.error(' git push origin main');
|
||||
console.error('');
|
||||
console.error('Then re-run this workflow.');
|
||||
} else {
|
||||
console.error('Pre-release mode must be exited before proceeding.');
|
||||
console.error('');
|
||||
console.error('To fix this, run the following commands:');
|
||||
console.error(' npx changeset pre exit');
|
||||
console.error(' git add -u');
|
||||
console.error(' git commit -m "chore: exit pre-release mode"');
|
||||
console.error(' git push');
|
||||
}
|
||||
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
console.log('✅ Not in active pre-release mode - safe to proceed');
|
||||
process.exit(0);
|
||||
} catch (error) {
|
||||
console.error(`❌ ERROR: Unable to parse .changeset/pre.json – aborting.`);
|
||||
console.error(`Error details: ${error.message}`);
|
||||
process.exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
// Run the check
|
||||
checkPreReleaseMode();
|
||||
.github/scripts/parse-metrics.mjs: 157 lines (vendored, normal file)
@@ -0,0 +1,157 @@
|
||||
#!/usr/bin/env node
|
||||
|
||||
import { readFileSync, existsSync, writeFileSync } from 'fs';
|
||||
|
||||
function parseMetricsTable(content, metricName) {
|
||||
const lines = content.split('\n');
|
||||
|
||||
for (let i = 0; i < lines.length; i++) {
|
||||
const line = lines[i].trim();
|
||||
// Match a markdown table row like: | Metric Name | value | ...
|
||||
const safeName = metricName.replace(/[.*+?^${}()|[\]\\]/g, '\\$&');
|
||||
const re = new RegExp(`^\\|\\s*${safeName}\\s*\\|\\s*([^|]+)\\|?`);
|
||||
const match = line.match(re);
|
||||
if (match) {
|
||||
return match[1].trim() || 'N/A';
|
||||
}
|
||||
}
|
||||
return 'N/A';
|
||||
}
|
||||
|
||||
function parseCountMetric(content, metricName) {
|
||||
const result = parseMetricsTable(content, metricName);
|
||||
// Extract number from string, handling commas and spaces
|
||||
const numberMatch = result.toString().match(/[\d,]+/);
|
||||
if (numberMatch) {
|
||||
const number = parseInt(numberMatch[0].replace(/,/g, ''));
|
||||
return isNaN(number) ? 0 : number;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
function main() {
|
||||
const metrics = {
|
||||
issues_created: 0,
|
||||
issues_closed: 0,
|
||||
prs_created: 0,
|
||||
prs_merged: 0,
|
||||
issue_avg_first_response: 'N/A',
|
||||
issue_avg_time_to_close: 'N/A',
|
||||
pr_avg_first_response: 'N/A',
|
||||
pr_avg_merge_time: 'N/A'
|
||||
};
|
||||
|
||||
// Parse issue metrics
|
||||
if (existsSync('issue_metrics.md')) {
|
||||
console.log('📄 Found issue_metrics.md, parsing...');
|
||||
const issueContent = readFileSync('issue_metrics.md', 'utf8');
|
||||
|
||||
metrics.issues_created = parseCountMetric(
|
||||
issueContent,
|
||||
'Total number of items created'
|
||||
);
|
||||
metrics.issues_closed = parseCountMetric(
|
||||
issueContent,
|
||||
'Number of items closed'
|
||||
);
|
||||
metrics.issue_avg_first_response = parseMetricsTable(
|
||||
issueContent,
|
||||
'Time to first response'
|
||||
);
|
||||
metrics.issue_avg_time_to_close = parseMetricsTable(
|
||||
issueContent,
|
||||
'Time to close'
|
||||
);
|
||||
} else {
|
||||
console.warn('[parse-metrics] issue_metrics.md not found; using defaults.');
|
||||
}
|
||||
|
||||
// Parse PR created metrics
|
||||
if (existsSync('pr_created_metrics.md')) {
|
||||
console.log('📄 Found pr_created_metrics.md, parsing...');
|
||||
const prCreatedContent = readFileSync('pr_created_metrics.md', 'utf8');
|
||||
|
||||
metrics.prs_created = parseCountMetric(
|
||||
prCreatedContent,
|
||||
'Total number of items created'
|
||||
);
|
||||
metrics.pr_avg_first_response = parseMetricsTable(
|
||||
prCreatedContent,
|
||||
'Time to first response'
|
||||
);
|
||||
} else {
|
||||
console.warn(
|
||||
'[parse-metrics] pr_created_metrics.md not found; using defaults.'
|
||||
);
|
||||
}
|
||||
|
||||
// Parse PR merged metrics (for more accurate merge data)
|
||||
if (existsSync('pr_merged_metrics.md')) {
|
||||
console.log('📄 Found pr_merged_metrics.md, parsing...');
|
||||
const prMergedContent = readFileSync('pr_merged_metrics.md', 'utf8');
|
||||
|
||||
metrics.prs_merged = parseCountMetric(
|
||||
prMergedContent,
|
||||
'Total number of items created'
|
||||
);
|
||||
// For merged PRs, "Time to close" is actually time to merge
|
||||
metrics.pr_avg_merge_time = parseMetricsTable(
|
||||
prMergedContent,
|
||||
'Time to close'
|
||||
);
|
||||
} else {
|
||||
console.warn(
|
||||
'[parse-metrics] pr_merged_metrics.md not found; falling back to pr_metrics.md.'
|
||||
);
|
||||
// Fallback: try old pr_metrics.md if it exists
|
||||
if (existsSync('pr_metrics.md')) {
|
||||
console.log('📄 Falling back to pr_metrics.md...');
|
||||
const prContent = readFileSync('pr_metrics.md', 'utf8');
|
||||
|
||||
const mergedCount = parseCountMetric(prContent, 'Number of items merged');
|
||||
metrics.prs_merged =
|
||||
mergedCount || parseCountMetric(prContent, 'Number of items closed');
|
||||
|
||||
const maybeMergeTime = parseMetricsTable(
|
||||
prContent,
|
||||
'Average time to merge'
|
||||
);
|
||||
metrics.pr_avg_merge_time =
|
||||
maybeMergeTime !== 'N/A'
|
||||
? maybeMergeTime
|
||||
: parseMetricsTable(prContent, 'Time to close');
|
||||
} else {
|
||||
console.warn('[parse-metrics] pr_metrics.md not found; using defaults.');
|
||||
}
|
||||
}
|
||||
|
||||
// Output for GitHub Actions
|
||||
const output = Object.entries(metrics)
|
||||
.map(([key, value]) => `${key}=${value}`)
|
||||
.join('\n');
|
||||
|
||||
// Always output to stdout for debugging
|
||||
console.log('\n=== FINAL METRICS ===');
|
||||
Object.entries(metrics).forEach(([key, value]) => {
|
||||
console.log(`${key}: ${value}`);
|
||||
});
|
||||
|
||||
// Write to GITHUB_OUTPUT if in GitHub Actions
|
||||
if (process.env.GITHUB_OUTPUT) {
|
||||
try {
|
||||
writeFileSync(process.env.GITHUB_OUTPUT, output + '\n', { flag: 'a' });
|
||||
console.log(
|
||||
`\nSuccessfully wrote metrics to ${process.env.GITHUB_OUTPUT}`
|
||||
);
|
||||
} catch (error) {
|
||||
console.error(`Failed to write to GITHUB_OUTPUT: ${error.message}`);
|
||||
process.exit(1);
|
||||
}
|
||||
} else {
|
||||
console.log(
|
||||
'\nNo GITHUB_OUTPUT environment variable found, skipping file write'
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
main();
|
||||
.github/scripts/release.mjs: 30 lines (vendored, executable file)
@@ -0,0 +1,30 @@
|
||||
#!/usr/bin/env node
|
||||
import { existsSync, unlinkSync } from 'node:fs';
|
||||
import { join, dirname } from 'node:path';
|
||||
import { fileURLToPath } from 'node:url';
|
||||
import { findRootDir, runCommand } from './utils.mjs';
|
||||
|
||||
const __filename = fileURLToPath(import.meta.url);
|
||||
const __dirname = dirname(__filename);
|
||||
|
||||
const rootDir = findRootDir(__dirname);
|
||||
|
||||
console.log('🚀 Starting release process...');
|
||||
|
||||
// Double-check we're not in pre-release mode (safety net)
|
||||
const preJsonPath = join(rootDir, '.changeset', 'pre.json');
|
||||
if (existsSync(preJsonPath)) {
|
||||
console.log('⚠️ Warning: pre.json still exists. Removing it...');
|
||||
unlinkSync(preJsonPath);
|
||||
}
|
||||
|
||||
// Check if the extension version has changed and tag it
|
||||
// This prevents changeset from trying to publish the private package
|
||||
runCommand('node', [join(__dirname, 'tag-extension.mjs')]);
|
||||
|
||||
// Run changeset publish for npm packages
|
||||
runCommand('npx', ['changeset', 'publish']);
|
||||
|
||||
console.log('✅ Release process completed!');
|
||||
|
||||
// The extension tag (if created) will trigger the extension-release workflow
|
||||
.github/scripts/tag-extension.mjs: 33 lines (vendored, executable file)
@@ -0,0 +1,33 @@
|
||||
#!/usr/bin/env node
|
||||
import assert from 'node:assert/strict';
|
||||
import { readFileSync } from 'node:fs';
|
||||
import { join, dirname } from 'node:path';
|
||||
import { fileURLToPath } from 'node:url';
|
||||
import { findRootDir, createAndPushTag } from './utils.mjs';
|
||||
|
||||
const __filename = fileURLToPath(import.meta.url);
|
||||
const __dirname = dirname(__filename);
|
||||
|
||||
const rootDir = findRootDir(__dirname);
|
||||
|
||||
// Read the extension's package.json
|
||||
const extensionDir = join(rootDir, 'apps', 'extension');
|
||||
const pkgPath = join(extensionDir, 'package.json');
|
||||
|
||||
let pkg;
|
||||
try {
|
||||
const pkgContent = readFileSync(pkgPath, 'utf8');
|
||||
pkg = JSON.parse(pkgContent);
|
||||
} catch (error) {
|
||||
console.error('Failed to read package.json:', error.message);
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
// Ensure we have required fields
|
||||
assert(pkg.name, 'package.json must have a name field');
|
||||
assert(pkg.version, 'package.json must have a version field');
|
||||
|
||||
const tag = `${pkg.name}@${pkg.version}`;
|
||||
|
||||
// Create and push the tag if it doesn't exist
|
||||
createAndPushTag(tag);
|
||||
.github/scripts/utils.mjs: 88 lines (vendored, executable file)
@@ -0,0 +1,88 @@
|
||||
#!/usr/bin/env node
|
||||
import { spawnSync } from 'node:child_process';
|
||||
import { readFileSync } from 'node:fs';
|
||||
import { join, dirname, resolve } from 'node:path';
|
||||
|
||||
// Find the root directory by looking for package.json with task-master-ai
|
||||
export function findRootDir(startDir) {
|
||||
let currentDir = resolve(startDir);
|
||||
while (currentDir !== '/') {
|
||||
const pkgPath = join(currentDir, 'package.json');
|
||||
try {
|
||||
const pkg = JSON.parse(readFileSync(pkgPath, 'utf8'));
|
||||
if (pkg.name === 'task-master-ai' || pkg.repository) {
|
||||
return currentDir;
|
||||
}
|
||||
} catch {}
|
||||
currentDir = dirname(currentDir);
|
||||
}
|
||||
throw new Error('Could not find root directory');
|
||||
}
|
||||
|
||||
// Run a command with proper error handling
|
||||
export function runCommand(command, args = [], options = {}) {
|
||||
console.log(`Running: ${command} ${args.join(' ')}`);
|
||||
const result = spawnSync(command, args, {
|
||||
encoding: 'utf8',
|
||||
stdio: 'inherit',
|
||||
...options
|
||||
});
|
||||
|
||||
if (result.status !== 0) {
|
||||
console.error(`Command failed with exit code ${result.status}`);
|
||||
process.exit(result.status);
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
// Get package version from a package.json file
|
||||
export function getPackageVersion(packagePath) {
|
||||
try {
|
||||
const pkg = JSON.parse(readFileSync(packagePath, 'utf8'));
|
||||
return pkg.version;
|
||||
} catch (error) {
|
||||
console.error(
|
||||
`Failed to read package version from ${packagePath}:`,
|
||||
error.message
|
||||
);
|
||||
process.exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
// Check if a git tag exists on remote
|
||||
export function tagExistsOnRemote(tag, remote = 'origin') {
|
||||
const result = spawnSync('git', ['ls-remote', remote, tag], {
|
||||
encoding: 'utf8'
|
||||
});
|
||||
|
||||
return result.status === 0 && result.stdout.trim() !== '';
|
||||
}
|
||||
|
||||
// Create and push a git tag if it doesn't exist
|
||||
export function createAndPushTag(tag, remote = 'origin') {
|
||||
// Check if tag already exists
|
||||
if (tagExistsOnRemote(tag, remote)) {
|
||||
console.log(`Tag ${tag} already exists on remote, skipping`);
|
||||
return false;
|
||||
}
|
||||
|
||||
console.log(`Creating new tag: ${tag}`);
|
||||
|
||||
// Create the tag locally
|
||||
const tagResult = spawnSync('git', ['tag', tag]);
|
||||
if (tagResult.status !== 0) {
|
||||
console.error('Failed to create tag:', tagResult.error || tagResult.stderr);
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
// Push the tag to remote
|
||||
const pushResult = spawnSync('git', ['push', remote, tag]);
|
||||
if (pushResult.status !== 0) {
|
||||
console.error('Failed to push tag:', pushResult.error || pushResult.stderr);
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
console.log(`✅ Successfully created and pushed tag: ${tag}`);
|
||||
return true;
|
||||
}
|
||||
.github/workflows/auto-close-duplicates.yml: 31 lines (vendored, normal file)
@@ -0,0 +1,31 @@
|
||||
name: Auto-close duplicate issues
|
||||
# description: Auto-closes issues that are duplicates of existing issues
|
||||
|
||||
on:
|
||||
schedule:
|
||||
- cron: "0 9 * * *" # Runs daily at 9 AM UTC
|
||||
workflow_dispatch:
|
||||
|
||||
jobs:
|
||||
auto-close-duplicates:
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 10
|
||||
permissions:
|
||||
contents: read
|
||||
issues: write # Need write permission to close issues and add comments
|
||||
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Setup Node.js
|
||||
uses: actions/setup-node@v4
|
||||
with:
|
||||
node-version: 20
|
||||
|
||||
- name: Auto-close duplicate issues
|
||||
run: node .github/scripts/auto-close-duplicates.mjs
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
GITHUB_REPOSITORY_OWNER: ${{ github.repository_owner }}
|
||||
GITHUB_REPOSITORY_NAME: ${{ github.event.repository.name }}
|
||||
.github/workflows/backfill-duplicate-comments.yml: 46 lines (vendored, normal file)
@@ -0,0 +1,46 @@
|
||||
name: Backfill Duplicate Comments
|
||||
# description: Triggers duplicate detection for old issues that don't have duplicate comments
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
days_back:
|
||||
description: "How many days back to look for old issues"
|
||||
required: false
|
||||
default: "90"
|
||||
type: string
|
||||
dry_run:
|
||||
description: "Dry run mode (true to only log what would be done)"
|
||||
required: false
|
||||
default: "true"
|
||||
type: choice
|
||||
options:
|
||||
- "true"
|
||||
- "false"
|
||||
|
||||
jobs:
|
||||
backfill-duplicate-comments:
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
permissions:
|
||||
contents: read
|
||||
issues: read
|
||||
actions: write
|
||||
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Setup Node.js
|
||||
uses: actions/setup-node@v4
|
||||
with:
|
||||
node-version: 20
|
||||
|
||||
- name: Backfill duplicate comments
|
||||
run: node .github/scripts/backfill-duplicate-comments.mjs
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
GITHUB_REPOSITORY_OWNER: ${{ github.repository_owner }}
|
||||
GITHUB_REPOSITORY_NAME: ${{ github.event.repository.name }}
|
||||
DAYS_BACK: ${{ inputs.days_back }}
|
||||
DRY_RUN: ${{ inputs.dry_run }}
|
||||
.github/workflows/ci.yml: 128 lines (vendored)
@@ -6,73 +6,124 @@ on:
|
||||
- main
|
||||
- next
|
||||
pull_request:
|
||||
branches:
|
||||
- main
|
||||
- next
|
||||
workflow_dispatch:
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
|
||||
cancel-in-progress: true
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
env:
|
||||
DO_NOT_TRACK: 1
|
||||
NODE_ENV: development
|
||||
|
||||
jobs:
|
||||
setup:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- uses: actions/setup-node@v4
|
||||
with:
|
||||
node-version: 20
|
||||
cache: 'npm'
|
||||
|
||||
- name: Install Dependencies
|
||||
id: install
|
||||
run: npm ci
|
||||
timeout-minutes: 2
|
||||
|
||||
- name: Cache node_modules
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: node_modules
|
||||
key: ${{ runner.os }}-node-modules-${{ hashFiles('**/package-lock.json') }}
|
||||
|
||||
# Fast checks that can run in parallel
|
||||
format-check:
|
||||
needs: setup
|
||||
name: Format Check
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 2
|
||||
|
||||
- uses: actions/setup-node@v4
|
||||
with:
|
||||
node-version: 20
|
||||
cache: "npm"
|
||||
|
||||
- name: Restore node_modules
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: node_modules
|
||||
key: ${{ runner.os }}-node-modules-${{ hashFiles('**/package-lock.json') }}
|
||||
- name: Install dependencies
|
||||
run: npm install --frozen-lockfile --prefer-offline
|
||||
timeout-minutes: 5
|
||||
|
||||
- name: Format Check
|
||||
run: npm run format-check
|
||||
env:
|
||||
FORCE_COLOR: 1
|
||||
|
||||
test:
|
||||
needs: setup
|
||||
typecheck:
|
||||
name: Typecheck
|
||||
timeout-minutes: 10
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 2
|
||||
|
||||
- uses: actions/setup-node@v4
|
||||
with:
|
||||
node-version: 20
|
||||
cache: "npm"
|
||||
|
||||
- name: Restore node_modules
|
||||
uses: actions/cache@v4
|
||||
- name: Install dependencies
|
||||
run: npm install --frozen-lockfile --prefer-offline
|
||||
timeout-minutes: 5
|
||||
|
||||
- name: Typecheck
|
||||
run: npm run turbo:typecheck
|
||||
env:
|
||||
FORCE_COLOR: 1
|
||||
|
||||
# Build job to ensure everything compiles
|
||||
build:
|
||||
name: Build
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
path: node_modules
|
||||
key: ${{ runner.os }}-node-modules-${{ hashFiles('**/package-lock.json') }}
|
||||
fetch-depth: 2
|
||||
|
||||
- uses: actions/setup-node@v4
|
||||
with:
|
||||
node-version: 20
|
||||
cache: "npm"
|
||||
|
||||
- name: Install dependencies
|
||||
run: npm install --frozen-lockfile --prefer-offline
|
||||
timeout-minutes: 5
|
||||
|
||||
- name: Build
|
||||
run: npm run turbo:build
|
||||
env:
|
||||
NODE_ENV: production
|
||||
FORCE_COLOR: 1
|
||||
TM_PUBLIC_BASE_DOMAIN: ${{ secrets.TM_PUBLIC_BASE_DOMAIN }}
|
||||
TM_PUBLIC_SUPABASE_URL: ${{ secrets.TM_PUBLIC_SUPABASE_URL }}
|
||||
TM_PUBLIC_SUPABASE_ANON_KEY: ${{ secrets.TM_PUBLIC_SUPABASE_ANON_KEY }}
|
||||
|
||||
- name: Upload build artifacts
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: build-artifacts
|
||||
path: dist/
|
||||
retention-days: 1
|
||||
|
||||
test:
|
||||
name: Test
|
||||
timeout-minutes: 15
|
||||
runs-on: ubuntu-latest
|
||||
needs: [format-check, typecheck, build]
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 2
|
||||
|
||||
- uses: actions/setup-node@v4
|
||||
with:
|
||||
node-version: 20
|
||||
cache: "npm"
|
||||
|
||||
- name: Install dependencies
|
||||
run: npm install --frozen-lockfile --prefer-offline
|
||||
timeout-minutes: 5
|
||||
|
||||
- name: Download build artifacts
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
name: build-artifacts
|
||||
path: dist/
|
||||
|
||||
- name: Run Tests
|
||||
run: |
|
||||
@@ -81,7 +132,6 @@ jobs:
|
||||
NODE_ENV: test
|
||||
CI: true
|
||||
FORCE_COLOR: 1
|
||||
timeout-minutes: 10
|
||||
|
||||
- name: Upload Test Results
|
||||
if: always()
|
||||
|
||||
81
.github/workflows/claude-dedupe-issues.yml
vendored
Normal file
@@ -0,0 +1,81 @@
|
||||
name: Claude Issue Dedupe
|
||||
# description: Automatically dedupe GitHub issues using Claude Code
|
||||
|
||||
on:
|
||||
issues:
|
||||
types: [opened]
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
issue_number:
|
||||
description: "Issue number to process for duplicate detection"
|
||||
required: true
|
||||
type: string
|
||||
|
||||
jobs:
|
||||
claude-dedupe-issues:
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 10
|
||||
permissions:
|
||||
contents: read
|
||||
issues: write
|
||||
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Run Claude Code slash command
|
||||
uses: anthropics/claude-code-base-action@beta
|
||||
with:
|
||||
prompt: "/dedupe ${{ github.repository }}/issues/${{ github.event.issue.number || inputs.issue_number }}"
|
||||
anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }}
|
||||
claude_env: |
|
||||
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Log duplicate comment event to Statsig
|
||||
if: always()
|
||||
env:
|
||||
STATSIG_API_KEY: ${{ secrets.STATSIG_API_KEY }}
|
||||
run: |
|
||||
ISSUE_NUMBER=${{ github.event.issue.number || inputs.issue_number }}
|
||||
REPO=${{ github.repository }}
|
||||
|
||||
if [ -z "$STATSIG_API_KEY" ]; then
|
||||
echo "STATSIG_API_KEY not found, skipping Statsig logging"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
# Prepare the event payload
|
||||
EVENT_PAYLOAD=$(jq -n \
|
||||
--arg issue_number "$ISSUE_NUMBER" \
|
||||
--arg repo "$REPO" \
|
||||
--arg triggered_by "${{ github.event_name }}" \
|
||||
'{
|
||||
events: [{
|
||||
eventName: "github_duplicate_comment_added",
|
||||
value: 1,
|
||||
metadata: {
|
||||
repository: $repo,
|
||||
issue_number: ($issue_number | tonumber),
|
||||
triggered_by: $triggered_by,
|
||||
workflow_run_id: "${{ github.run_id }}"
|
||||
},
|
||||
time: (now | floor | tostring)
|
||||
}]
|
||||
}')
|
||||
|
||||
# Send to Statsig API
|
||||
echo "Logging duplicate comment event to Statsig for issue #${ISSUE_NUMBER}"
|
||||
|
||||
RESPONSE=$(curl -s -w "\n%{http_code}" -X POST https://events.statsigapi.net/v1/log_event \
|
||||
-H "Content-Type: application/json" \
|
||||
-H "STATSIG-API-KEY: ${STATSIG_API_KEY}" \
|
||||
-d "$EVENT_PAYLOAD")
|
||||
|
||||
HTTP_CODE=$(echo "$RESPONSE" | tail -n1)
|
||||
BODY=$(echo "$RESPONSE" | head -n-1)
|
||||
|
||||
if [ "$HTTP_CODE" -eq 200 ] || [ "$HTTP_CODE" -eq 202 ]; then
|
||||
echo "Successfully logged duplicate comment event for issue #${ISSUE_NUMBER}"
|
||||
else
|
||||
echo "Failed to log duplicate comment event for issue #${ISSUE_NUMBER}. HTTP ${HTTP_CODE}: ${BODY}"
|
||||
fi
|
||||
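Editor's note: for orientation, the jq program in the Statsig logging step above assembles a payload of the following shape before POSTing it to https://events.statsigapi.net/v1/log_event. The repository, issue number, trigger, run ID, and timestamp below are placeholder values, not output from a real run:

{
  "events": [
    {
      "eventName": "github_duplicate_comment_added",
      "value": 1,
      "metadata": {
        "repository": "owner/repo",
        "issue_number": 123,
        "triggered_by": "issues",
        "workflow_run_id": "1234567890"
      },
      "time": "1712345678"
    }
  ]
}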
57
.github/workflows/claude-docs-trigger.yml
vendored
Normal file
@@ -0,0 +1,57 @@
|
||||
name: Trigger Claude Documentation Update
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- next
|
||||
paths-ignore:
|
||||
- "apps/docs/**"
|
||||
- "*.md"
|
||||
- ".github/workflows/**"
|
||||
|
||||
jobs:
|
||||
trigger-docs-update:
|
||||
# Only run if changes were merged (not direct pushes from bots)
|
||||
if: github.actor != 'github-actions[bot]' && github.actor != 'dependabot[bot]'
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
contents: read
|
||||
actions: write
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 2 # Need previous commit for comparison
|
||||
|
||||
- name: Get changed files
|
||||
id: changed-files
|
||||
run: |
|
||||
echo "Changed files in this push:"
|
||||
git diff --name-only HEAD^ HEAD | tee changed_files.txt
|
||||
|
||||
# Store changed files for Claude to analyze (escaped for JSON)
|
||||
CHANGED_FILES=$(git diff --name-only HEAD^ HEAD | jq -Rs .)
|
||||
echo "changed_files=$CHANGED_FILES" >> $GITHUB_OUTPUT
|
||||
|
||||
# Get the commit message (escaped for JSON)
|
||||
COMMIT_MSG=$(git log -1 --pretty=%B | jq -Rs .)
|
||||
echo "commit_message=$COMMIT_MSG" >> $GITHUB_OUTPUT
|
||||
|
||||
# Get diff for documentation context (escaped for JSON)
|
||||
COMMIT_DIFF=$(git diff HEAD^ HEAD --stat | jq -Rs .)
|
||||
echo "commit_diff=$COMMIT_DIFF" >> $GITHUB_OUTPUT
|
||||
|
||||
# Get commit SHA
|
||||
echo "commit_sha=${{ github.sha }}" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Trigger Claude workflow
|
||||
env:
|
||||
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
run: |
|
||||
# Trigger the Claude docs updater workflow with the change information
|
||||
gh workflow run claude-docs-updater.yml \
|
||||
--ref next \
|
||||
-f commit_sha="${{ steps.changed-files.outputs.commit_sha }}" \
|
||||
-f commit_message=${{ steps.changed-files.outputs.commit_message }} \
|
||||
-f changed_files=${{ steps.changed-files.outputs.changed_files }} \
|
||||
-f commit_diff=${{ steps.changed-files.outputs.commit_diff }}
|
||||
145
.github/workflows/claude-docs-updater.yml
vendored
Normal file
@@ -0,0 +1,145 @@
|
||||
name: Claude Documentation Updater
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
commit_sha:
|
||||
description: 'The commit SHA that triggered this update'
|
||||
required: true
|
||||
type: string
|
||||
commit_message:
|
||||
description: 'The commit message'
|
||||
required: true
|
||||
type: string
|
||||
changed_files:
|
||||
description: 'List of changed files'
|
||||
required: true
|
||||
type: string
|
||||
commit_diff:
|
||||
description: 'Diff summary of changes'
|
||||
required: true
|
||||
type: string
|
||||
|
||||
jobs:
|
||||
update-docs:
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
contents: write
|
||||
pull-requests: write
|
||||
issues: write
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
ref: next
|
||||
fetch-depth: 0 # Need full history to checkout specific commit
|
||||
|
||||
- name: Create docs update branch
|
||||
id: create-branch
|
||||
run: |
|
||||
BRANCH_NAME="docs/auto-update-$(date +%Y%m%d-%H%M%S)"
|
||||
git checkout -b $BRANCH_NAME
|
||||
echo "branch_name=$BRANCH_NAME" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Run Claude Code to Update Documentation
|
||||
uses: anthropics/claude-code-action@beta
|
||||
with:
|
||||
anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }}
|
||||
timeout_minutes: "30"
|
||||
mode: "agent"
|
||||
github_token: ${{ secrets.GITHUB_TOKEN }}
|
||||
experimental_allowed_domains: |
|
||||
.anthropic.com
|
||||
.github.com
|
||||
api.github.com
|
||||
.githubusercontent.com
|
||||
registry.npmjs.org
|
||||
.task-master.dev
|
||||
base_branch: "next"
|
||||
direct_prompt: |
|
||||
You are a documentation specialist. Analyze the recent changes pushed to the 'next' branch and update the documentation accordingly.
|
||||
|
||||
Recent changes:
|
||||
- Commit: ${{ inputs.commit_message }}
|
||||
- Changed files:
|
||||
${{ inputs.changed_files }}
|
||||
|
||||
- Changes summary:
|
||||
${{ inputs.commit_diff }}
|
||||
|
||||
Your task:
|
||||
1. Analyze the changes to understand what functionality was added, modified, or removed
|
||||
2. Check if these changes require documentation updates in apps/docs/
|
||||
3. If documentation updates are needed:
|
||||
- Update relevant documentation files in apps/docs/
|
||||
- Ensure examples are updated if APIs changed
|
||||
- Update any configuration documentation if config options changed
|
||||
- Add new documentation pages if new features were added
|
||||
- Update the changelog or release notes if applicable
|
||||
4. If no documentation updates are needed, skip creating changes
|
||||
|
||||
Guidelines:
|
||||
- Focus only on user-facing changes that need documentation
|
||||
- Keep documentation clear, concise, and helpful
|
||||
- Include code examples where appropriate
|
||||
- Maintain consistent documentation style with existing docs
|
||||
- Don't document internal implementation details unless they affect users
|
||||
- Update navigation/menu files if new pages are added
|
||||
|
||||
Only make changes if the documentation truly needs updating based on the code changes.
|
||||
|
||||
- name: Check if changes were made
|
||||
id: check-changes
|
||||
run: |
|
||||
if git diff --quiet; then
|
||||
echo "has_changes=false" >> $GITHUB_OUTPUT
|
||||
else
|
||||
echo "has_changes=true" >> $GITHUB_OUTPUT
|
||||
git add -A
|
||||
git config --local user.email "github-actions[bot]@users.noreply.github.com"
|
||||
git config --local user.name "github-actions[bot]"
|
||||
git commit -m "docs: auto-update documentation based on changes in next branch
|
||||
|
||||
This PR was automatically generated to update documentation based on recent changes.
|
||||
|
||||
Original commit: ${{ inputs.commit_message }}
|
||||
|
||||
Co-authored-by: Claude <claude-assistant@anthropic.com>"
|
||||
fi
|
||||
|
||||
- name: Push changes and create PR
|
||||
if: steps.check-changes.outputs.has_changes == 'true'
|
||||
env:
|
||||
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
run: |
|
||||
git push origin ${{ steps.create-branch.outputs.branch_name }}
|
||||
|
||||
# Create PR using GitHub CLI
|
||||
gh pr create \
|
||||
--title "docs: update documentation for recent changes" \
|
||||
--body "## 📚 Documentation Update
|
||||
|
||||
This PR automatically updates documentation based on recent changes merged to the \`next\` branch.
|
||||
|
||||
### Original Changes
|
||||
**Commit:** ${{ inputs.commit_sha }}
|
||||
**Message:** ${{ inputs.commit_message }}
|
||||
|
||||
### Changed Files in Original Commit
|
||||
\`\`\`
|
||||
${{ inputs.changed_files }}
|
||||
\`\`\`
|
||||
|
||||
### Documentation Updates
|
||||
This PR includes documentation updates to reflect the changes above. Please review to ensure:
|
||||
- [ ] Documentation accurately reflects the changes
|
||||
- [ ] Examples are correct and working
|
||||
- [ ] No important details are missing
|
||||
- [ ] Style is consistent with existing documentation
|
||||
|
||||
---
|
||||
*This PR was automatically generated by Claude Code GitHub Action*" \
|
||||
--base next \
|
||||
--head ${{ steps.create-branch.outputs.branch_name }} \
|
||||
--label "documentation" \
|
||||
--label "automated"
|
||||
107
.github/workflows/claude-issue-triage.yml
vendored
Normal file
@@ -0,0 +1,107 @@
|
||||
name: Claude Issue Triage
|
||||
# description: Automatically triage GitHub issues using Claude Code
|
||||
|
||||
on:
|
||||
issues:
|
||||
types: [opened]
|
||||
|
||||
jobs:
|
||||
triage-issue:
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 10
|
||||
permissions:
|
||||
contents: read
|
||||
issues: write
|
||||
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Create triage prompt
|
||||
run: |
|
||||
mkdir -p /tmp/claude-prompts
|
||||
cat > /tmp/claude-prompts/triage-prompt.txt << 'EOF'
|
||||
You're an issue triage assistant for GitHub issues. Your task is to analyze the issue and select appropriate labels from the provided list.
|
||||
|
||||
IMPORTANT: Don't post any comments or messages to the issue. Your only action should be to apply labels.
|
||||
|
||||
Issue Information:
|
||||
- REPO: ${{ github.repository }}
|
||||
- ISSUE_NUMBER: ${{ github.event.issue.number }}
|
||||
|
||||
TASK OVERVIEW:
|
||||
|
||||
1. First, fetch the list of labels available in this repository by running: `gh label list`. Run exactly this command with nothing else.
|
||||
|
||||
2. Next, use the GitHub tools to get context about the issue:
|
||||
- You have access to these tools:
|
||||
- mcp__github__get_issue: Use this to retrieve the current issue's details including title, description, and existing labels
|
||||
- mcp__github__get_issue_comments: Use this to read any discussion or additional context provided in the comments
|
||||
- mcp__github__update_issue: Use this to apply labels to the issue (do not use this for commenting)
|
||||
- mcp__github__search_issues: Use this to find similar issues that might provide context for proper categorization and to identify potential duplicate issues
|
||||
- mcp__github__list_issues: Use this to understand patterns in how other issues are labeled
|
||||
- Start by using mcp__github__get_issue to get the issue details
|
||||
|
||||
3. Analyze the issue content, considering:
|
||||
- The issue title and description
|
||||
- The type of issue (bug report, feature request, question, etc.)
|
||||
- Technical areas mentioned
|
||||
- Severity or priority indicators
|
||||
- User impact
|
||||
- Components affected
|
||||
|
||||
4. Select appropriate labels from the available labels list provided above:
|
||||
- Choose labels that accurately reflect the issue's nature
|
||||
- Be specific but comprehensive
|
||||
- Select priority labels if you can determine urgency (high-priority, med-priority, or low-priority)
|
||||
- Consider platform labels (android, ios) if applicable
|
||||
- If you find similar issues using mcp__github__search_issues, consider using a "duplicate" label if appropriate. Only do so if the issue is a duplicate of another OPEN issue.
|
||||
|
||||
5. Apply the selected labels:
|
||||
- Use mcp__github__update_issue to apply your selected labels
|
||||
- DO NOT post any comments explaining your decision
|
||||
- DO NOT communicate directly with users
|
||||
- If no labels are clearly applicable, do not apply any labels
|
||||
|
||||
IMPORTANT GUIDELINES:
|
||||
- Be thorough in your analysis
|
||||
- Only select labels from the provided list above
|
||||
- DO NOT post any comments to the issue
|
||||
- Your ONLY action should be to apply labels using mcp__github__update_issue
|
||||
- It's okay to not add any labels if none are clearly applicable
|
||||
EOF
|
||||
|
||||
- name: Setup GitHub MCP Server
|
||||
run: |
|
||||
mkdir -p /tmp/mcp-config
|
||||
cat > /tmp/mcp-config/mcp-servers.json << 'EOF'
|
||||
{
|
||||
"mcpServers": {
|
||||
"github": {
|
||||
"command": "docker",
|
||||
"args": [
|
||||
"run",
|
||||
"-i",
|
||||
"--rm",
|
||||
"-e",
|
||||
"GITHUB_PERSONAL_ACCESS_TOKEN",
|
||||
"ghcr.io/github/github-mcp-server:sha-7aced2b"
|
||||
],
|
||||
"env": {
|
||||
"GITHUB_PERSONAL_ACCESS_TOKEN": "${{ secrets.GITHUB_TOKEN }}"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
EOF
|
||||
|
||||
- name: Run Claude Code for Issue Triage
|
||||
uses: anthropics/claude-code-base-action@beta
|
||||
with:
|
||||
prompt_file: /tmp/claude-prompts/triage-prompt.txt
|
||||
allowed_tools: "Bash(gh label list),mcp__github__get_issue,mcp__github__get_issue_comments,mcp__github__update_issue,mcp__github__search_issues,mcp__github__list_issues"
|
||||
timeout_minutes: "5"
|
||||
anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }}
|
||||
mcp_config: /tmp/mcp-config/mcp-servers.json
|
||||
claude_env: |
|
||||
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
36
.github/workflows/claude.yml
vendored
Normal file
@@ -0,0 +1,36 @@
name: Claude Code

on:
issue_comment:
types: [created]
pull_request_review_comment:
types: [created]
issues:
types: [opened, assigned]
pull_request_review:
types: [submitted]

jobs:
claude:
if: |
(github.event_name == 'issue_comment' && contains(github.event.comment.body, '@claude')) ||
(github.event_name == 'pull_request_review_comment' && contains(github.event.comment.body, '@claude')) ||
(github.event_name == 'pull_request_review' && contains(github.event.review.body, '@claude')) ||
(github.event_name == 'issues' && (contains(github.event.issue.body, '@claude') || contains(github.event.issue.title, '@claude')))
runs-on: ubuntu-latest
permissions:
contents: read
pull-requests: read
issues: read
id-token: write
steps:
- name: Checkout repository
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
with:
fetch-depth: 1

- name: Run Claude Code
id: claude
uses: anthropics/claude-code-action@beta
with:
anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }}
140
.github/workflows/extension-ci.yml
vendored
Normal file
@@ -0,0 +1,140 @@
|
||||
name: Extension CI
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
- next
|
||||
paths:
|
||||
- 'apps/extension/**'
|
||||
- '.github/workflows/extension-ci.yml'
|
||||
pull_request:
|
||||
branches:
|
||||
- main
|
||||
- next
|
||||
paths:
|
||||
- 'apps/extension/**'
|
||||
- '.github/workflows/extension-ci.yml'
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
setup:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- uses: actions/setup-node@v4
|
||||
with:
|
||||
node-version: 20
|
||||
|
||||
- name: Cache node_modules
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: |
|
||||
node_modules
|
||||
*/*/node_modules
|
||||
key: ${{ runner.os }}-node-${{ hashFiles('**/package-lock.json') }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-node-
|
||||
|
||||
- name: Install Monorepo Dependencies
|
||||
run: npm ci
|
||||
timeout-minutes: 5
|
||||
|
||||
typecheck:
|
||||
needs: setup
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- uses: actions/setup-node@v4
|
||||
with:
|
||||
node-version: 20
|
||||
|
||||
|
||||
- name: Restore node_modules
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: |
|
||||
node_modules
|
||||
*/*/node_modules
|
||||
key: ${{ runner.os }}-node-${{ hashFiles('**/package-lock.json') }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-node-
|
||||
|
||||
- name: Install if cache miss
|
||||
run: npm ci
|
||||
timeout-minutes: 3
|
||||
|
||||
- name: Type Check Extension
|
||||
working-directory: apps/extension
|
||||
run: npm run check-types
|
||||
env:
|
||||
FORCE_COLOR: 1
|
||||
|
||||
build:
|
||||
needs: setup
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- uses: actions/setup-node@v4
|
||||
with:
|
||||
node-version: 20
|
||||
|
||||
|
||||
- name: Restore node_modules
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: |
|
||||
node_modules
|
||||
*/*/node_modules
|
||||
key: ${{ runner.os }}-node-${{ hashFiles('**/package-lock.json') }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-node-
|
||||
|
||||
- name: Install if cache miss
|
||||
run: npm ci
|
||||
timeout-minutes: 3
|
||||
|
||||
- name: Build Extension
|
||||
working-directory: apps/extension
|
||||
run: npm run build
|
||||
env:
|
||||
FORCE_COLOR: 1
|
||||
|
||||
- name: Package Extension
|
||||
working-directory: apps/extension
|
||||
run: npm run package
|
||||
env:
|
||||
FORCE_COLOR: 1
|
||||
|
||||
- name: Verify Package Contents
|
||||
working-directory: apps/extension
|
||||
run: |
|
||||
echo "Checking vsix-build contents..."
|
||||
ls -la vsix-build/
|
||||
echo "Checking dist contents..."
|
||||
ls -la vsix-build/dist/
|
||||
echo "Checking package.json exists..."
|
||||
test -f vsix-build/package.json
|
||||
|
||||
- name: Create VSIX Package (Test)
|
||||
working-directory: apps/extension/vsix-build
|
||||
run: npx vsce package --no-dependencies
|
||||
env:
|
||||
FORCE_COLOR: 1
|
||||
|
||||
- name: Upload Extension Artifact
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: extension-package
|
||||
path: |
|
||||
apps/extension/vsix-build/*.vsix
|
||||
apps/extension/dist/
|
||||
retention-days: 30
|
||||
|
||||
110
.github/workflows/extension-release.yml
vendored
Normal file
@@ -0,0 +1,110 @@
|
||||
name: Extension Release
|
||||
|
||||
on:
|
||||
push:
|
||||
tags:
|
||||
- "extension@*"
|
||||
|
||||
permissions:
|
||||
contents: write
|
||||
|
||||
concurrency: extension-release-${{ github.ref }}
|
||||
|
||||
jobs:
|
||||
publish-extension:
|
||||
runs-on: ubuntu-latest
|
||||
environment: extension-release
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- uses: actions/setup-node@v4
|
||||
with:
|
||||
node-version: 20
|
||||
|
||||
- name: Cache node_modules
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: |
|
||||
node_modules
|
||||
*/*/node_modules
|
||||
key: ${{ runner.os }}-node-${{ hashFiles('**/package-lock.json') }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-node-
|
||||
|
||||
- name: Install Monorepo Dependencies
|
||||
run: npm ci
|
||||
timeout-minutes: 5
|
||||
|
||||
- name: Type Check Extension
|
||||
working-directory: apps/extension
|
||||
run: npm run check-types
|
||||
env:
|
||||
FORCE_COLOR: 1
|
||||
|
||||
- name: Build Extension
|
||||
working-directory: apps/extension
|
||||
run: npm run build
|
||||
env:
|
||||
FORCE_COLOR: 1
|
||||
|
||||
- name: Package Extension
|
||||
working-directory: apps/extension
|
||||
run: npm run package
|
||||
env:
|
||||
FORCE_COLOR: 1
|
||||
|
||||
- name: Create VSIX Package
|
||||
working-directory: apps/extension/vsix-build
|
||||
run: npx vsce package --no-dependencies
|
||||
env:
|
||||
FORCE_COLOR: 1
|
||||
|
||||
- name: Get VSIX filename
|
||||
id: vsix-info
|
||||
working-directory: apps/extension/vsix-build
|
||||
run: |
|
||||
VSIX_FILE=$(find . -maxdepth 1 -name "*.vsix" -type f | head -n1 | xargs basename)
|
||||
if [ -z "$VSIX_FILE" ]; then
|
||||
echo "Error: No VSIX file found"
|
||||
exit 1
|
||||
fi
|
||||
echo "vsix-filename=$VSIX_FILE" >> "$GITHUB_OUTPUT"
|
||||
echo "Found VSIX: $VSIX_FILE"
|
||||
|
||||
- name: Publish to VS Code Marketplace
|
||||
working-directory: apps/extension/vsix-build
|
||||
run: npx vsce publish --packagePath "${{ steps.vsix-info.outputs.vsix-filename }}"
|
||||
env:
|
||||
VSCE_PAT: ${{ secrets.VSCE_PAT }}
|
||||
FORCE_COLOR: 1
|
||||
|
||||
- name: Install Open VSX CLI
|
||||
run: npm install -g ovsx
|
||||
|
||||
- name: Publish to Open VSX Registry
|
||||
working-directory: apps/extension/vsix-build
|
||||
run: ovsx publish "${{ steps.vsix-info.outputs.vsix-filename }}"
|
||||
env:
|
||||
OVSX_PAT: ${{ secrets.OVSX_PAT }}
|
||||
FORCE_COLOR: 1
|
||||
|
||||
- name: Upload Build Artifacts
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: extension-release-${{ github.ref_name }}
|
||||
path: |
|
||||
apps/extension/vsix-build/*.vsix
|
||||
apps/extension/dist/
|
||||
retention-days: 90
|
||||
|
||||
notify-success:
|
||||
needs: publish-extension
|
||||
if: success()
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Success Notification
|
||||
run: |
|
||||
echo "🎉 Extension ${{ github.ref_name }} successfully published!"
|
||||
echo "📦 Available on VS Code Marketplace"
|
||||
echo "🌍 Available on Open VSX Registry"
|
||||
echo "🏷️ GitHub release created: ${{ github.ref_name }}"
|
||||
176
.github/workflows/log-issue-events.yml
vendored
Normal file
@@ -0,0 +1,176 @@
|
||||
name: Log GitHub Issue Events
|
||||
|
||||
on:
|
||||
issues:
|
||||
types: [opened, closed]
|
||||
|
||||
jobs:
|
||||
log-issue-created:
|
||||
if: github.event.action == 'opened'
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 5
|
||||
permissions:
|
||||
contents: read
|
||||
issues: read
|
||||
|
||||
steps:
|
||||
- name: Log issue creation to Statsig
|
||||
env:
|
||||
STATSIG_API_KEY: ${{ secrets.STATSIG_API_KEY }}
|
||||
run: |
|
||||
ISSUE_NUMBER=${{ github.event.issue.number }}
|
||||
REPO=${{ github.repository }}
|
||||
ISSUE_TITLE=$(echo '${{ github.event.issue.title }}' | sed "s/'/'\\\\''/g")
|
||||
AUTHOR="${{ github.event.issue.user.login }}"
|
||||
CREATED_AT="${{ github.event.issue.created_at }}"
|
||||
|
||||
if [ -z "$STATSIG_API_KEY" ]; then
|
||||
echo "STATSIG_API_KEY not found, skipping Statsig logging"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
# Prepare the event payload
|
||||
EVENT_PAYLOAD=$(jq -n \
|
||||
--arg issue_number "$ISSUE_NUMBER" \
|
||||
--arg repo "$REPO" \
|
||||
--arg title "$ISSUE_TITLE" \
|
||||
--arg author "$AUTHOR" \
|
||||
--arg created_at "$CREATED_AT" \
|
||||
'{
|
||||
events: [{
|
||||
eventName: "github_issue_created",
|
||||
value: 1,
|
||||
metadata: {
|
||||
repository: $repo,
|
||||
issue_number: ($issue_number | tonumber),
|
||||
issue_title: $title,
|
||||
issue_author: $author,
|
||||
created_at: $created_at
|
||||
},
|
||||
time: (now | floor | tostring)
|
||||
}]
|
||||
}')
|
||||
|
||||
# Send to Statsig API
|
||||
echo "Logging issue creation to Statsig for issue #${ISSUE_NUMBER}"
|
||||
|
||||
RESPONSE=$(curl -s -w "\n%{http_code}" -X POST https://events.statsigapi.net/v1/log_event \
|
||||
-H "Content-Type: application/json" \
|
||||
-H "STATSIG-API-KEY: ${STATSIG_API_KEY}" \
|
||||
-d "$EVENT_PAYLOAD")
|
||||
|
||||
HTTP_CODE=$(echo "$RESPONSE" | tail -n1)
|
||||
BODY=$(echo "$RESPONSE" | head -n-1)
|
||||
|
||||
if [ "$HTTP_CODE" -eq 200 ] || [ "$HTTP_CODE" -eq 202 ]; then
|
||||
echo "Successfully logged issue creation for issue #${ISSUE_NUMBER}"
|
||||
else
|
||||
echo "Failed to log issue creation for issue #${ISSUE_NUMBER}. HTTP ${HTTP_CODE}: ${BODY}"
|
||||
fi
|
||||
|
||||
log-issue-closed:
|
||||
if: github.event.action == 'closed'
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 5
|
||||
permissions:
|
||||
contents: read
|
||||
issues: read
|
||||
|
||||
steps:
|
||||
- name: Log issue closure to Statsig
|
||||
env:
|
||||
STATSIG_API_KEY: ${{ secrets.STATSIG_API_KEY }}
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
run: |
|
||||
ISSUE_NUMBER=${{ github.event.issue.number }}
|
||||
REPO=${{ github.repository }}
|
||||
ISSUE_TITLE=$(echo '${{ github.event.issue.title }}' | sed "s/'/'\\\\''/g")
|
||||
CLOSED_BY="${{ github.event.issue.closed_by.login }}"
|
||||
CLOSED_AT="${{ github.event.issue.closed_at }}"
|
||||
STATE_REASON="${{ github.event.issue.state_reason }}"
|
||||
|
||||
if [ -z "$STATSIG_API_KEY" ]; then
|
||||
echo "STATSIG_API_KEY not found, skipping Statsig logging"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
# Get additional issue data via GitHub API
|
||||
echo "Fetching additional issue data for #${ISSUE_NUMBER}"
|
||||
ISSUE_DATA=$(curl -s -H "Authorization: token ${GITHUB_TOKEN}" \
|
||||
-H "Accept: application/vnd.github.v3+json" \
|
||||
"https://api.github.com/repos/${REPO}/issues/${ISSUE_NUMBER}")
|
||||
|
||||
COMMENTS_COUNT=$(echo "$ISSUE_DATA" | jq -r '.comments')
|
||||
|
||||
# Get reactions data
|
||||
REACTIONS_DATA=$(curl -s -H "Authorization: token ${GITHUB_TOKEN}" \
|
||||
-H "Accept: application/vnd.github.v3+json" \
|
||||
"https://api.github.com/repos/${REPO}/issues/${ISSUE_NUMBER}/reactions")
|
||||
|
||||
REACTIONS_COUNT=$(echo "$REACTIONS_DATA" | jq '. | length')
|
||||
|
||||
# Check if issue was closed automatically (by checking if closed_by is a bot)
|
||||
CLOSED_AUTOMATICALLY="false"
|
||||
if [[ "$CLOSED_BY" == *"[bot]"* ]]; then
|
||||
CLOSED_AUTOMATICALLY="true"
|
||||
fi
|
||||
|
||||
# Check if closed as duplicate by state_reason
|
||||
CLOSED_AS_DUPLICATE="false"
|
||||
if [ "$STATE_REASON" = "duplicate" ]; then
|
||||
CLOSED_AS_DUPLICATE="true"
|
||||
fi
|
||||
|
||||
# Prepare the event payload
|
||||
EVENT_PAYLOAD=$(jq -n \
|
||||
--arg issue_number "$ISSUE_NUMBER" \
|
||||
--arg repo "$REPO" \
|
||||
--arg title "$ISSUE_TITLE" \
|
||||
--arg closed_by "$CLOSED_BY" \
|
||||
--arg closed_at "$CLOSED_AT" \
|
||||
--arg state_reason "$STATE_REASON" \
|
||||
--arg comments_count "$COMMENTS_COUNT" \
|
||||
--arg reactions_count "$REACTIONS_COUNT" \
|
||||
--arg closed_automatically "$CLOSED_AUTOMATICALLY" \
|
||||
--arg closed_as_duplicate "$CLOSED_AS_DUPLICATE" \
|
||||
'{
|
||||
events: [{
|
||||
eventName: "github_issue_closed",
|
||||
value: 1,
|
||||
metadata: {
|
||||
repository: $repo,
|
||||
issue_number: ($issue_number | tonumber),
|
||||
issue_title: $title,
|
||||
closed_by: $closed_by,
|
||||
closed_at: $closed_at,
|
||||
state_reason: $state_reason,
|
||||
comments_count: ($comments_count | tonumber),
|
||||
reactions_count: ($reactions_count | tonumber),
|
||||
closed_automatically: ($closed_automatically | test("true")),
|
||||
closed_as_duplicate: ($closed_as_duplicate | test("true"))
|
||||
},
|
||||
time: (now | floor | tostring)
|
||||
}]
|
||||
}')
|
||||
|
||||
# Send to Statsig API
|
||||
echo "Logging issue closure to Statsig for issue #${ISSUE_NUMBER}"
|
||||
|
||||
RESPONSE=$(curl -s -w "\n%{http_code}" -X POST https://events.statsigapi.net/v1/log_event \
|
||||
-H "Content-Type: application/json" \
|
||||
-H "STATSIG-API-KEY: ${STATSIG_API_KEY}" \
|
||||
-d "$EVENT_PAYLOAD")
|
||||
|
||||
HTTP_CODE=$(echo "$RESPONSE" | tail -n1)
|
||||
BODY=$(echo "$RESPONSE" | head -n-1)
|
||||
|
||||
if [ "$HTTP_CODE" -eq 200 ] || [ "$HTTP_CODE" -eq 202 ]; then
|
||||
echo "Successfully logged issue closure for issue #${ISSUE_NUMBER}"
|
||||
echo "Closed by: $CLOSED_BY"
|
||||
echo "Comments: $COMMENTS_COUNT"
|
||||
echo "Reactions: $REACTIONS_COUNT"
|
||||
echo "Closed automatically: $CLOSED_AUTOMATICALLY"
|
||||
echo "Closed as duplicate: $CLOSED_AS_DUPLICATE"
|
||||
else
|
||||
echo "Failed to log issue closure for issue #${ISSUE_NUMBER}. HTTP ${HTTP_CODE}: ${BODY}"
|
||||
fi
|
||||
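Editor's note: the issue-closure logging step above produces a Statsig payload of roughly this shape. All values below are placeholders; the booleans come from the test("true") filters and the counts are converted with tonumber in the jq program shown above:

{
  "events": [
    {
      "eventName": "github_issue_closed",
      "value": 1,
      "metadata": {
        "repository": "owner/repo",
        "issue_number": 456,
        "issue_title": "Example issue title",
        "closed_by": "some-maintainer",
        "closed_at": "2024-01-01T00:00:00Z",
        "state_reason": "duplicate",
        "comments_count": 3,
        "reactions_count": 2,
        "closed_automatically": false,
        "closed_as_duplicate": true
      },
      "time": "1712345678"
    }
  ]
}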
55
.github/workflows/pre-release.yml
vendored
@@ -3,11 +3,13 @@ name: Pre-Release (RC)
|
||||
on:
|
||||
workflow_dispatch: # Allows manual triggering from GitHub UI/API
|
||||
|
||||
concurrency: pre-release-${{ github.ref }}
|
||||
|
||||
concurrency: pre-release-${{ github.ref_name }}
|
||||
jobs:
|
||||
rc:
|
||||
runs-on: ubuntu-latest
|
||||
# Only allow pre-releases on non-main branches
|
||||
if: github.ref != 'refs/heads/main'
|
||||
environment: extension-release
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
@@ -16,7 +18,7 @@ jobs:
|
||||
- uses: actions/setup-node@v4
|
||||
with:
|
||||
node-version: 20
|
||||
cache: 'npm'
|
||||
cache: "npm"
|
||||
|
||||
- name: Cache node_modules
|
||||
uses: actions/cache@v4
|
||||
@@ -32,10 +34,30 @@ jobs:
|
||||
run: npm ci
|
||||
timeout-minutes: 2
|
||||
|
||||
- name: Enter RC mode
|
||||
- name: Enter RC mode (if not already in RC mode)
|
||||
run: |
|
||||
npx changeset pre exit || true
|
||||
npx changeset pre enter rc
|
||||
# Check if we're in pre-release mode with the "rc" tag
|
||||
if [ -f .changeset/pre.json ]; then
|
||||
MODE=$(jq -r '.mode' .changeset/pre.json 2>/dev/null || echo '')
|
||||
TAG=$(jq -r '.tag' .changeset/pre.json 2>/dev/null || echo '')
|
||||
|
||||
if [ "$MODE" = "exit" ]; then
|
||||
echo "Pre-release mode is in 'exit' state, re-entering RC mode..."
|
||||
npx changeset pre enter rc
|
||||
elif [ "$MODE" = "pre" ] && [ "$TAG" != "rc" ]; then
|
||||
echo "In pre-release mode but with wrong tag ($TAG), switching to RC..."
|
||||
npx changeset pre exit
|
||||
npx changeset pre enter rc
|
||||
elif [ "$MODE" = "pre" ] && [ "$TAG" = "rc" ]; then
|
||||
echo "Already in RC pre-release mode"
|
||||
else
|
||||
echo "Unknown mode state: $MODE, entering RC mode..."
|
||||
npx changeset pre enter rc
|
||||
fi
|
||||
else
|
||||
echo "No pre.json found, entering RC mode..."
|
||||
npx changeset pre enter rc
|
||||
fi
|
||||
|
||||
- name: Version RC packages
|
||||
run: npx changeset version
|
||||
@@ -43,20 +65,31 @@ jobs:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
NPM_TOKEN: ${{ secrets.NPM_TOKEN }}
|
||||
|
||||
- name: Run format
|
||||
run: npm run format
|
||||
env:
|
||||
FORCE_COLOR: 1
|
||||
|
||||
- name: Build packages
|
||||
run: npm run turbo:build
|
||||
env:
|
||||
NODE_ENV: production
|
||||
FORCE_COLOR: 1
|
||||
TM_PUBLIC_BASE_DOMAIN: ${{ secrets.TM_PUBLIC_BASE_DOMAIN }}
|
||||
TM_PUBLIC_SUPABASE_URL: ${{ secrets.TM_PUBLIC_SUPABASE_URL }}
|
||||
TM_PUBLIC_SUPABASE_ANON_KEY: ${{ secrets.TM_PUBLIC_SUPABASE_ANON_KEY }}
|
||||
|
||||
- name: Create Release Candidate Pull Request or Publish Release Candidate to npm
|
||||
uses: changesets/action@v1
|
||||
with:
|
||||
publish: npm run release
|
||||
publish: npx changeset publish
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
NPM_TOKEN: ${{ secrets.NPM_TOKEN }}
|
||||
|
||||
- name: Exit RC mode
|
||||
run: npx changeset pre exit
|
||||
|
||||
- name: Commit & Push changes
|
||||
uses: actions-js/push@master
|
||||
with:
|
||||
github_token: ${{ secrets.GITHUB_TOKEN }}
|
||||
branch: ${{ github.ref }}
|
||||
message: 'chore: rc version bump'
|
||||
message: "chore: rc version bump"
|
||||
|
||||
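Editor's note on the RC-mode check above: changesets records its pre-release state in .changeset/pre.json, and the script only inspects the mode and tag fields. A minimal sketch of such a file is shown below; the initialVersions entry and package name are illustrative assumptions, and the exact contents vary by repository and changesets version:

{
  "mode": "pre",
  "tag": "rc",
  "initialVersions": {
    "example-package": "1.2.3"
  },
  "changesets": []
}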
21
.github/workflows/release-check.yml
vendored
Normal file
@@ -0,0 +1,21 @@
name: Release Check

on:
pull_request:
branches:
- main

concurrency:
group: release-check-${{ github.head_ref }}
cancel-in-progress: true

jobs:
check-release-mode:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0

- name: Check release mode
run: node ./.github/scripts/check-pre-release-mode.mjs "pull_request"
22
.github/workflows/release.yml
vendored
@@ -6,6 +6,11 @@ on:
|
||||
|
||||
concurrency: ${{ github.workflow }}-${{ github.ref }}
|
||||
|
||||
permissions:
|
||||
contents: write
|
||||
pull-requests: write
|
||||
id-token: write
|
||||
|
||||
jobs:
|
||||
release:
|
||||
runs-on: ubuntu-latest
|
||||
@@ -17,7 +22,7 @@ jobs:
|
||||
- uses: actions/setup-node@v4
|
||||
with:
|
||||
node-version: 20
|
||||
cache: 'npm'
|
||||
cache: "npm"
|
||||
|
||||
- name: Cache node_modules
|
||||
uses: actions/cache@v4
|
||||
@@ -33,13 +38,22 @@ jobs:
|
||||
run: npm ci
|
||||
timeout-minutes: 2
|
||||
|
||||
- name: Exit pre-release mode (safety check)
|
||||
run: npx changeset pre exit || true
|
||||
- name: Check pre-release mode
|
||||
run: node ./.github/scripts/check-pre-release-mode.mjs "main"
|
||||
|
||||
- name: Build packages
|
||||
run: npm run turbo:build
|
||||
env:
|
||||
NODE_ENV: production
|
||||
FORCE_COLOR: 1
|
||||
TM_PUBLIC_BASE_DOMAIN: ${{ secrets.TM_PUBLIC_BASE_DOMAIN }}
|
||||
TM_PUBLIC_SUPABASE_URL: ${{ secrets.TM_PUBLIC_SUPABASE_URL }}
|
||||
TM_PUBLIC_SUPABASE_ANON_KEY: ${{ secrets.TM_PUBLIC_SUPABASE_ANON_KEY }}
|
||||
|
||||
- name: Create Release Pull Request or Publish to npm
|
||||
uses: changesets/action@v1
|
||||
with:
|
||||
publish: npm run release
|
||||
publish: node ./.github/scripts/release.mjs
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
NPM_TOKEN: ${{ secrets.NPM_TOKEN }}
|
||||
|
||||
108
.github/workflows/weekly-metrics-discord.yml
vendored
Normal file
@@ -0,0 +1,108 @@
|
||||
name: Weekly Metrics to Discord
|
||||
# description: Sends weekly metrics summary to Discord channel
|
||||
|
||||
on:
|
||||
schedule:
|
||||
- cron: "0 9 * * 1" # Every Monday at 9 AM
|
||||
workflow_dispatch:
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
issues: read
|
||||
pull-requests: read
|
||||
|
||||
jobs:
|
||||
weekly-metrics:
|
||||
runs-on: ubuntu-latest
|
||||
env:
|
||||
DISCORD_WEBHOOK: ${{ secrets.DISCORD_METRICS_WEBHOOK }}
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Setup Node.js
|
||||
uses: actions/setup-node@v4
|
||||
with:
|
||||
node-version: '20'
|
||||
|
||||
- name: Get dates for last 14 days
|
||||
run: |
|
||||
set -Eeuo pipefail
|
||||
# Last 14 days
|
||||
first_day=$(date -d "14 days ago" +%Y-%m-%d)
|
||||
last_day=$(date +%Y-%m-%d)
|
||||
|
||||
echo "first_day=$first_day" >> $GITHUB_ENV
|
||||
echo "last_day=$last_day" >> $GITHUB_ENV
|
||||
echo "week_of=$(date -d '7 days ago' +'Week of %B %d, %Y')" >> $GITHUB_ENV
|
||||
echo "date_range=Past 14 days ($first_day to $last_day)" >> $GITHUB_ENV
|
||||
|
||||
- name: Generate issue metrics
|
||||
uses: github/issue-metrics@v3
|
||||
env:
|
||||
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
SEARCH_QUERY: "repo:${{ github.repository }} is:issue created:${{ env.first_day }}..${{ env.last_day }}"
|
||||
HIDE_TIME_TO_ANSWER: true
|
||||
HIDE_LABEL_METRICS: false
|
||||
OUTPUT_FILE: issue_metrics.md
|
||||
|
||||
- name: Generate PR created metrics
|
||||
uses: github/issue-metrics@v3
|
||||
env:
|
||||
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
SEARCH_QUERY: "repo:${{ github.repository }} is:pr created:${{ env.first_day }}..${{ env.last_day }}"
|
||||
OUTPUT_FILE: pr_created_metrics.md
|
||||
|
||||
- name: Generate PR merged metrics
|
||||
uses: github/issue-metrics@v3
|
||||
env:
|
||||
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
SEARCH_QUERY: "repo:${{ github.repository }} is:pr is:merged merged:${{ env.first_day }}..${{ env.last_day }}"
|
||||
OUTPUT_FILE: pr_merged_metrics.md
|
||||
|
||||
- name: Debug generated metrics
|
||||
run: |
|
||||
set -Eeuo pipefail
|
||||
echo "Listing markdown files in workspace:"
|
||||
ls -la *.md || true
|
||||
for f in issue_metrics.md pr_created_metrics.md pr_merged_metrics.md; do
|
||||
if [ -f "$f" ]; then
|
||||
echo "== $f (first 10 lines) =="
|
||||
head -n 10 "$f"
|
||||
else
|
||||
echo "Missing $f"
|
||||
fi
|
||||
done
|
||||
|
||||
- name: Parse metrics
|
||||
id: metrics
|
||||
run: node .github/scripts/parse-metrics.mjs
|
||||
|
||||
- name: Send to Discord
|
||||
uses: sarisia/actions-status-discord@v1
|
||||
if: env.DISCORD_WEBHOOK != ''
|
||||
with:
|
||||
webhook: ${{ env.DISCORD_WEBHOOK }}
|
||||
status: Success
|
||||
title: "📊 Weekly Metrics Report"
|
||||
description: |
|
||||
**${{ env.week_of }}**
|
||||
*${{ env.date_range }}*
|
||||
|
||||
**🎯 Issues**
|
||||
• Created: ${{ steps.metrics.outputs.issues_created }}
|
||||
• Closed: ${{ steps.metrics.outputs.issues_closed }}
|
||||
• Avg Response Time: ${{ steps.metrics.outputs.issue_avg_first_response }}
|
||||
• Avg Time to Close: ${{ steps.metrics.outputs.issue_avg_time_to_close }}
|
||||
|
||||
**🔀 Pull Requests**
|
||||
• Created: ${{ steps.metrics.outputs.prs_created }}
|
||||
• Merged: ${{ steps.metrics.outputs.prs_merged }}
|
||||
• Avg Response Time: ${{ steps.metrics.outputs.pr_avg_first_response }}
|
||||
• Avg Time to Merge: ${{ steps.metrics.outputs.pr_avg_merge_time }}
|
||||
|
||||
**📈 Visual Analytics**
|
||||
https://repobeats.axiom.co/api/embed/b439f28f0ab5bd7a2da19505355693cd2c55bfd4.svg
|
||||
color: 0x58AFFF
|
||||
username: Task Master Metrics Bot
|
||||
avatar_url: https://raw.githubusercontent.com/eyaltoledano/claude-task-master/main/images/logo.png
|
||||
10
.gitignore
vendored
@@ -87,3 +87,13 @@ dev-debug.log
*.njsproj
*.sln
*.sw?

# VS Code extension test files
.vscode-test/
apps/extension/.vscode-test/

# apps/extension
apps/extension/vsix-build/

# turbo
.turbo
23
.kiro/hooks/tm-code-change-task-tracker.kiro.hook
Normal file
@@ -0,0 +1,23 @@
|
||||
{
|
||||
"enabled": true,
|
||||
"name": "[TM] Code Change Task Tracker",
|
||||
"description": "Track implementation progress by monitoring code changes",
|
||||
"version": "1",
|
||||
"when": {
|
||||
"type": "fileEdited",
|
||||
"patterns": [
|
||||
"**/*.{js,ts,jsx,tsx,py,go,rs,java,cpp,c,h,hpp,cs,rb,php,swift,kt,scala,clj}",
|
||||
"!**/node_modules/**",
|
||||
"!**/vendor/**",
|
||||
"!**/.git/**",
|
||||
"!**/build/**",
|
||||
"!**/dist/**",
|
||||
"!**/target/**",
|
||||
"!**/__pycache__/**"
|
||||
]
|
||||
},
|
||||
"then": {
|
||||
"type": "askAgent",
|
||||
"prompt": "I just saved a source code file. Please:\n\n1. Check what task is currently 'in-progress' using 'tm list --status=in-progress'\n2. Look at the file I saved and summarize what was changed (considering the programming language and context)\n3. Update the task's notes with: 'tm update-subtask --id=<task_id> --prompt=\"Implemented: <summary_of_changes> in <file_path>\"'\n4. If the changes seem to complete the task based on its description, ask if I want to mark it as done"
|
||||
}
|
||||
}
|
||||
16
.kiro/hooks/tm-complexity-analyzer.kiro.hook
Normal file
@@ -0,0 +1,16 @@
|
||||
{
|
||||
"enabled": false,
|
||||
"name": "[TM] Complexity Analyzer",
|
||||
"description": "Analyze task complexity when new tasks are added",
|
||||
"version": "1",
|
||||
"when": {
|
||||
"type": "fileEdited",
|
||||
"patterns": [
|
||||
".taskmaster/tasks/tasks.json"
|
||||
]
|
||||
},
|
||||
"then": {
|
||||
"type": "askAgent",
|
||||
"prompt": "New tasks were added to tasks.json. For each new task:\n\n1. Run 'tm analyze-complexity --id=<task_id>'\n2. If complexity score is > 7, automatically expand it: 'tm expand --id=<task_id> --num=5'\n3. Show the complexity analysis results\n4. Suggest task dependencies based on the expanded subtasks"
|
||||
}
|
||||
}
|
||||
13
.kiro/hooks/tm-daily-standup-assistant.kiro.hook
Normal file
@@ -0,0 +1,13 @@
|
||||
{
|
||||
"enabled": true,
|
||||
"name": "[TM] Daily Standup Assistant",
|
||||
"description": "Morning workflow summary and task selection",
|
||||
"version": "1",
|
||||
"when": {
|
||||
"type": "userTriggered"
|
||||
},
|
||||
"then": {
|
||||
"type": "askAgent",
|
||||
"prompt": "Good morning! Please provide my daily standup summary:\n\n1. Run 'tm list --status=done' and show tasks completed in the last 24 hours\n2. Run 'tm list --status=in-progress' to show current work\n3. Run 'tm next' to suggest the highest priority task to start\n4. Show the dependency graph for upcoming work\n5. Ask which task I'd like to focus on today"
|
||||
}
|
||||
}
|
||||
13
.kiro/hooks/tm-git-commit-task-linker.kiro.hook
Normal file
@@ -0,0 +1,13 @@
|
||||
{
|
||||
"enabled": true,
|
||||
"name": "[TM] Git Commit Task Linker",
|
||||
"description": "Link commits to tasks for traceability",
|
||||
"version": "1",
|
||||
"when": {
|
||||
"type": "manual"
|
||||
},
|
||||
"then": {
|
||||
"type": "askAgent",
|
||||
"prompt": "I'm about to commit code. Please:\n\n1. Run 'git diff --staged' to see what's being committed\n2. Analyze the changes and suggest which tasks they relate to\n3. Generate a commit message in format: 'feat(task-<id>): <description>'\n4. Update the relevant tasks with a note about this commit\n5. Show the proposed commit message for approval"
|
||||
}
|
||||
}
|
||||
13
.kiro/hooks/tm-pr-readiness-checker.kiro.hook
Normal file
@@ -0,0 +1,13 @@
|
||||
{
|
||||
"enabled": true,
|
||||
"name": "[TM] PR Readiness Checker",
|
||||
"description": "Validate tasks before creating a pull request",
|
||||
"version": "1",
|
||||
"when": {
|
||||
"type": "manual"
|
||||
},
|
||||
"then": {
|
||||
"type": "askAgent",
|
||||
"prompt": "I'm about to create a PR. Please:\n\n1. List all tasks marked as 'done' in this branch\n2. For each done task, verify:\n - All subtasks are also done\n - Test files exist for new functionality\n - No TODO comments remain related to the task\n3. Generate a PR description listing completed tasks\n4. Suggest a PR title based on the main tasks completed"
|
||||
}
|
||||
}
|
||||
17
.kiro/hooks/tm-task-dependency-auto-progression.kiro.hook
Normal file
@@ -0,0 +1,17 @@
|
||||
{
|
||||
"enabled": true,
|
||||
"name": "[TM] Task Dependency Auto-Progression",
|
||||
"description": "Automatically progress tasks when dependencies are completed",
|
||||
"version": "1",
|
||||
"when": {
|
||||
"type": "fileEdited",
|
||||
"patterns": [
|
||||
".taskmaster/tasks/tasks.json",
|
||||
".taskmaster/tasks/*.json"
|
||||
]
|
||||
},
|
||||
"then": {
|
||||
"type": "askAgent",
|
||||
"prompt": "Check the tasks.json file for any tasks that just changed status to 'done'. For each completed task:\n\n1. Find all tasks that depend on it\n2. Check if those dependent tasks now have all their dependencies satisfied\n3. If a task has all dependencies met and is still 'pending', use the command 'tm set-status --id=<task_id> --status=in-progress' to start it\n4. Show me which tasks were auto-started and why"
|
||||
}
|
||||
}
|
||||
23
.kiro/hooks/tm-test-success-task-completer.kiro.hook
Normal file
@@ -0,0 +1,23 @@
|
||||
{
|
||||
"enabled": true,
|
||||
"name": "[TM] Test Success Task Completer",
|
||||
"description": "Mark tasks as done when their tests pass",
|
||||
"version": "1",
|
||||
"when": {
|
||||
"type": "fileEdited",
|
||||
"patterns": [
|
||||
"**/*test*.{js,ts,jsx,tsx,py,go,java,rb,php,rs,cpp,cs}",
|
||||
"**/*spec*.{js,ts,jsx,tsx,rb}",
|
||||
"**/test_*.py",
|
||||
"**/*_test.go",
|
||||
"**/*Test.java",
|
||||
"**/*Tests.cs",
|
||||
"!**/node_modules/**",
|
||||
"!**/vendor/**"
|
||||
]
|
||||
},
|
||||
"then": {
|
||||
"type": "askAgent",
|
||||
"prompt": "A test file was just saved. Please:\n\n1. Identify the test framework/language and run the appropriate test command for this file (npm test, pytest, go test, cargo test, dotnet test, mvn test, etc.)\n2. If all tests pass, check which tasks mention this functionality\n3. For any matching tasks that are 'in-progress', ask if the passing tests mean the task is complete\n4. If confirmed, mark the task as done with 'tm set-status --id=<task_id> --status=done'"
|
||||
}
|
||||
}
|
||||
19
.kiro/settings/mcp.json
Normal file
@@ -0,0 +1,19 @@
{
"mcpServers": {
"task-master-ai": {
"command": "npx",
"args": ["-y", "task-master-ai"],
"env": {
"ANTHROPIC_API_KEY": "YOUR_ANTHROPIC_API_KEY_HERE",
"PERPLEXITY_API_KEY": "YOUR_PERPLEXITY_API_KEY_HERE",
"OPENAI_API_KEY": "YOUR_OPENAI_KEY_HERE",
"GOOGLE_API_KEY": "YOUR_GOOGLE_KEY_HERE",
"XAI_API_KEY": "YOUR_XAI_KEY_HERE",
"OPENROUTER_API_KEY": "YOUR_OPENROUTER_KEY_HERE",
"MISTRAL_API_KEY": "YOUR_MISTRAL_KEY_HERE",
"AZURE_OPENAI_API_KEY": "YOUR_AZURE_KEY_HERE",
"OLLAMA_API_KEY": "YOUR_OLLAMA_API_KEY_HERE"
}
}
}
}
422
.kiro/steering/dev_workflow.md
Normal file
@@ -0,0 +1,422 @@
|
||||
---
|
||||
inclusion: always
|
||||
---
|
||||
|
||||
# Taskmaster Development Workflow
|
||||
|
||||
This guide outlines the standard process for using Taskmaster to manage software development projects. It is written as a set of instructions for you, the AI agent.
|
||||
|
||||
- **Your Default Stance**: For most projects, the user can work directly within the `master` task context. Your initial actions should operate on this default context unless a clear pattern for multi-context work emerges.
|
||||
- **Your Goal**: Your role is to elevate the user's workflow by intelligently introducing advanced features like **Tagged Task Lists** when you detect the appropriate context. Do not force tags on the user; suggest them as a helpful solution to a specific need.
|
||||
|
||||
## The Basic Loop
|
||||
The fundamental development cycle you will facilitate is:
|
||||
1. **`list`**: Show the user what needs to be done.
|
||||
2. **`next`**: Help the user decide what to work on.
|
||||
3. **`show <id>`**: Provide details for a specific task.
|
||||
4. **`expand <id>`**: Break down a complex task into smaller, manageable subtasks.
|
||||
5. **Implement**: The user writes the code and tests.
|
||||
6. **`update-subtask`**: Log progress and findings on behalf of the user.
|
||||
7. **`set-status`**: Mark tasks and subtasks as `done` as work is completed.
|
||||
8. **Repeat**.
|
||||
|
||||
All your standard command executions should operate on the user's current task context, which defaults to `master`.
|
||||
|
||||
---
|
||||
|
||||
## Standard Development Workflow Process
|
||||
|
||||
### Simple Workflow (Default Starting Point)
|
||||
|
||||
For new projects or when users are getting started, operate within the `master` tag context:
|
||||
|
||||
- Start new projects by running `initialize_project` tool / `task-master init` or `parse_prd` / `task-master parse-prd --input='<prd-file.txt>'` (see @`taskmaster.md`) to generate initial tasks.json with tagged structure
|
||||
- Configure rule sets during initialization with `--rules` flag (e.g., `task-master init --rules kiro,windsurf`) or manage them later with `task-master rules add/remove` commands
|
||||
- Begin coding sessions with `get_tasks` / `task-master list` (see @`taskmaster.md`) to see current tasks, status, and IDs
|
||||
- Determine the next task to work on using `next_task` / `task-master next` (see @`taskmaster.md`)
|
||||
- Analyze task complexity with `analyze_project_complexity` / `task-master analyze-complexity --research` (see @`taskmaster.md`) before breaking down tasks
|
||||
- Review complexity report using `complexity_report` / `task-master complexity-report` (see @`taskmaster.md`)
|
||||
- Select tasks based on dependencies (all marked 'done'), priority level, and ID order
|
||||
- View specific task details using `get_task` / `task-master show <id>` (see @`taskmaster.md`) to understand implementation requirements
|
||||
- Break down complex tasks using `expand_task` / `task-master expand --id=<id> --force --research` (see @`taskmaster.md`) with appropriate flags like `--force` (to replace existing subtasks) and `--research`
|
||||
- Implement code following task details, dependencies, and project standards
|
||||
- Mark completed tasks with `set_task_status` / `task-master set-status --id=<id> --status=done` (see @`taskmaster.md`)
|
||||
- Update dependent tasks when implementation differs from original plan using `update` / `task-master update --from=<id> --prompt="..."` or `update_task` / `task-master update-task --id=<id> --prompt="..."` (see @`taskmaster.md`)
|
||||
|
||||
---
|
||||
|
||||
## Leveling Up: Agent-Led Multi-Context Workflows
|
||||
|
||||
While the basic workflow is powerful, your primary opportunity to add value is by identifying when to introduce **Tagged Task Lists**. These patterns are your tools for creating a more organized and efficient development environment for the user, especially if you detect agentic or parallel development happening across the same session.
|
||||
|
||||
**Critical Principle**: Most users should never see a difference in their experience. Only introduce advanced workflows when you detect clear indicators that the project has evolved beyond simple task management.
|
||||
|
||||
### When to Introduce Tags: Your Decision Patterns
|
||||
|
||||
Here are the patterns to look for. When you detect one, you should propose the corresponding workflow to the user.
|
||||
|
||||
#### Pattern 1: Simple Git Feature Branching
|
||||
This is the most common and direct use case for tags.
|
||||
|
||||
- **Trigger**: The user creates a new git branch (e.g., `git checkout -b feature/user-auth`).
|
||||
- **Your Action**: Propose creating a new tag that mirrors the branch name to isolate the feature's tasks from `master`.
|
||||
- **Your Suggested Prompt**: *"I see you've created a new branch named 'feature/user-auth'. To keep all related tasks neatly organized and separate from your main list, I can create a corresponding task tag for you. This helps prevent merge conflicts in your `tasks.json` file later. Shall I create the 'feature-user-auth' tag?"*
|
||||
- **Tool to Use**: `task-master add-tag --from-branch`
|
||||
|
||||
#### Pattern 2: Team Collaboration
|
||||
- **Trigger**: The user mentions working with teammates (e.g., "My teammate Alice is handling the database schema," or "I need to review Bob's work on the API.").
|
||||
- **Your Action**: Suggest creating a separate tag for the user's work to prevent conflicts with shared master context.
|
||||
- **Your Suggested Prompt**: *"Since you're working with Alice, I can create a separate task context for your work to avoid conflicts. This way, Alice can continue working with the master list while you have your own isolated context. When you're ready to merge your work, we can coordinate the tasks back to master. Shall I create a tag for your current work?"*
|
||||
- **Tool to Use**: `task-master add-tag my-work --copy-from-current --description="My tasks while collaborating with Alice"`
|
||||
|
||||
#### Pattern 3: Experiments or Risky Refactors
|
||||
- **Trigger**: The user wants to try something that might not be kept (e.g., "I want to experiment with switching our state management library," or "Let's refactor the old API module, but I want to keep the current tasks as a reference.").
|
||||
- **Your Action**: Propose creating a sandboxed tag for the experimental work.
|
||||
- **Your Suggested Prompt**: *"This sounds like a great experiment. To keep these new tasks separate from our main plan, I can create a temporary 'experiment-zustand' tag for this work. If we decide not to proceed, we can simply delete the tag without affecting the main task list. Sound good?"*
|
||||
- **Tool to Use**: `task-master add-tag experiment-zustand --description="Exploring Zustand migration"`
|
||||
|
||||
#### Pattern 4: Large Feature Initiatives (PRD-Driven)
|
||||
This is a more structured approach for significant new features or epics.
|
||||
|
||||
- **Trigger**: The user describes a large, multi-step feature that would benefit from a formal plan.
|
||||
- **Your Action**: Propose a comprehensive, PRD-driven workflow.
|
||||
- **Your Suggested Prompt**: *"This sounds like a significant new feature. To manage this effectively, I suggest we create a dedicated task context for it. Here's the plan: I'll create a new tag called 'feature-xyz', then we can draft a Product Requirements Document (PRD) together to scope the work. Once the PRD is ready, I'll automatically generate all the necessary tasks within that new tag. How does that sound?"*
|
||||
- **Your Implementation Flow**:
|
||||
1. **Create an empty tag**: `task-master add-tag feature-xyz --description "Tasks for the new XYZ feature"`. You can also start by creating a git branch if applicable, and then create the tag from that branch.
|
||||
2. **Collaborate & Create PRD**: Work with the user to create a detailed PRD file (e.g., `.taskmaster/docs/feature-xyz-prd.txt`).
|
||||
3. **Parse PRD into the new tag**: `task-master parse-prd .taskmaster/docs/feature-xyz-prd.txt --tag feature-xyz`
|
||||
4. **Prepare the new task list**: Follow up by suggesting `analyze-complexity` and `expand-all` for the newly created tasks within the `feature-xyz` tag.
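
In CLI form, that flow might look roughly like this (tag name and PRD path are illustrative):

```bash
task-master add-tag feature-xyz --description="Tasks for the new XYZ feature"
# ...collaborate with the user on .taskmaster/docs/feature-xyz-prd.txt...
task-master parse-prd .taskmaster/docs/feature-xyz-prd.txt --tag=feature-xyz
task-master analyze-complexity --tag=feature-xyz --research
task-master expand --all --tag=feature-xyz --research
```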
|
||||
|
||||
#### Pattern 5: Version-Based Development
|
||||
Tailor your approach based on the project maturity indicated by tag names.
|
||||
|
||||
- **Prototype/MVP Tags** (`prototype`, `mvp`, `poc`, `v0.x`):
|
||||
- **Your Approach**: Focus on speed and functionality over perfection
|
||||
- **Task Generation**: Create tasks that emphasize "get it working" over "get it perfect"
|
||||
- **Complexity Level**: Lower complexity, fewer subtasks, more direct implementation paths
|
||||
- **Research Prompts**: Include context like "This is a prototype - prioritize speed and basic functionality over optimization"
|
||||
- **Example Prompt Addition**: *"Since this is for the MVP, I'll focus on tasks that get core functionality working quickly rather than over-engineering."*
|
||||
|
||||
- **Production/Mature Tags** (`v1.0+`, `production`, `stable`):
|
||||
- **Your Approach**: Emphasize robustness, testing, and maintainability
|
||||
- **Task Generation**: Include comprehensive error handling, testing, documentation, and optimization
|
||||
- **Complexity Level**: Higher complexity, more detailed subtasks, thorough implementation paths
|
||||
- **Research Prompts**: Include context like "This is for production - prioritize reliability, performance, and maintainability"
|
||||
- **Example Prompt Addition**: *"Since this is for production, I'll ensure tasks include proper error handling, testing, and documentation."*
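
For example, this maturity context can be passed straight into expansion prompts (task IDs and wording are illustrative):

```bash
# prototype/MVP context: bias toward speed
task-master expand --id=12 --prompt="This is a prototype - prioritize speed and basic functionality over optimization"

# production context: bias toward robustness
task-master expand --id=12 --research --prompt="This is for production - prioritize reliability, performance, and maintainability"
```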
|
||||
|
||||
### Advanced Workflow (Tag-Based & PRD-Driven)
|
||||
|
||||
**When to Transition**: Recognize when the project has evolved beyond simple task management (or when Taskmaster has been initialized on an existing codebase). Look for these indicators:
|
||||
- User mentions teammates or collaboration needs
|
||||
- Project has grown to 15+ tasks with mixed priorities
|
||||
- User creates feature branches or mentions major initiatives
|
||||
- User initializes Taskmaster on an existing, complex codebase
|
||||
- User describes large features that would benefit from dedicated planning
|
||||
|
||||
**Your Role in Transition**: Guide the user to a more sophisticated workflow that leverages tags for organization and PRDs for comprehensive planning.
|
||||
|
||||
#### Master List Strategy (High-Value Focus)
|
||||
Once you transition to tag-based workflows, the `master` tag should ideally contain only:
|
||||
- **High-level deliverables** that provide significant business value
|
||||
- **Major milestones** and epic-level features
|
||||
- **Critical infrastructure** work that affects the entire project
|
||||
- **Release-blocking** items
|
||||
|
||||
**What NOT to put in master**:
|
||||
- Detailed implementation subtasks (these go in feature-specific tags' parent tasks)
|
||||
- Refactoring work (create dedicated tags like `refactor-auth`)
|
||||
- Experimental features (use `experiment-*` tags)
|
||||
- Team member-specific tasks (use person-specific tags)
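
To keep `master` high-level, an epic can be tracked there while its breakdown lives in a feature tag. A hedged sketch (task wording is illustrative):

```bash
task-master add-task --tag=master --priority=high \
  --prompt="User Dashboard epic: analytics, user management, reporting (details tracked in the feature-dashboard tag)"
```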
|
||||
|
||||
#### PRD-Driven Feature Development
|
||||
|
||||
**For New Major Features**:
|
||||
1. **Identify the Initiative**: When user describes a significant feature
|
||||
2. **Create Dedicated Tag**: `add_tag feature-[name] --description="[Feature description]"`
|
||||
3. **Collaborative PRD Creation**: Work with user to create comprehensive PRD in `.taskmaster/docs/feature-[name]-prd.txt`
|
||||
4. **Parse & Prepare**:
|
||||
- `parse_prd .taskmaster/docs/feature-[name]-prd.txt --tag=feature-[name]`
|
||||
- `analyze_project_complexity --tag=feature-[name] --research`
|
||||
- `expand_all --tag=feature-[name] --research`
|
||||
5. **Add Master Reference**: Create a high-level task in `master` that references the feature tag
|
||||
|
||||
**For Existing Codebase Analysis**:
|
||||
When users initialize Taskmaster on existing projects:
|
||||
1. **Codebase Discovery**: Use your native tools to build deep context about the codebase. You may also use the `research` tool with `--tree` and `--files` to gather up-to-date information with the existing architecture as context (see the example after this list).
|
||||
2. **Collaborative Assessment**: Work with user to identify improvement areas, technical debt, or new features
|
||||
3. **Strategic PRD Creation**: Co-author PRDs that include:
|
||||
- Current state analysis (based on your codebase research)
|
||||
- Proposed improvements or new features
|
||||
- Implementation strategy considering existing code
|
||||
4. **Tag-Based Organization**: Parse PRDs into appropriate tags (`refactor-api`, `feature-dashboard`, `tech-debt`, etc.)
|
||||
5. **Master List Curation**: Keep only the most valuable initiatives in master
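
For instance, the codebase discovery step might start with something like this (query and paths are illustrative):

```bash
task-master research "Current architecture of the payments module and candidate refactor targets" --tree --files=src/payments/
```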
|
||||
|
||||
The `parse-prd` command's `--append` flag lets the user parse multiple PRDs within a tag or across tags, as shown below. PRDs should stay focused, and the number of tasks they are parsed into should be chosen strategically relative to each PRD's complexity and level of detail.
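
A sketch of appending a second, focused PRD into an existing tag (file names are illustrative):

```bash
task-master parse-prd .taskmaster/docs/feature-dashboard-prd.txt --tag=feature-dashboard
task-master parse-prd .taskmaster/docs/dashboard-reporting-prd.txt --tag=feature-dashboard --append
```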
|
||||
|
||||
### Workflow Transition Examples
|
||||
|
||||
**Example 1: Simple → Team-Based**
|
||||
```
|
||||
User: "Alice is going to help with the API work"
|
||||
Your Response: "Great! To avoid conflicts, I'll create a separate task context for your work. Alice can continue with the master list while you work in your own context. When you're ready to merge, we can coordinate the tasks back together."
|
||||
Action: add_tag my-api-work --copy-from-current --description="My API tasks while collaborating with Alice"
|
||||
```
|
||||
|
||||
**Example 2: Simple → PRD-Driven**
|
||||
```
|
||||
User: "I want to add a complete user dashboard with analytics, user management, and reporting"
|
||||
Your Response: "This sounds like a major feature that would benefit from detailed planning. Let me create a dedicated context for this work and we can draft a PRD together to ensure we capture all requirements."
|
||||
Actions:
|
||||
1. add_tag feature-dashboard --description="User dashboard with analytics and management"
|
||||
2. Collaborate on PRD creation
|
||||
3. parse_prd dashboard-prd.txt --tag=feature-dashboard
|
||||
4. Add high-level "User Dashboard" task to master
|
||||
```
|
||||
|
||||
**Example 3: Existing Project → Strategic Planning**
|
||||
```
|
||||
User: "I just initialized Taskmaster on my existing React app. It's getting messy and I want to improve it."
|
||||
Your Response: "Let me research your codebase to understand the current architecture, then we can create a strategic plan for improvements."
|
||||
Actions:
|
||||
1. research "Current React app architecture and improvement opportunities" --tree --files=src/
|
||||
2. Collaborate on improvement PRD based on findings
|
||||
3. Create tags for different improvement areas (refactor-components, improve-state-management, etc.)
|
||||
4. Keep only major improvement initiatives in master
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Primary Interaction: MCP Server vs. CLI
|
||||
|
||||
Taskmaster offers two primary ways to interact:
|
||||
|
||||
1. **MCP Server (Recommended for Integrated Tools)**:
|
||||
- For AI agents and integrated development environments (like Kiro), interacting via the **MCP server is the preferred method**.
|
||||
- The MCP server exposes Taskmaster functionality through a set of tools (e.g., `get_tasks`, `add_subtask`).
|
||||
- This method offers better performance, structured data exchange, and richer error handling compared to CLI parsing.
|
||||
- Refer to @`mcp.md` for details on the MCP architecture and available tools.
|
||||
- A comprehensive list and description of MCP tools and their corresponding CLI commands can be found in @`taskmaster.md`.
|
||||
- **Restart the MCP server** if core logic in `scripts/modules` or MCP tool/direct function definitions change.
|
||||
- **Note**: MCP tools fully support tagged task lists with complete tag management capabilities.
|
||||
|
||||
2. **`task-master` CLI (For Users & Fallback)**:
|
||||
- The global `task-master` command provides a user-friendly interface for direct terminal interaction.
|
||||
- It can also serve as a fallback if the MCP server is inaccessible or a specific function isn't exposed via MCP.
|
||||
- Install globally with `npm install -g task-master-ai` or use locally via `npx task-master-ai ...`.
|
||||
- The CLI commands often mirror the MCP tools (e.g., `task-master list` corresponds to `get_tasks`).
|
||||
- Refer to @`taskmaster.md` for a detailed command reference.
|
||||
- **Tagged Task Lists**: CLI fully supports the new tagged system with seamless migration.
|
||||
|
||||
## How the Tag System Works (For Your Reference)
|
||||
|
||||
- **Data Structure**: Tasks are organized into separate contexts (tags) like "master", "feature-branch", or "v2.0".
|
||||
- **Silent Migration**: Existing projects automatically migrate to use a "master" tag with zero disruption.
|
||||
- **Context Isolation**: Tasks in different tags are completely separate. Changes in one tag do not affect any other tag.
|
||||
- **Manual Control**: The user is always in control. There is no automatic switching. You facilitate switching by using `use-tag <name>`.
|
||||
- **Full CLI & MCP Support**: All tag management commands are available through both the CLI and MCP tools for you to use. Refer to @`taskmaster.md` for a full command list.
|
||||
|
||||
---
|
||||
|
||||
## Task Complexity Analysis
|
||||
|
||||
- Run `analyze_project_complexity` / `task-master analyze-complexity --research` (see @`taskmaster.md`) for comprehensive analysis
|
||||
- Review complexity report via `complexity_report` / `task-master complexity-report` (see @`taskmaster.md`) for a formatted, readable version.
|
||||
- Focus on tasks with highest complexity scores (8-10) for detailed breakdown
|
||||
- Use analysis results to determine appropriate subtask allocation
|
||||
- Note that reports are automatically used by the `expand_task` tool/command
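
A typical sequence:

```bash
task-master analyze-complexity --research
task-master complexity-report
```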
|
||||
|
||||
## Task Breakdown Process
|
||||
|
||||
- Use `expand_task` / `task-master expand --id=<id>`. It automatically uses the complexity report if one is found; otherwise it generates a default number of subtasks.
|
||||
- Use `--num=<number>` to specify an explicit number of subtasks, overriding defaults or complexity report recommendations.
|
||||
- Add `--research` flag to leverage Perplexity AI for research-backed expansion.
|
||||
- Add `--force` flag to clear existing subtasks before generating new ones (default is to append).
|
||||
- Use `--prompt="<context>"` to provide additional context when needed.
|
||||
- Review and adjust generated subtasks as necessary.
|
||||
- Use `expand_all` tool or `task-master expand --all` to expand multiple pending tasks at once, respecting flags like `--force` and `--research`.
|
||||
- If subtasks need complete replacement (regardless of the `--force` flag on `expand`), clear them first with `clear_subtasks` / `task-master clear-subtasks --id=<id>`.
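
For example (task IDs are illustrative):

```bash
task-master expand --id=8 --num=5 --research --prompt="Focus on the data model before the UI"
task-master clear-subtasks --id=8     # only if the existing breakdown must be discarded first
task-master expand --all --research   # expand every eligible pending task in one pass
```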
|
||||
|
||||
## Implementation Drift Handling
|
||||
|
||||
- When implementation differs significantly from planned approach
|
||||
- When future tasks need modification due to current implementation choices
|
||||
- When new dependencies or requirements emerge
|
||||
- Use `update` / `task-master update --from=<futureTaskId> --prompt='<explanation>\nUpdate context...' --research` to update multiple future tasks.
|
||||
- Use `update_task` / `task-master update-task --id=<taskId> --prompt='<explanation>\nUpdate context...' --research` to update a single specific task.
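
For example (IDs and explanations are illustrative):

```bash
task-master update --from=18 --research --prompt="Switched from Redux Toolkit to React Query; refactor upcoming data-fetching tasks accordingly"
task-master update-task --id=21 --research --prompt="Auth middleware was implemented as a plugin; adjust this task's approach to match"
```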
|
||||
|
||||
## Task Status Management
|
||||
|
||||
- Use 'pending' for tasks ready to be worked on
|
||||
- Use 'done' for completed and verified tasks
|
||||
- Use 'deferred' for postponed tasks
|
||||
- Add custom status values as needed for project-specific workflows
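
For example:

```bash
task-master set-status --id=5 --status=in-progress
task-master set-status --id=5.1,5.2 --status=done
task-master set-status --id=9 --status=deferred
```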
|
||||
|
||||
## Task Structure Fields
|
||||
|
||||
- **id**: Unique identifier for the task (Example: `1`, `1.1`)
|
||||
- **title**: Brief, descriptive title (Example: `"Initialize Repo"`)
|
||||
- **description**: Concise summary of what the task involves (Example: `"Create a new repository, set up initial structure."`)
|
||||
- **status**: Current state of the task (Example: `"pending"`, `"done"`, `"deferred"`)
|
||||
- **dependencies**: IDs of prerequisite tasks (Example: `[1, 2.1]`)
|
||||
- Dependencies are displayed with status indicators (✅ for completed, ⏱️ for pending)
|
||||
- This helps quickly identify which prerequisite tasks are blocking work
|
||||
- **priority**: Importance level (Example: `"high"`, `"medium"`, `"low"`)
|
||||
- **details**: In-depth implementation instructions (Example: `"Use GitHub client ID/secret, handle callback, set session token."`)
|
||||
- **testStrategy**: Verification approach (Example: `"Deploy and call endpoint to confirm 'Hello World' response."`)
|
||||
- **subtasks**: List of smaller, more specific tasks (Example: `[{"id": 1, "title": "Configure OAuth", ...}]`)
|
||||
- Refer to task structure details (previously linked to `tasks.md`).
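
Putting these fields together, a single entry in `tasks.json` might look roughly like this (values are illustrative, drawn from the examples above):

```json
{
  "id": 1,
  "title": "Initialize Repo",
  "description": "Create a new repository, set up initial structure.",
  "status": "pending",
  "dependencies": [],
  "priority": "high",
  "details": "Use GitHub client ID/secret, handle callback, set session token.",
  "testStrategy": "Deploy and call endpoint to confirm 'Hello World' response.",
  "subtasks": [
    { "id": 1, "title": "Configure OAuth", "status": "pending" }
  ]
}
```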
|
||||
|
||||
## Configuration Management (Updated)
|
||||
|
||||
Taskmaster configuration is managed through two main mechanisms:
|
||||
|
||||
1. **`.taskmaster/config.json` File (Primary):**
|
||||
* Located in the project root directory.
|
||||
* Stores most configuration settings: AI model selections (main, research, fallback), parameters (max tokens, temperature), logging level, default subtasks/priority, project name, etc.
|
||||
* **Tagged System Settings**: Includes `global.defaultTag` (defaults to "master") and `tags` section for tag management configuration.
|
||||
* **Managed via `task-master models --setup` command.** Do not edit manually unless you know what you are doing.
|
||||
* **View/Set specific models via `task-master models` command or `models` MCP tool.**
|
||||
* Created automatically when you run `task-master models --setup` for the first time or during tagged system migration.
|
||||
|
||||
2. **Environment Variables (`.env` / `mcp.json`):**
|
||||
* Used **only** for sensitive API keys and specific endpoint URLs.
|
||||
* Place API keys (one per provider) in a `.env` file in the project root for CLI usage.
|
||||
* For MCP/Kiro integration, configure these keys in the `env` section of `.kiro/mcp.json`.
|
||||
* Available keys/variables: See `assets/env.example` or the Configuration section in the command reference (previously linked to `taskmaster.md`).
|
||||
|
||||
3. **`.taskmaster/state.json` File (Tagged System State):**
|
||||
* Tracks current tag context and migration status.
|
||||
* Automatically created during tagged system migration.
|
||||
* Contains: `currentTag`, `lastSwitched`, `migrationNoticeShown`.
|
||||
|
||||
**Important:** Non-API key settings (like model selections, `MAX_TOKENS`, `TASKMASTER_LOG_LEVEL`) are **no longer configured via environment variables**. Use the `task-master models` command (or `--setup` for interactive configuration) or the `models` MCP tool.
|
||||
**If AI commands FAIL in MCP** verify that the API key for the selected provider is present in the `env` section of `.kiro/mcp.json`.
|
||||
**If AI commands FAIL in CLI** verify that the API key for the selected provider is present in the `.env` file in the root of the project.
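
As an illustrative sketch, the CLI reads provider keys from a `.env` file in the project root; the same variable names go in the `env` section of `.kiro/mcp.json` for MCP use (exact key names depend on the providers you configure):

```bash
# .env — read by the task-master CLI
ANTHROPIC_API_KEY=sk-ant-...
PERPLEXITY_API_KEY=pplx-...
```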
|
||||
|
||||
## Rules Management
|
||||
|
||||
Taskmaster supports multiple AI coding assistant rule sets that can be configured during project initialization or managed afterward:
|
||||
|
||||
- **Available Profiles**: Claude Code, Cline, Codex, Kiro, Roo Code, Trae, Windsurf (claude, cline, codex, kiro, roo, trae, windsurf)
|
||||
- **During Initialization**: Use `task-master init --rules kiro,windsurf` to specify which rule sets to include
|
||||
- **After Initialization**: Use `task-master rules add <profiles>` or `task-master rules remove <profiles>` to manage rule sets
|
||||
- **Interactive Setup**: Use `task-master rules setup` to launch an interactive prompt for selecting rule profiles
|
||||
- **Default Behavior**: If no `--rules` flag is specified during initialization, all available rule profiles are included
|
||||
- **Rule Structure**: Each profile creates its own directory (e.g., `.kiro/steering`, `.roo/rules`) with appropriate configuration files
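
For example:

```bash
task-master init --rules kiro,windsurf   # include only these profiles at init time
task-master rules add roo                # add a profile later
task-master rules remove trae            # drop one that isn't needed
task-master rules setup                  # interactive selection
```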
|
||||
|
||||
## Determining the Next Task
|
||||
|
||||
- Run `next_task` / `task-master next` to show the next task to work on.
|
||||
- The command identifies tasks with all dependencies satisfied
|
||||
- Tasks are prioritized by priority level, dependency count, and ID
|
||||
- The command shows comprehensive task information including:
|
||||
- Basic task details and description
|
||||
- Implementation details
|
||||
- Subtasks (if they exist)
|
||||
- Contextual suggested actions
|
||||
- Recommended before starting any new development work
|
||||
- Respects your project's dependency structure
|
||||
- Ensures tasks are completed in the appropriate sequence
|
||||
- Provides ready-to-use commands for common task actions
|
||||
|
||||
## Viewing Specific Task Details
|
||||
|
||||
- Run `get_task` / `task-master show <id>` to view a specific task.
|
||||
- Use dot notation for subtasks: `task-master show 1.2` (shows subtask 2 of task 1)
|
||||
- Displays comprehensive information similar to the next command, but for a specific task
|
||||
- For parent tasks, shows all subtasks and their current status
|
||||
- For subtasks, shows parent task information and relationship
|
||||
- Provides contextual suggested actions appropriate for the specific task
|
||||
- Useful for examining task details before implementation or checking status
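
For example (IDs are illustrative):

```bash
task-master show 15          # a single task with its subtasks
task-master show 15.2        # subtask 2 of task 15
task-master show 1,5,10.2    # several at once, rendered as a summary table
```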
|
||||
|
||||
## Managing Task Dependencies
|
||||
|
||||
- Use `add_dependency` / `task-master add-dependency --id=<id> --depends-on=<id>` to add a dependency.
|
||||
- Use `remove_dependency` / `task-master remove-dependency --id=<id> --depends-on=<id>` to remove a dependency.
|
||||
- The system prevents circular dependencies and duplicate dependency entries
|
||||
- Dependencies are checked for existence before being added or removed
|
||||
- Task files are automatically regenerated after dependency changes
|
||||
- Dependencies are visualized with status indicators in task listings and files
|
||||
|
||||
## Task Reorganization
|
||||
|
||||
- Use `move_task` / `task-master move --from=<id> --to=<id>` to move tasks or subtasks within the hierarchy
|
||||
- This command supports several use cases:
|
||||
- Moving a standalone task to become a subtask (e.g., `--from=5 --to=7`)
|
||||
- Moving a subtask to become a standalone task (e.g., `--from=5.2 --to=7`)
|
||||
- Moving a subtask to a different parent (e.g., `--from=5.2 --to=7.3`)
|
||||
- Reordering subtasks within the same parent (e.g., `--from=5.2 --to=5.4`)
|
||||
- Moving a task to a new, non-existent ID position (e.g., `--from=5 --to=25`)
|
||||
- Moving multiple tasks at once using comma-separated IDs (e.g., `--from=10,11,12 --to=16,17,18`)
|
||||
- The system includes validation to prevent data loss:
|
||||
- Allows moving to non-existent IDs by creating placeholder tasks
|
||||
- Prevents moving to existing task IDs that have content (to avoid overwriting)
|
||||
- Validates source tasks exist before attempting to move them
|
||||
- The system maintains proper parent-child relationships and dependency integrity
|
||||
- Task files are automatically regenerated after the move operation
|
||||
- This provides greater flexibility in organizing and refining your task structure as project understanding evolves
|
||||
- This is especially useful when resolving merge conflicts that arise from teams creating tasks on separate branches: move your tasks to free ID positions and keep theirs.
|
||||
|
||||
## Iterative Subtask Implementation
|
||||
|
||||
Once a task has been broken down into subtasks using `expand_task` or similar methods, follow this iterative process for implementation:
|
||||
|
||||
1. **Understand the Goal (Preparation):**
|
||||
* Use `get_task` / `task-master show <subtaskId>` (see @`taskmaster.md`) to thoroughly understand the specific goals and requirements of the subtask.
|
||||
|
||||
2. **Initial Exploration & Planning (Iteration 1):**
|
||||
* This is the first attempt at creating a concrete implementation plan.
|
||||
* Explore the codebase to identify the precise files, functions, and even specific lines of code that will need modification.
|
||||
* Determine the intended code changes (diffs) and their locations.
|
||||
* Gather *all* relevant details from this exploration phase.
|
||||
|
||||
3. **Log the Plan:**
|
||||
* Run `update_subtask` / `task-master update-subtask --id=<subtaskId> --prompt='<detailed plan>'`.
|
||||
* Provide the *complete and detailed* findings from the exploration phase in the prompt. Include file paths, line numbers, proposed diffs, reasoning, and any potential challenges identified. Do not omit details. The goal is to create a rich, timestamped log within the subtask's `details`.
|
||||
|
||||
4. **Verify the Plan:**
|
||||
* Run `get_task` / `task-master show <subtaskId>` again to confirm that the detailed implementation plan has been successfully appended to the subtask's details.
|
||||
|
||||
5. **Begin Implementation:**
|
||||
* Set the subtask status using `set_task_status` / `task-master set-status --id=<subtaskId> --status=in-progress`.
|
||||
* Start coding based on the logged plan.
|
||||
|
||||
6. **Refine and Log Progress (Iteration 2+):**
|
||||
* As implementation progresses, you will encounter challenges, discover nuances, or confirm successful approaches.
|
||||
* **Before appending new information**: Briefly review the *existing* details logged in the subtask (using `get_task` or recalling from context) to ensure the update adds fresh insights and avoids redundancy.
|
||||
* **Regularly** use `update_subtask` / `task-master update-subtask --id=<subtaskId> --prompt='<update details>\n- What worked...\n- What didn't work...'` to append new findings.
|
||||
* **Crucially, log:**
|
||||
* What worked ("fundamental truths" discovered).
|
||||
* What didn't work and why (to avoid repeating mistakes).
|
||||
* Specific code snippets or configurations that were successful.
|
||||
* Decisions made, especially if confirmed with user input.
|
||||
* Any deviations from the initial plan and the reasoning.
|
||||
* The objective is to continuously enrich the subtask's details, creating a log of the implementation journey that helps the AI (and human developers) learn, adapt, and avoid repeating errors.
|
||||
|
||||
7. **Review & Update Rules (Post-Implementation):**
|
||||
* Once the implementation for the subtask is functionally complete, review all code changes and the relevant chat history.
|
||||
* Identify any new or modified code patterns, conventions, or best practices established during the implementation.
|
||||
* Create new or update existing rules following internal guidelines (previously linked to `cursor_rules.md` and `self_improve.md`).
|
||||
|
||||
8. **Mark Task Complete:**
|
||||
* After verifying the implementation and updating any necessary rules, mark the subtask as completed: `set_task_status` / `task-master set-status --id=<subtaskId> --status=done`.
|
||||
|
||||
9. **Commit Changes (If using Git):**
|
||||
* Stage the relevant code changes and any updated/new rule files (`git add .`).
|
||||
* Craft a comprehensive Git commit message summarizing the work done for the subtask, including both code implementation and any rule adjustments.
|
||||
* Execute the commit command directly in the terminal (e.g., `git commit -m 'feat(module): Implement feature X for subtask <subtaskId>\n\n- Details about changes...\n- Updated rule Y for pattern Z'`).
|
||||
* Consider if a Changeset is needed according to internal versioning guidelines (previously linked to `changeset.md`). If so, run `npm run changeset`, stage the generated file, and amend the commit or create a new one.
|
||||
|
||||
10. **Proceed to Next Subtask:**
|
||||
* Identify the next subtask (e.g., using `next_task` / `task-master next`).
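
Condensed into commands, one pass through this loop might look like the following (IDs, paths, and messages are illustrative):

```bash
task-master show 5.2
task-master update-subtask --id=5.2 --prompt="Plan: modify src/auth/session.ts to add refresh handling; risk: cookie size limits"
task-master set-status --id=5.2 --status=in-progress
# ...implement, then log what worked and what didn't...
task-master update-subtask --id=5.2 --prompt="Worked: refresh via middleware. Did not work: cookie-only approach (size limits)."
task-master set-status --id=5.2 --status=done
git add . && git commit -m "feat(auth): implement session refresh for subtask 5.2"
```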
|
||||
|
||||
## Code Analysis & Refactoring Techniques
|
||||
|
||||
- **Top-Level Function Search**:
|
||||
- Useful for understanding module structure or planning refactors.
|
||||
- Use grep/ripgrep to find exported functions/constants:
|
||||
`rg "export (async function|function|const) \w+"` or similar patterns.
|
||||
- Can help compare functions between files during migrations or identify potential naming conflicts.
|
||||
|
||||
---
|
||||
*This workflow provides a general guideline. Adapt it based on your specific project needs and team practices.*
|
||||
.kiro/steering/kiro_rules.md (new file, 51 lines)
@@ -0,0 +1,51 @@
|
||||
---
|
||||
inclusion: always
|
||||
---
|
||||
|
||||
- **Required Rule Structure:**
|
||||
```markdown
|
||||
---
|
||||
description: Clear, one-line description of what the rule enforces
|
||||
globs: path/to/files/*.ext, other/path/**/*
|
||||
alwaysApply: boolean
|
||||
---
|
||||
|
||||
- **Main Points in Bold**
|
||||
- Sub-points with details
|
||||
- Examples and explanations
|
||||
```
|
||||
|
||||
- **File References:**
|
||||
- Use `[filename](mdc:path/to/file)` ([filename](mdc:filename)) to reference files
|
||||
- Example: [prisma.md](.kiro/steering/prisma.md) for rule references
|
||||
- Example: [schema.prisma](mdc:prisma/schema.prisma) for code references
|
||||
|
||||
- **Code Examples:**
|
||||
- Use language-specific code blocks
|
||||
```typescript
|
||||
// ✅ DO: Show good examples
|
||||
const goodExample = true;
|
||||
|
||||
// ❌ DON'T: Show anti-patterns
|
||||
const badExample = false;
|
||||
```
|
||||
|
||||
- **Rule Content Guidelines:**
|
||||
- Start with high-level overview
|
||||
- Include specific, actionable requirements
|
||||
- Show examples of correct implementation
|
||||
- Reference existing code when possible
|
||||
- Keep rules DRY by referencing other rules
|
||||
|
||||
- **Rule Maintenance:**
|
||||
- Update rules when new patterns emerge
|
||||
- Add examples from actual codebase
|
||||
- Remove outdated patterns
|
||||
- Cross-reference related rules
|
||||
|
||||
- **Best Practices:**
|
||||
- Use bullet points for clarity
|
||||
- Keep descriptions concise
|
||||
- Include both DO and DON'T examples
|
||||
- Reference actual code over theoretical examples
|
||||
- Use consistent formatting across rules
|
||||
.kiro/steering/self_improve.md (new file, 70 lines)
@@ -0,0 +1,70 @@
|
||||
---
|
||||
inclusion: always
|
||||
---
|
||||
|
||||
- **Rule Improvement Triggers:**
|
||||
- New code patterns not covered by existing rules
|
||||
- Repeated similar implementations across files
|
||||
- Common error patterns that could be prevented
|
||||
- New libraries or tools being used consistently
|
||||
- Emerging best practices in the codebase
|
||||
|
||||
- **Analysis Process:**
|
||||
- Compare new code with existing rules
|
||||
- Identify patterns that should be standardized
|
||||
- Look for references to external documentation
|
||||
- Check for consistent error handling patterns
|
||||
- Monitor test patterns and coverage
|
||||
|
||||
- **Rule Updates:**
|
||||
- **Add New Rules When:**
|
||||
- A new technology/pattern is used in 3+ files
|
||||
- Common bugs could be prevented by a rule
|
||||
- Code reviews repeatedly mention the same feedback
|
||||
- New security or performance patterns emerge
|
||||
|
||||
- **Modify Existing Rules When:**
|
||||
- Better examples exist in the codebase
|
||||
- Additional edge cases are discovered
|
||||
- Related rules have been updated
|
||||
- Implementation details have changed
|
||||
|
||||
- **Example Pattern Recognition:**
|
||||
```typescript
|
||||
// If you see repeated patterns like:
|
||||
const data = await prisma.user.findMany({
|
||||
select: { id: true, email: true },
|
||||
where: { status: 'ACTIVE' }
|
||||
});
|
||||
|
||||
// Consider adding to [prisma.md](.kiro/steering/prisma.md):
|
||||
// - Standard select fields
|
||||
// - Common where conditions
|
||||
// - Performance optimization patterns
|
||||
```
|
||||
|
||||
- **Rule Quality Checks:**
|
||||
- Rules should be actionable and specific
|
||||
- Examples should come from actual code
|
||||
- References should be up to date
|
||||
- Patterns should be consistently enforced
|
||||
|
||||
- **Continuous Improvement:**
|
||||
- Monitor code review comments
|
||||
- Track common development questions
|
||||
- Update rules after major refactors
|
||||
- Add links to relevant documentation
|
||||
- Cross-reference related rules
|
||||
|
||||
- **Rule Deprecation:**
|
||||
- Mark outdated patterns as deprecated
|
||||
- Remove rules that no longer apply
|
||||
- Update references to deprecated rules
|
||||
- Document migration paths for old patterns
|
||||
|
||||
- **Documentation Updates:**
|
||||
- Keep examples synchronized with code
|
||||
- Update references to external docs
|
||||
- Maintain links between related rules
|
||||
- Document breaking changes
|
||||
Follow [kiro_rules.md](.kiro/steering/kiro_rules.md) for proper rule formatting and structure.
|
||||
.kiro/steering/taskmaster.md (new file, 556 lines)
@@ -0,0 +1,556 @@
|
||||
---
|
||||
inclusion: always
|
||||
---
|
||||
|
||||
# Taskmaster Tool & Command Reference
|
||||
|
||||
This document provides a detailed reference for interacting with Taskmaster, covering both the recommended MCP tools, suitable for integrations like Kiro, and the corresponding `task-master` CLI commands, designed for direct user interaction or fallback.
|
||||
|
||||
**Note:** For interacting with Taskmaster programmatically or via integrated tools, using the **MCP tools is strongly recommended** due to better performance, structured data, and error handling. The CLI commands serve as a user-friendly alternative and fallback.
|
||||
|
||||
**Important:** Several MCP tools involve AI processing... The AI-powered tools include `parse_prd`, `analyze_project_complexity`, `update_subtask`, `update_task`, `update`, `expand_all`, `expand_task`, and `add_task`.
|
||||
|
||||
**🏷️ Tagged Task Lists System:** Task Master now supports **tagged task lists** for multi-context task management. This allows you to maintain separate, isolated lists of tasks for different features, branches, or experiments. Existing projects are seamlessly migrated to use a default "master" tag. Most commands now support a `--tag <name>` flag to specify which context to operate on. If omitted, commands use the currently active tag.
|
||||
|
||||
---
|
||||
|
||||
## Initialization & Setup
|
||||
|
||||
### 1. Initialize Project (`init`)
|
||||
|
||||
* **MCP Tool:** `initialize_project`
|
||||
* **CLI Command:** `task-master init [options]`
|
||||
* **Description:** `Set up the basic Taskmaster file structure and configuration in the current directory for a new project.`
|
||||
* **Key CLI Options:**
|
||||
* `--name <name>`: `Set the name for your project in Taskmaster's configuration.`
|
||||
* `--description <text>`: `Provide a brief description for your project.`
|
||||
* `--version <version>`: `Set the initial version for your project, e.g., '0.1.0'.`
|
||||
* `-y, --yes`: `Initialize Taskmaster quickly using default settings without interactive prompts.`
|
||||
* **Usage:** Run this once at the beginning of a new project.
|
||||
* **MCP Variant Description:** `Set up the basic Taskmaster file structure and configuration in the current directory for a new project by running the 'task-master init' command.`
|
||||
* **Key MCP Parameters/Options:**
|
||||
* `projectName`: `Set the name for your project.` (CLI: `--name <name>`)
|
||||
* `projectDescription`: `Provide a brief description for your project.` (CLI: `--description <text>`)
|
||||
* `projectVersion`: `Set the initial version for your project, e.g., '0.1.0'.` (CLI: `--version <version>`)
|
||||
* `authorName`: `Author name.` (CLI: `--author <author>`)
|
||||
* `skipInstall`: `Skip installing dependencies. Default is false.` (CLI: `--skip-install`)
|
||||
* `addAliases`: `Add shell aliases tm and taskmaster. Default is false.` (CLI: `--aliases`)
|
||||
* `yes`: `Skip prompts and use defaults/provided arguments. Default is false.` (CLI: `-y, --yes`)
|
||||
* **Usage:** Run this once at the beginning of a new project, typically via an integrated tool like Kiro. Operates on the current working directory of the MCP server.
|
||||
* **Important:** Once complete, you *MUST* parse a PRD in order to generate tasks. There will be no task files until then. The next step after initializing should be to create a PRD using the example PRD in `.taskmaster/templates/example_prd.txt`.
|
||||
* **Tagging:** Use the `--tag` option to parse the PRD into a specific, non-default tag context. If the tag doesn't exist, it will be created automatically. Example: `task-master parse-prd spec.txt --tag=new-feature`.
|
||||
|
||||
### 2. Parse PRD (`parse_prd`)
|
||||
|
||||
* **MCP Tool:** `parse_prd`
|
||||
* **CLI Command:** `task-master parse-prd [file] [options]`
|
||||
* **Description:** `Parse a Product Requirements Document, PRD, or text file with Taskmaster to automatically generate an initial set of tasks in tasks.json.`
|
||||
* **Key Parameters/Options:**
|
||||
* `input`: `Path to your PRD or requirements text file that Taskmaster should parse for tasks.` (CLI: `[file]` positional or `-i, --input <file>`)
|
||||
* `output`: `Specify where Taskmaster should save the generated 'tasks.json' file. Defaults to '.taskmaster/tasks/tasks.json'.` (CLI: `-o, --output <file>`)
|
||||
* `numTasks`: `Approximate number of top-level tasks Taskmaster should aim to generate from the document.` (CLI: `-n, --num-tasks <number>`)
|
||||
* `force`: `Use this to allow Taskmaster to overwrite an existing 'tasks.json' without asking for confirmation.` (CLI: `-f, --force`)
|
||||
* **Usage:** Useful for bootstrapping a project from an existing requirements document.
|
||||
* **Notes:** Task Master will strictly adhere to any specific requirements mentioned in the PRD, such as libraries, database schemas, frameworks, tech stacks, etc., while filling in any gaps where the PRD isn't fully specified. Tasks are designed to provide the most direct implementation path while avoiding over-engineering.
|
||||
* **Important:** This MCP tool makes AI calls and can take up to a minute to complete. Please inform users to hang tight while the operation is in progress. If the user does not have a PRD, suggest discussing their idea and then use the example PRD in `.taskmaster/templates/example_prd.txt` as a template for creating the PRD based on their idea, for use with `parse-prd`.
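
For example (paths and counts are illustrative):

```bash
task-master parse-prd .taskmaster/docs/prd.txt --num-tasks=12
task-master parse-prd .taskmaster/docs/feature-spec.txt --tag=feature-xyz --force
```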
|
||||
|
||||
---
|
||||
|
||||
## AI Model Configuration
|
||||
|
||||
### 2. Manage Models (`models`)
|
||||
* **MCP Tool:** `models`
|
||||
* **CLI Command:** `task-master models [options]`
|
||||
* **Description:** `View the current AI model configuration or set specific models for different roles (main, research, fallback). Allows setting custom model IDs for Ollama and OpenRouter.`
|
||||
* **Key MCP Parameters/Options:**
|
||||
* `setMain <model_id>`: `Set the primary model ID for task generation/updates.` (CLI: `--set-main <model_id>`)
|
||||
* `setResearch <model_id>`: `Set the model ID for research-backed operations.` (CLI: `--set-research <model_id>`)
|
||||
* `setFallback <model_id>`: `Set the model ID to use if the primary fails.` (CLI: `--set-fallback <model_id>`)
|
||||
* `ollama <boolean>`: `Indicates the set model ID is a custom Ollama model.` (CLI: `--ollama`)
|
||||
* `openrouter <boolean>`: `Indicates the set model ID is a custom OpenRouter model.` (CLI: `--openrouter`)
|
||||
* `listAvailableModels <boolean>`: `If true, lists available models not currently assigned to a role.` (CLI: No direct equivalent; CLI lists available automatically)
|
||||
* `projectRoot <string>`: `Optional. Absolute path to the project root directory.` (CLI: Determined automatically)
|
||||
* **Key CLI Options:**
|
||||
* `--set-main <model_id>`: `Set the primary model.`
|
||||
* `--set-research <model_id>`: `Set the research model.`
|
||||
* `--set-fallback <model_id>`: `Set the fallback model.`
|
||||
* `--ollama`: `Specify that the provided model ID is for Ollama (use with --set-*).`
|
||||
* `--openrouter`: `Specify that the provided model ID is for OpenRouter (use with --set-*). Validates against OpenRouter API.`
|
||||
* `--bedrock`: `Specify that the provided model ID is for AWS Bedrock (use with --set-*).`
|
||||
* `--setup`: `Run interactive setup to configure models, including custom Ollama/OpenRouter IDs.`
|
||||
* **Usage (MCP):** Call without set flags to get current config. Use `setMain`, `setResearch`, or `setFallback` with a valid model ID to update the configuration. Use `listAvailableModels: true` to get a list of unassigned models. To set a custom model, provide the model ID and set `ollama: true` or `openrouter: true`.
|
||||
* **Usage (CLI):** Run without flags to view current configuration and available models. Use set flags to update specific roles. Use `--setup` for guided configuration, including custom models. To set a custom model via flags, use `--set-<role>=<model_id>` along with either `--ollama` or `--openrouter`.
|
||||
* **Notes:** Configuration is stored in `.taskmaster/config.json` in the project root. This command/tool modifies that file. Use `listAvailableModels` or `task-master models` to see internally supported models. OpenRouter custom models are validated against their live API. Ollama custom models are not validated live.
|
||||
* **API note:** API keys for selected AI providers (based on their model) need to exist in the mcp.json file to be accessible in MCP context. The API keys must be present in the local .env file for the CLI to be able to read them.
|
||||
* **Model costs:** The costs in supported models are expressed in dollars. An input/output value of 3 is $3.00. A value of 0.8 is $0.80.
|
||||
* **Warning:** DO NOT MANUALLY EDIT THE .taskmaster/config.json FILE. Use the included commands either in the MCP or CLI format as needed. Always prioritize MCP tools when available and use the CLI as a fallback.
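
For example:

```bash
task-master models                          # view current configuration and available models
task-master models --setup                  # interactive configuration
task-master models --set-main <model_id>
task-master models --set-research <model_id> --openrouter
```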
|
||||
|
||||
---
|
||||
|
||||
## Task Listing & Viewing
|
||||
|
||||
### 3. Get Tasks (`get_tasks`)
|
||||
|
||||
* **MCP Tool:** `get_tasks`
|
||||
* **CLI Command:** `task-master list [options]`
|
||||
* **Description:** `List your Taskmaster tasks, optionally filtering by status and showing subtasks.`
|
||||
* **Key Parameters/Options:**
|
||||
* `status`: `Show only Taskmaster tasks matching this status (or multiple statuses, comma-separated), e.g., 'pending' or 'done,in-progress'.` (CLI: `-s, --status <status>`)
|
||||
* `withSubtasks`: `Include subtasks indented under their parent tasks in the list.` (CLI: `--with-subtasks`)
|
||||
* `tag`: `Specify which tag context to list tasks from. Defaults to the current active tag.` (CLI: `--tag <name>`)
|
||||
* `file`: `Path to your Taskmaster 'tasks.json' file. Default relies on auto-detection.` (CLI: `-f, --file <file>`)
|
||||
* **Usage:** Get an overview of the project status, often used at the start of a work session.
|
||||
|
||||
### 4. Get Next Task (`next_task`)
|
||||
|
||||
* **MCP Tool:** `next_task`
|
||||
* **CLI Command:** `task-master next [options]`
|
||||
* **Description:** `Ask Taskmaster to show the next available task you can work on, based on status and completed dependencies.`
|
||||
* **Key Parameters/Options:**
|
||||
* `file`: `Path to your Taskmaster 'tasks.json' file. Default relies on auto-detection.` (CLI: `-f, --file <file>`)
|
||||
* `tag`: `Specify which tag context to use. Defaults to the current active tag.` (CLI: `--tag <name>`)
|
||||
* **Usage:** Identify what to work on next according to the plan.
|
||||
|
||||
### 5. Get Task Details (`get_task`)
|
||||
|
||||
* **MCP Tool:** `get_task`
|
||||
* **CLI Command:** `task-master show [id] [options]`
|
||||
* **Description:** `Display detailed information for one or more specific Taskmaster tasks or subtasks by ID.`
|
||||
* **Key Parameters/Options:**
|
||||
* `id`: `Required. The ID of the Taskmaster task (e.g., '15'), subtask (e.g., '15.2'), or a comma-separated list of IDs ('1,5,10.2') you want to view.` (CLI: `[id]` positional or `-i, --id <id>`)
|
||||
* `tag`: `Specify which tag context to get the task(s) from. Defaults to the current active tag.` (CLI: `--tag <name>`)
|
||||
* `file`: `Path to your Taskmaster 'tasks.json' file. Default relies on auto-detection.` (CLI: `-f, --file <file>`)
|
||||
* **Usage:** Understand the full details for a specific task. When multiple IDs are provided, a summary table is shown.
|
||||
* **CRITICAL INFORMATION** If you need to collect information from multiple tasks, use comma-separated IDs (i.e. 1,2,3) to receive an array of tasks. Do not needlessly get tasks one at a time if you need to get many as that is wasteful.
|
||||
|
||||
---
|
||||
|
||||
## Task Creation & Modification
|
||||
|
||||
### 6. Add Task (`add_task`)
|
||||
|
||||
* **MCP Tool:** `add_task`
|
||||
* **CLI Command:** `task-master add-task [options]`
|
||||
* **Description:** `Add a new task to Taskmaster by describing it; AI will structure it.`
|
||||
* **Key Parameters/Options:**
|
||||
* `prompt`: `Required. Describe the new task you want Taskmaster to create, e.g., "Implement user authentication using JWT".` (CLI: `-p, --prompt <text>`)
|
||||
* `dependencies`: `Specify the IDs of any Taskmaster tasks that must be completed before this new one can start, e.g., '12,14'.` (CLI: `-d, --dependencies <ids>`)
|
||||
* `priority`: `Set the priority for the new task: 'high', 'medium', or 'low'. Default is 'medium'.` (CLI: `--priority <priority>`)
|
||||
* `research`: `Enable Taskmaster to use the research role for potentially more informed task creation.` (CLI: `-r, --research`)
|
||||
* `tag`: `Specify which tag context to add the task to. Defaults to the current active tag.` (CLI: `--tag <name>`)
|
||||
* `file`: `Path to your Taskmaster 'tasks.json' file. Default relies on auto-detection.` (CLI: `-f, --file <file>`)
|
||||
* **Usage:** Quickly add newly identified tasks during development.
|
||||
* **Important:** This MCP tool makes AI calls and can take up to a minute to complete. Please inform users to hang tight while the operation is in progress.
|
||||
|
||||
### 7. Add Subtask (`add_subtask`)
|
||||
|
||||
* **MCP Tool:** `add_subtask`
|
||||
* **CLI Command:** `task-master add-subtask [options]`
|
||||
* **Description:** `Add a new subtask to a Taskmaster parent task, or convert an existing task into a subtask.`
|
||||
* **Key Parameters/Options:**
|
||||
* `id` / `parent`: `Required. The ID of the Taskmaster task that will be the parent.` (MCP: `id`, CLI: `-p, --parent <id>`)
|
||||
* `taskId`: `Use this if you want to convert an existing top-level Taskmaster task into a subtask of the specified parent.` (CLI: `-i, --task-id <id>`)
|
||||
* `title`: `Required if not using taskId. The title for the new subtask Taskmaster should create.` (CLI: `-t, --title <title>`)
|
||||
* `description`: `A brief description for the new subtask.` (CLI: `-d, --description <text>`)
|
||||
* `details`: `Provide implementation notes or details for the new subtask.` (CLI: `--details <text>`)
|
||||
* `dependencies`: `Specify IDs of other tasks or subtasks, e.g., '15' or '16.1', that must be done before this new subtask.` (CLI: `--dependencies <ids>`)
|
||||
* `status`: `Set the initial status for the new subtask. Default is 'pending'.` (CLI: `-s, --status <status>`)
|
||||
* `generate`: `Enable Taskmaster to regenerate markdown task files after adding the subtask.` (CLI: `--generate`)
|
||||
* `tag`: `Specify which tag context to operate on. Defaults to the current active tag.` (CLI: `--tag <name>`)
|
||||
* `file`: `Path to your Taskmaster 'tasks.json' file. Default relies on auto-detection.` (CLI: `-f, --file <file>`)
|
||||
* **Usage:** Break down tasks manually or reorganize existing tasks.
|
||||
|
||||
### 8. Update Tasks (`update`)
|
||||
|
||||
* **MCP Tool:** `update`
|
||||
* **CLI Command:** `task-master update [options]`
|
||||
* **Description:** `Update multiple upcoming tasks in Taskmaster based on new context or changes, starting from a specific task ID.`
|
||||
* **Key Parameters/Options:**
|
||||
* `from`: `Required. The ID of the first task Taskmaster should update. All tasks with this ID or higher that are not 'done' will be considered.` (CLI: `--from <id>`)
|
||||
* `prompt`: `Required. Explain the change or new context for Taskmaster to apply to the tasks, e.g., "We are now using React Query instead of Redux Toolkit for data fetching".` (CLI: `-p, --prompt <text>`)
|
||||
* `research`: `Enable Taskmaster to use the research role for more informed updates. Requires appropriate API key.` (CLI: `-r, --research`)
|
||||
* `tag`: `Specify which tag context to operate on. Defaults to the current active tag.` (CLI: `--tag <name>`)
|
||||
* `file`: `Path to your Taskmaster 'tasks.json' file. Default relies on auto-detection.` (CLI: `-f, --file <file>`)
|
||||
* **Usage:** Handle significant implementation changes or pivots that affect multiple future tasks. Example CLI: `task-master update --from='18' --prompt='Switching to React Query.\nNeed to refactor data fetching...'`
|
||||
* **Important:** This MCP tool makes AI calls and can take up to a minute to complete. Please inform users to hang tight while the operation is in progress.
|
||||
|
||||
### 9. Update Task (`update_task`)
|
||||
|
||||
* **MCP Tool:** `update_task`
|
||||
* **CLI Command:** `task-master update-task [options]`
|
||||
* **Description:** `Modify a specific Taskmaster task by ID, incorporating new information or changes. By default, this replaces the existing task details.`
|
||||
* **Key Parameters/Options:**
|
||||
* `id`: `Required. The specific ID of the Taskmaster task, e.g., '15', you want to update.` (CLI: `-i, --id <id>`)
|
||||
* `prompt`: `Required. Explain the specific changes or provide the new information Taskmaster should incorporate into this task.` (CLI: `-p, --prompt <text>`)
|
||||
* `append`: `If true, appends the prompt content to the task's details with a timestamp, rather than replacing them. Behaves like update-subtask.` (CLI: `--append`)
|
||||
* `research`: `Enable Taskmaster to use the research role for more informed updates. Requires appropriate API key.` (CLI: `-r, --research`)
|
||||
* `tag`: `Specify which tag context the task belongs to. Defaults to the current active tag.` (CLI: `--tag <name>`)
|
||||
* `file`: `Path to your Taskmaster 'tasks.json' file. Default relies on auto-detection.` (CLI: `-f, --file <file>`)
|
||||
* **Usage:** Refine a specific task based on new understanding. Use `--append` to log progress without creating subtasks.
|
||||
* **Important:** This MCP tool makes AI calls and can take up to a minute to complete. Please inform users to hang tight while the operation is in progress.
|
||||
|
||||
### 10. Update Subtask (`update_subtask`)
|
||||
|
||||
* **MCP Tool:** `update_subtask`
|
||||
* **CLI Command:** `task-master update-subtask [options]`
|
||||
* **Description:** `Append timestamped notes or details to a specific Taskmaster subtask without overwriting existing content. Intended for iterative implementation logging.`
|
||||
* **Key Parameters/Options:**
|
||||
* `id`: `Required. The ID of the Taskmaster subtask, e.g., '5.2', to update with new information.` (CLI: `-i, --id <id>`)
|
||||
* `prompt`: `Required. The information, findings, or progress notes to append to the subtask's details with a timestamp.` (CLI: `-p, --prompt <text>`)
|
||||
* `research`: `Enable Taskmaster to use the research role for more informed updates. Requires appropriate API key.` (CLI: `-r, --research`)
|
||||
* `tag`: `Specify which tag context the subtask belongs to. Defaults to the current active tag.` (CLI: `--tag <name>`)
|
||||
* `file`: `Path to your Taskmaster 'tasks.json' file. Default relies on auto-detection.` (CLI: `-f, --file <file>`)
|
||||
* **Usage:** Log implementation progress, findings, and discoveries during subtask development. Each update is timestamped and appended to preserve the implementation journey.
|
||||
* **Important:** This MCP tool makes AI calls and can take up to a minute to complete. Please inform users to hang tight while the operation is in progress.
|
||||
|
||||
### 11. Set Task Status (`set_task_status`)
|
||||
|
||||
* **MCP Tool:** `set_task_status`
|
||||
* **CLI Command:** `task-master set-status [options]`
|
||||
* **Description:** `Update the status of one or more Taskmaster tasks or subtasks, e.g., 'pending', 'in-progress', 'done'.`
|
||||
* **Key Parameters/Options:**
|
||||
* `id`: `Required. The ID(s) of the Taskmaster task(s) or subtask(s), e.g., '15', '15.2', or '16,17.1', to update.` (CLI: `-i, --id <id>`)
|
||||
* `status`: `Required. The new status to set, e.g., 'done', 'pending', 'in-progress', 'review', 'cancelled'.` (CLI: `-s, --status <status>`)
|
||||
* `tag`: `Specify which tag context to operate on. Defaults to the current active tag.` (CLI: `--tag <name>`)
|
||||
* `file`: `Path to your Taskmaster 'tasks.json' file. Default relies on auto-detection.` (CLI: `-f, --file <file>`)
|
||||
* **Usage:** Mark progress as tasks move through the development cycle.
|
||||
|
||||
### 12. Remove Task (`remove_task`)
|
||||
|
||||
* **MCP Tool:** `remove_task`
|
||||
* **CLI Command:** `task-master remove-task [options]`
|
||||
* **Description:** `Permanently remove a task or subtask from the Taskmaster tasks list.`
|
||||
* **Key Parameters/Options:**
|
||||
* `id`: `Required. The ID of the Taskmaster task, e.g., '5', or subtask, e.g., '5.2', to permanently remove.` (CLI: `-i, --id <id>`)
|
||||
* `yes`: `Skip the confirmation prompt and immediately delete the task.` (CLI: `-y, --yes`)
|
||||
* `tag`: `Specify which tag context to operate on. Defaults to the current active tag.` (CLI: `--tag <name>`)
|
||||
* `file`: `Path to your Taskmaster 'tasks.json' file. Default relies on auto-detection.` (CLI: `-f, --file <file>`)
|
||||
* **Usage:** Permanently delete tasks or subtasks that are no longer needed in the project.
|
||||
* **Notes:** Use with caution as this operation cannot be undone. Consider using 'blocked', 'cancelled', or 'deferred' status instead if you just want to exclude a task from active planning but keep it for reference. The command automatically cleans up dependency references in other tasks.
|
||||
|
||||
---
|
||||
|
||||
## Task Structure & Breakdown
|
||||
|
||||
### 13. Expand Task (`expand_task`)
|
||||
|
||||
* **MCP Tool:** `expand_task`
|
||||
* **CLI Command:** `task-master expand [options]`
|
||||
* **Description:** `Use Taskmaster's AI to break down a complex task into smaller, manageable subtasks. Appends subtasks by default.`
|
||||
* **Key Parameters/Options:**
|
||||
* `id`: `The ID of the specific Taskmaster task you want to break down into subtasks.` (CLI: `-i, --id <id>`)
|
||||
* `num`: `Optional: Suggests how many subtasks Taskmaster should aim to create. Uses complexity analysis/defaults otherwise.` (CLI: `-n, --num <number>`)
|
||||
* `research`: `Enable Taskmaster to use the research role for more informed subtask generation. Requires appropriate API key.` (CLI: `-r, --research`)
|
||||
* `prompt`: `Optional: Provide extra context or specific instructions to Taskmaster for generating the subtasks.` (CLI: `-p, --prompt <text>`)
|
||||
* `force`: `Optional: If true, clear existing subtasks before generating new ones. Default is false (append).` (CLI: `--force`)
|
||||
* `tag`: `Specify which tag context the task belongs to. Defaults to the current active tag.` (CLI: `--tag <name>`)
|
||||
* `file`: `Path to your Taskmaster 'tasks.json' file. Default relies on auto-detection.` (CLI: `-f, --file <file>`)
|
||||
* **Usage:** Generate a detailed implementation plan for a complex task before starting coding. Automatically uses complexity report recommendations if available and `num` is not specified.
|
||||
* **Important:** This MCP tool makes AI calls and can take up to a minute to complete. Please inform users to hang tight while the operation is in progress.
|
||||
|
||||
### 14. Expand All Tasks (`expand_all`)
|
||||
|
||||
* **MCP Tool:** `expand_all`
|
||||
* **CLI Command:** `task-master expand --all [options]` (Note: CLI uses the `expand` command with the `--all` flag)
|
||||
* **Description:** `Tell Taskmaster to automatically expand all eligible pending/in-progress tasks based on complexity analysis or defaults. Appends subtasks by default.`
|
||||
* **Key Parameters/Options:**
|
||||
* `num`: `Optional: Suggests how many subtasks Taskmaster should aim to create per task.` (CLI: `-n, --num <number>`)
|
||||
* `research`: `Enable research role for more informed subtask generation. Requires appropriate API key.` (CLI: `-r, --research`)
|
||||
* `prompt`: `Optional: Provide extra context for Taskmaster to apply generally during expansion.` (CLI: `-p, --prompt <text>`)
|
||||
* `force`: `Optional: If true, clear existing subtasks before generating new ones for each eligible task. Default is false (append).` (CLI: `--force`)
|
||||
* `tag`: `Specify which tag context to expand. Defaults to the current active tag.` (CLI: `--tag <name>`)
|
||||
* `file`: `Path to your Taskmaster 'tasks.json' file. Default relies on auto-detection.` (CLI: `-f, --file <file>`)
|
||||
* **Usage:** Useful after initial task generation or complexity analysis to break down multiple tasks at once.
|
||||
* **Important:** This MCP tool makes AI calls and can take up to a minute to complete. Please inform users to hang tight while the operation is in progress.
|
||||
|
||||
### 15. Clear Subtasks (`clear_subtasks`)
|
||||
|
||||
* **MCP Tool:** `clear_subtasks`
|
||||
* **CLI Command:** `task-master clear-subtasks [options]`
|
||||
* **Description:** `Remove all subtasks from one or more specified Taskmaster parent tasks.`
|
||||
* **Key Parameters/Options:**
|
||||
* `id`: `The ID(s) of the Taskmaster parent task(s) whose subtasks you want to remove, e.g., '15' or '16,18'. Required unless using 'all'.` (CLI: `-i, --id <ids>`)
|
||||
* `all`: `Tell Taskmaster to remove subtasks from all parent tasks.` (CLI: `--all`)
|
||||
* `tag`: `Specify which tag context to operate on. Defaults to the current active tag.` (CLI: `--tag <name>`)
|
||||
* `file`: `Path to your Taskmaster 'tasks.json' file. Default relies on auto-detection.` (CLI: `-f, --file <file>`)
|
||||
* **Usage:** Used before regenerating subtasks with `expand_task` if the previous breakdown needs replacement.
|
||||
|
||||
### 16. Remove Subtask (`remove_subtask`)
|
||||
|
||||
* **MCP Tool:** `remove_subtask`
|
||||
* **CLI Command:** `task-master remove-subtask [options]`
|
||||
* **Description:** `Remove a subtask from its Taskmaster parent, optionally converting it into a standalone task.`
|
||||
* **Key Parameters/Options:**
|
||||
* `id`: `Required. The ID(s) of the Taskmaster subtask(s) to remove, e.g., '15.2' or '16.1,16.3'.` (CLI: `-i, --id <id>`)
|
||||
* `convert`: `If used, Taskmaster will turn the subtask into a regular top-level task instead of deleting it.` (CLI: `-c, --convert`)
|
||||
* `generate`: `Enable Taskmaster to regenerate markdown task files after removing the subtask.` (CLI: `--generate`)
|
||||
* `tag`: `Specify which tag context to operate on. Defaults to the current active tag.` (CLI: `--tag <name>`)
|
||||
* `file`: `Path to your Taskmaster 'tasks.json' file. Default relies on auto-detection.` (CLI: `-f, --file <file>`)
|
||||
* **Usage:** Delete unnecessary subtasks or promote a subtask to a top-level task.
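* **Example CLI (illustrative):** `task-master remove-subtask --id=15.2 --convert` detaches subtask 15.2 and promotes it to a standalone top-level task; the ID is a placeholder.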
|
||||
|
||||
### 17. Move Task (`move_task`)
|
||||
|
||||
* **MCP Tool:** `move_task`
|
||||
* **CLI Command:** `task-master move [options]`
|
||||
* **Description:** `Move a task or subtask to a new position within the task hierarchy.`
|
||||
* **Key Parameters/Options:**
|
||||
* `from`: `Required. ID of the task/subtask to move (e.g., "5" or "5.2"). Can be comma-separated for multiple tasks.` (CLI: `--from <id>`)
|
||||
* `to`: `Required. ID of the destination (e.g., "7" or "7.3"). Must match the number of source IDs if comma-separated.` (CLI: `--to <id>`)
|
||||
* `tag`: `Specify which tag context to operate on. Defaults to the current active tag.` (CLI: `--tag <name>`)
|
||||
* `file`: `Path to your Taskmaster 'tasks.json' file. Default relies on auto-detection.` (CLI: `-f, --file <file>`)
|
||||
* **Usage:** Reorganize tasks by moving them within the hierarchy. Supports various scenarios like:
|
||||
* Moving a task to become a subtask
|
||||
* Moving a subtask to become a standalone task
|
||||
* Moving a subtask to a different parent
|
||||
* Reordering subtasks within the same parent
|
||||
* Moving a task to a new, non-existent ID (automatically creates placeholders)
|
||||
* Moving multiple tasks at once with comma-separated IDs
|
||||
* **Validation Features:**
|
||||
* Allows moving tasks to non-existent destination IDs (creates placeholder tasks)
|
||||
* Prevents moving to existing task IDs that already have content (to avoid overwriting)
|
||||
* Validates that source tasks exist before attempting to move them
|
||||
* Maintains proper parent-child relationships
|
||||
* **Example CLI:** `task-master move --from=5.2 --to=7.3` to move subtask 5.2 to become subtask 7.3.
|
||||
* **Example Multi-Move:** `task-master move --from=10,11,12 --to=16,17,18` to move multiple tasks to new positions.
|
||||
* **Common Use:** Resolving merge conflicts in tasks.json when multiple team members create tasks on different branches.
|
||||
|
||||
---
|
||||
|
||||
## Dependency Management
|
||||
|
||||
### 18. Add Dependency (`add_dependency`)
|
||||
|
||||
* **MCP Tool:** `add_dependency`
|
||||
* **CLI Command:** `task-master add-dependency [options]`
|
||||
* **Description:** `Define a dependency in Taskmaster, making one task a prerequisite for another.`
|
||||
* **Key Parameters/Options:**
|
||||
* `id`: `Required. The ID of the Taskmaster task that will depend on another.` (CLI: `-i, --id <id>`)
|
||||
* `dependsOn`: `Required. The ID of the Taskmaster task that must be completed first, the prerequisite.` (CLI: `-d, --depends-on <id>`)
|
||||
* `tag`: `Specify which tag context to operate on. Defaults to the current active tag.` (CLI: `--tag <name>`)
|
||||
* `file`: `Path to your Taskmaster 'tasks.json' file. Default relies on auto-detection.` (CLI: `-f, --file <path>`)
|
||||
* **Usage:** Establish the correct order of execution between tasks.
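* **Example CLI (illustrative):** `task-master add-dependency --id=22 --depends-on=21` makes task 22 wait on task 21; both IDs are placeholders.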
|
||||
|
||||
### 19. Remove Dependency (`remove_dependency`)
|
||||
|
||||
* **MCP Tool:** `remove_dependency`
|
||||
* **CLI Command:** `task-master remove-dependency [options]`
|
||||
* **Description:** `Remove a dependency relationship between two Taskmaster tasks.`
|
||||
* **Key Parameters/Options:**
|
||||
* `id`: `Required. The ID of the Taskmaster task you want to remove a prerequisite from.` (CLI: `-i, --id <id>`)
|
||||
* `dependsOn`: `Required. The ID of the Taskmaster task that should no longer be a prerequisite.` (CLI: `-d, --depends-on <id>`)
|
||||
* `tag`: `Specify which tag context to operate on. Defaults to the current active tag.` (CLI: `--tag <name>`)
|
||||
* `file`: `Path to your Taskmaster 'tasks.json' file. Default relies on auto-detection.` (CLI: `-f, --file <file>`)
|
||||
* **Usage:** Update task relationships when the order of execution changes.
|
||||
|
||||
### 20. Validate Dependencies (`validate_dependencies`)
|
||||
|
||||
* **MCP Tool:** `validate_dependencies`
|
||||
* **CLI Command:** `task-master validate-dependencies [options]`
|
||||
* **Description:** `Check your Taskmaster tasks for dependency issues (like circular references or links to non-existent tasks) without making changes.`
|
||||
* **Key Parameters/Options:**
|
||||
* `tag`: `Specify which tag context to validate. Defaults to the current active tag.` (CLI: `--tag <name>`)
|
||||
* `file`: `Path to your Taskmaster 'tasks.json' file. Default relies on auto-detection.` (CLI: `-f, --file <file>`)
|
||||
* **Usage:** Audit the integrity of your task dependencies.
|
||||
|
||||
### 21. Fix Dependencies (`fix_dependencies`)
|
||||
|
||||
* **MCP Tool:** `fix_dependencies`
|
||||
* **CLI Command:** `task-master fix-dependencies [options]`
|
||||
* **Description:** `Automatically fix dependency issues (like circular references or links to non-existent tasks) in your Taskmaster tasks.`
|
||||
* **Key Parameters/Options:**
|
||||
* `tag`: `Specify which tag context to fix dependencies in. Defaults to the current active tag.` (CLI: `--tag <name>`)
|
||||
* `file`: `Path to your Taskmaster 'tasks.json' file. Default relies on auto-detection.` (CLI: `-f, --file <file>`)
|
||||
* **Usage:** Clean up dependency errors automatically.
|
||||
|
||||
---
|
||||
|
||||
## Analysis & Reporting
|
||||
|
||||
### 22. Analyze Project Complexity (`analyze_project_complexity`)
|
||||
|
||||
* **MCP Tool:** `analyze_project_complexity`
|
||||
* **CLI Command:** `task-master analyze-complexity [options]`
|
||||
* **Description:** `Have Taskmaster analyze your tasks to determine their complexity and suggest which ones need to be broken down further.`
|
||||
* **Key Parameters/Options:**
|
||||
* `output`: `Where to save the complexity analysis report. Default is '.taskmaster/reports/task-complexity-report.json' (or '..._tagname.json' if a tag is used).` (CLI: `-o, --output <file>`)
|
||||
* `threshold`: `The minimum complexity score (1-10) that should trigger a recommendation to expand a task.` (CLI: `-t, --threshold <number>`)
|
||||
* `research`: `Enable research role for more accurate complexity analysis. Requires appropriate API key.` (CLI: `-r, --research`)
|
||||
* `tag`: `Specify which tag context to analyze. Defaults to the current active tag.` (CLI: `--tag <name>`)
|
||||
* `file`: `Path to your Taskmaster 'tasks.json' file. Default relies on auto-detection.` (CLI: `-f, --file <file>`)
|
||||
* **Usage:** Used before breaking down tasks to identify which ones need the most attention.
|
||||
* **Important:** This MCP tool makes AI calls and can take up to a minute to complete. Please inform users to hang tight while the operation is in progress.
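* **Example CLI (illustrative):** `task-master analyze-complexity --research --threshold=6` analyzes all tasks in the current tag with the research role and flags anything scoring 6 or above for expansion; the threshold value is only an example.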
|
||||
|
||||
### 23. View Complexity Report (`complexity_report`)
|
||||
|
||||
* **MCP Tool:** `complexity_report`
|
||||
* **CLI Command:** `task-master complexity-report [options]`
|
||||
* **Description:** `Display the task complexity analysis report in a readable format.`
|
||||
* **Key Parameters/Options:**
|
||||
* `tag`: `Specify which tag context to show the report for. Defaults to the current active tag.` (CLI: `--tag <name>`)
|
||||
* `file`: `Path to the complexity report (default: '.taskmaster/reports/task-complexity-report.json').` (CLI: `-f, --file <file>`)
|
||||
* **Usage:** Review and understand the complexity analysis results after running analyze-complexity.
|
||||
|
||||
---
|
||||
|
||||
## File Management
|
||||
|
||||
### 24. Generate Task Files (`generate`)
|
||||
|
||||
* **MCP Tool:** `generate`
|
||||
* **CLI Command:** `task-master generate [options]`
|
||||
* **Description:** `Create or update individual Markdown files for each task based on your tasks.json.`
|
||||
* **Key Parameters/Options:**
|
||||
* `output`: `The directory where Taskmaster should save the task files (default: in a 'tasks' directory).` (CLI: `-o, --output <directory>`)
|
||||
* `tag`: `Specify which tag context to generate files for. Defaults to the current active tag.` (CLI: `--tag <name>`)
|
||||
* `file`: `Path to your Taskmaster 'tasks.json' file. Default relies on auto-detection.` (CLI: `-f, --file <file>`)
|
||||
* **Usage:** Run this after making changes to tasks.json to keep individual task files up to date. This command is now manual and no longer runs automatically.
|
||||
|
||||
---
|
||||
|
||||
## AI-Powered Research
|
||||
|
||||
### 25. Research (`research`)
|
||||
|
||||
* **MCP Tool:** `research`
|
||||
* **CLI Command:** `task-master research [options]`
|
||||
* **Description:** `Perform AI-powered research queries with project context to get fresh, up-to-date information beyond the AI's knowledge cutoff.`
|
||||
* **Key Parameters/Options:**
|
||||
* `query`: `Required. Research query/prompt (e.g., "What are the latest best practices for React Query v5?").` (CLI: `[query]` positional or `-q, --query <text>`)
|
||||
* `taskIds`: `Comma-separated list of task/subtask IDs from the current tag context (e.g., "15,16.2,17").` (CLI: `-i, --id <ids>`)
|
||||
* `filePaths`: `Comma-separated list of file paths for context (e.g., "src/api.js,docs/readme.md").` (CLI: `-f, --files <paths>`)
|
||||
* `customContext`: `Additional custom context text to include in the research.` (CLI: `-c, --context <text>`)
|
||||
* `includeProjectTree`: `Include project file tree structure in context (default: false).` (CLI: `--tree`)
|
||||
* `detailLevel`: `Detail level for the research response: 'low', 'medium', 'high' (default: medium).` (CLI: `--detail <level>`)
|
||||
* `saveTo`: `Task or subtask ID (e.g., "15", "15.2") to automatically save the research conversation to.` (CLI: `--save-to <id>`)
|
||||
* `saveFile`: `If true, saves the research conversation to a markdown file in '.taskmaster/docs/research/'.` (CLI: `--save-file`)
|
||||
* `noFollowup`: `Disables the interactive follow-up question menu in the CLI.` (CLI: `--no-followup`)
|
||||
* `tag`: `Specify which tag context to use for task-based context gathering. Defaults to the current active tag.` (CLI: `--tag <name>`)
|
||||
* `projectRoot`: `The directory of the project. Must be an absolute path.` (CLI: Determined automatically)
|
||||
* **Usage:** **This is a POWERFUL tool that agents should use FREQUENTLY** to:
|
||||
* Get fresh information beyond knowledge cutoff dates
|
||||
* Research latest best practices, library updates, security patches
|
||||
* Find implementation examples for specific technologies
|
||||
* Validate approaches against current industry standards
|
||||
* Get contextual advice based on project files and tasks
|
||||
* **When to Consider Using Research:**
|
||||
* **Before implementing any task** - Research current best practices
|
||||
* **When encountering new technologies** - Get up-to-date implementation guidance (libraries, APIs, etc.)
|
||||
* **For security-related tasks** - Find latest security recommendations
|
||||
* **When updating dependencies** - Research breaking changes and migration guides
|
||||
* **For performance optimization** - Get current performance best practices
|
||||
* **When debugging complex issues** - Research known solutions and workarounds
|
||||
* **Research + Action Pattern:**
|
||||
* Use `research` to gather fresh information
|
||||
* Use `update_subtask` to commit findings with timestamps
|
||||
* Use `update_task` to incorporate research into task details
|
||||
* Use `add_task` with research flag for informed task creation
|
||||
* **Important:** This MCP tool makes AI calls and can take up to a minute to complete. The research provides FRESH data beyond the AI's training cutoff, making it invaluable for current best practices and recent developments.
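* **Example CLI (illustrative):** `task-master research "What are the latest best practices for React Query v5?" --id=15,16.2 --files=src/api.js --save-to=15.2 --detail=high` gathers fresh, context-aware guidance and appends the conversation to subtask 15.2; the query, IDs, and file path are placeholders borrowed from the parameter examples above.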
|
||||
|
||||
---
|
||||
|
||||
## Tag Management
|
||||
|
||||
This new suite of commands allows you to manage different task contexts (tags).
|
||||
|
||||
### 26. List Tags (`tags`)
|
||||
|
||||
* **MCP Tool:** `list_tags`
|
||||
* **CLI Command:** `task-master tags [options]`
|
||||
* **Description:** `List all available tags with task counts, completion status, and other metadata.`
|
||||
* **Key Parameters/Options:**
|
||||
* `file`: `Path to your Taskmaster 'tasks.json' file. Default relies on auto-detection.` (CLI: `-f, --file <file>`)
|
||||
* `--show-metadata`: `Include detailed metadata in the output (e.g., creation date, description).` (CLI: `--show-metadata`)
|
||||
|
||||
### 27. Add Tag (`add_tag`)
|
||||
|
||||
* **MCP Tool:** `add_tag`
|
||||
* **CLI Command:** `task-master add-tag <tagName> [options]`
|
||||
* **Description:** `Create a new, empty tag context, or copy tasks from another tag.`
|
||||
* **Key Parameters/Options:**
|
||||
* `tagName`: `Name of the new tag to create (alphanumeric, hyphens, underscores).` (CLI: `<tagName>` positional)
|
||||
* `--from-branch`: `Creates a tag with a name derived from the current git branch, ignoring the <tagName> argument.` (CLI: `--from-branch`)
|
||||
* `--copy-from-current`: `Copy tasks from the currently active tag to the new tag.` (CLI: `--copy-from-current`)
|
||||
* `--copy-from <tag>`: `Copy tasks from a specific source tag to the new tag.` (CLI: `--copy-from <tag>`)
|
||||
* `--description <text>`: `Provide an optional description for the new tag.` (CLI: `-d, --description <text>`)
|
||||
* `file`: `Path to your Taskmaster 'tasks.json' file. Default relies on auto-detection.` (CLI: `-f, --file <file>`)
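* **Example CLI (illustrative):** `task-master add-tag feature-payments --copy-from-current -d "Payment flow work"` creates a new tag seeded with the current tag's tasks; the tag name and description are placeholders.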
|
||||
|
||||
### 28. Delete Tag (`delete_tag`)
|
||||
|
||||
* **MCP Tool:** `delete_tag`
|
||||
* **CLI Command:** `task-master delete-tag <tagName> [options]`
|
||||
* **Description:** `Permanently delete a tag and all of its associated tasks.`
|
||||
* **Key Parameters/Options:**
|
||||
* `tagName`: `Name of the tag to delete.` (CLI: `<tagName>` positional)
|
||||
* `--yes`: `Skip the confirmation prompt.` (CLI: `-y, --yes`)
|
||||
* `file`: `Path to your Taskmaster 'tasks.json' file. Default relies on auto-detection.` (CLI: `-f, --file <file>`)
|
||||
|
||||
### 29. Use Tag (`use_tag`)
|
||||
|
||||
* **MCP Tool:** `use_tag`
|
||||
* **CLI Command:** `task-master use-tag <tagName>`
|
||||
* **Description:** `Switch your active task context to a different tag.`
|
||||
* **Key Parameters/Options:**
|
||||
* `tagName`: `Name of the tag to switch to.` (CLI: `<tagName>` positional)
|
||||
* `file`: `Path to your Taskmaster 'tasks.json' file. Default relies on auto-detection.` (CLI: `-f, --file <file>`)
|
||||
|
||||
### 30. Rename Tag (`rename_tag`)
|
||||
|
||||
* **MCP Tool:** `rename_tag`
|
||||
* **CLI Command:** `task-master rename-tag <oldName> <newName>`
|
||||
* **Description:** `Rename an existing tag.`
|
||||
* **Key Parameters/Options:**
|
||||
* `oldName`: `The current name of the tag.` (CLI: `<oldName>` positional)
|
||||
* `newName`: `The new name for the tag.` (CLI: `<newName>` positional)
|
||||
* `file`: `Path to your Taskmaster 'tasks.json' file. Default relies on auto-detection.` (CLI: `-f, --file <file>`)
|
||||
|
||||
### 31. Copy Tag (`copy_tag`)
|
||||
|
||||
* **MCP Tool:** `copy_tag`
|
||||
* **CLI Command:** `task-master copy-tag <sourceName> <targetName> [options]`
|
||||
* **Description:** `Copy an entire tag context, including all its tasks and metadata, to a new tag.`
|
||||
* **Key Parameters/Options:**
|
||||
* `sourceName`: `Name of the tag to copy from.` (CLI: `<sourceName>` positional)
|
||||
* `targetName`: `Name of the new tag to create.` (CLI: `<targetName>` positional)
|
||||
* `--description <text>`: `Optional description for the new tag.` (CLI: `-d, --description <text>`)
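* **Example CLI (illustrative):** `task-master copy-tag master experiment-v2 -d "Sandbox for an experimental spike"` duplicates the `master` tag's tasks and metadata into a new `experiment-v2` tag; both names and the description are placeholders.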
|
||||
|
||||
---
|
||||
|
||||
## Miscellaneous
|
||||
|
||||
### 32. Sync Readme (`sync-readme`) -- experimental
|
||||
|
||||
* **MCP Tool:** N/A
|
||||
* **CLI Command:** `task-master sync-readme [options]`
|
||||
* **Description:** `Exports your task list to your project's README.md file, useful for showcasing progress.`
|
||||
* **Key Parameters/Options:**
|
||||
* `status`: `Filter tasks by status (e.g., 'pending', 'done').` (CLI: `-s, --status <status>`)
|
||||
* `withSubtasks`: `Include subtasks in the export.` (CLI: `--with-subtasks`)
|
||||
* `tag`: `Specify which tag context to export from. Defaults to the current active tag.` (CLI: `--tag <name>`)
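* **Example CLI (illustrative):** `task-master sync-readme --with-subtasks --status=pending` exports only pending tasks, including their subtasks, to the project README.md; the filters shown are placeholders.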
|
||||
|
||||
---
|
||||
|
||||
## Environment Variables Configuration (Updated)
|
||||
|
||||
Taskmaster primarily uses the **`.taskmaster/config.json`** file (in project root) for configuration (models, parameters, logging level, etc.), managed via `task-master models --setup`.
|
||||
|
||||
Environment variables are used **only** for sensitive API keys related to AI providers and specific overrides like the Ollama base URL:
|
||||
|
||||
* **API Keys (Required for corresponding provider):**
|
||||
* `ANTHROPIC_API_KEY`
|
||||
* `PERPLEXITY_API_KEY`
|
||||
* `OPENAI_API_KEY`
|
||||
* `GOOGLE_API_KEY`
|
||||
* `MISTRAL_API_KEY`
|
||||
* `AZURE_OPENAI_API_KEY` (Requires `AZURE_OPENAI_ENDPOINT` too)
|
||||
* `OPENROUTER_API_KEY`
|
||||
* `XAI_API_KEY`
|
||||
* `OLLAMA_API_KEY` (Requires `OLLAMA_BASE_URL` too)
|
||||
* **Endpoints (optional, provider-specific; may also be set in `.taskmaster/config.json`):**
|
||||
* `AZURE_OPENAI_ENDPOINT`
|
||||
* `OLLAMA_BASE_URL` (Default: `http://localhost:11434/api`)
|
||||
|
||||
**Set API keys** in your **`.env`** file in the project root (for CLI use) or within the `env` section of your **`.kiro/mcp.json`** file (for MCP/Kiro integration). All other settings (model choice, max tokens, temperature, log level, custom endpoints) are managed in `.taskmaster/config.json` via the `task-master models` command or the `models` MCP tool.
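As a minimal sketch (every value below is a placeholder), a CLI-side `.env` in the project root could look like this; for MCP/Kiro use, the same keys go into the `env` section of `.kiro/mcp.json` instead:

```
# .env (project root), read by the task-master CLI
ANTHROPIC_API_KEY=your_key_here
PERPLEXITY_API_KEY=your_key_here
# Only needed if the corresponding providers are configured:
AZURE_OPENAI_API_KEY=your_key_here
AZURE_OPENAI_ENDPOINT=https://your-endpoint.azure.com/
OLLAMA_BASE_URL=http://localhost:11434/api
```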
|
||||
|
||||
---
|
||||
|
||||
For details on how these commands fit into the development process, see the [dev_workflow.md](.kiro/steering/dev_workflow.md).
|
||||
59
.kiro/steering/taskmaster_hooks_workflow.md
Normal file
@@ -0,0 +1,59 @@
|
||||
---
|
||||
inclusion: always
|
||||
---
|
||||
|
||||
# Taskmaster Hook-Driven Workflow
|
||||
|
||||
## Core Principle: Hooks Automate Task Management
|
||||
|
||||
When working with Taskmaster in Kiro, **avoid manually marking tasks as done**. The hook system automatically handles task completion based on:
|
||||
|
||||
- **Test Success**: `[TM] Test Success Task Completer` detects passing tests and prompts for task completion
|
||||
- **Code Changes**: `[TM] Code Change Task Tracker` monitors implementation progress
|
||||
- **Dependency Chains**: `[TM] Task Dependency Auto-Progression` auto-starts dependent tasks
|
||||
|
||||
## AI Assistant Workflow
|
||||
|
||||
Follow this pattern when implementing features:
|
||||
|
||||
1. **Implement First**: Write code, create tests, make changes
|
||||
2. **Save Frequently**: Hooks trigger on file saves to track progress automatically
|
||||
3. **Let Hooks Decide**: Allow hooks to detect completion rather than manually setting status
|
||||
4. **Respond to Prompts**: Confirm when hooks suggest task completion
|
||||
|
||||
## Key Rules for AI Assistants
|
||||
|
||||
- **Never use `tm set-status --status=done`** unless hooks fail to detect completion
|
||||
- **Always write tests** - they provide the most reliable completion signal
|
||||
- **Save files after implementation** - this triggers progress tracking
|
||||
- **Trust hook suggestions** - if no completion prompt appears, more work may be needed
|
||||
|
||||
## Automatic Behaviors
|
||||
|
||||
The hook system provides:
|
||||
|
||||
- **Progress Logging**: Implementation details automatically added to task notes
|
||||
- **Evidence-Based Completion**: Tasks marked done only when criteria are met
|
||||
- **Dependency Management**: Next tasks auto-started when dependencies complete
|
||||
- **Natural Flow**: Focus on coding, not task management overhead
|
||||
|
||||
## Manual Override Cases
|
||||
|
||||
Only manually set task status for:
|
||||
|
||||
- Documentation-only tasks
|
||||
- Tasks without testable outcomes
|
||||
- Emergency fixes without proper test coverage
|
||||
|
||||
Use `tm set-status` sparingly - prefer hook-driven completion.
|
||||
|
||||
## Implementation Pattern
|
||||
|
||||
```
|
||||
1. Implement feature → Save file
|
||||
2. Write tests → Save test file
|
||||
3. Tests pass → Hook prompts completion
|
||||
4. Confirm completion → Next task auto-starts
|
||||
```
|
||||
|
||||
This workflow ensures proper task tracking while maintaining development flow.
|
||||
6
.manypkg.json
Normal file
@@ -0,0 +1,6 @@
|
||||
{
|
||||
"$schema": "https://unpkg.com/@manypkg/get-packages@1.1.3/schema.json",
|
||||
"defaultBranch": "main",
|
||||
"ignoredRules": ["ROOT_HAS_DEPENDENCIES", "INTERNAL_MISMATCH"],
|
||||
"ignoredPackages": ["@tm/core", "@tm/cli", "@tm/build-config"]
|
||||
}
|
||||
9
.mcp.json
Normal file
@@ -0,0 +1,9 @@
|
||||
{
|
||||
"mcpServers": {
|
||||
"task-master-ai": {
|
||||
"type": "stdio",
|
||||
"command": "npx",
|
||||
"args": ["-y", "task-master-ai"]
|
||||
}
|
||||
}
|
||||
}
|
||||
417
.taskmaster/CLAUDE.md
Normal file
@@ -0,0 +1,417 @@
|
||||
# Task Master AI - Agent Integration Guide
|
||||
|
||||
## Essential Commands
|
||||
|
||||
### Core Workflow Commands
|
||||
|
||||
```bash
|
||||
# Project Setup
|
||||
task-master init # Initialize Task Master in current project
|
||||
task-master parse-prd .taskmaster/docs/prd.txt # Generate tasks from PRD document
|
||||
task-master models --setup # Configure AI models interactively
|
||||
|
||||
# Daily Development Workflow
|
||||
task-master list # Show all tasks with status
|
||||
task-master next # Get next available task to work on
|
||||
task-master show <id> # View detailed task information (e.g., task-master show 1.2)
|
||||
task-master set-status --id=<id> --status=done # Mark task complete
|
||||
|
||||
# Task Management
|
||||
task-master add-task --prompt="description" --research # Add new task with AI assistance
|
||||
task-master expand --id=<id> --research --force # Break task into subtasks
|
||||
task-master update-task --id=<id> --prompt="changes" # Update specific task
|
||||
task-master update --from=<id> --prompt="changes" # Update multiple tasks from ID onwards
|
||||
task-master update-subtask --id=<id> --prompt="notes" # Add implementation notes to subtask
|
||||
|
||||
# Analysis & Planning
|
||||
task-master analyze-complexity --research # Analyze task complexity
|
||||
task-master complexity-report # View complexity analysis
|
||||
task-master expand --all --research # Expand all eligible tasks
|
||||
|
||||
# Dependencies & Organization
|
||||
task-master add-dependency --id=<id> --depends-on=<id> # Add task dependency
|
||||
task-master move --from=<id> --to=<id> # Reorganize task hierarchy
|
||||
task-master validate-dependencies # Check for dependency issues
|
||||
task-master generate                              # Regenerate task markdown files from tasks.json (manual step)
|
||||
```
|
||||
|
||||
## Key Files & Project Structure
|
||||
|
||||
### Core Files
|
||||
|
||||
- `.taskmaster/tasks/tasks.json` - Main task data file (auto-managed)
|
||||
- `.taskmaster/config.json` - AI model configuration (use `task-master models` to modify)
|
||||
- `.taskmaster/docs/prd.txt` - Product Requirements Document for parsing
|
||||
- `.taskmaster/tasks/*.txt` - Individual task files (auto-generated from tasks.json)
|
||||
- `.env` - API keys for CLI usage
|
||||
|
||||
### Claude Code Integration Files
|
||||
|
||||
- `CLAUDE.md` - Auto-loaded context for Claude Code (this file)
|
||||
- `.claude/settings.json` - Claude Code tool allowlist and preferences
|
||||
- `.claude/commands/` - Custom slash commands for repeated workflows
|
||||
- `.mcp.json` - MCP server configuration (project-specific)
|
||||
|
||||
### Directory Structure
|
||||
|
||||
```
|
||||
project/
|
||||
├── .taskmaster/
|
||||
│ ├── tasks/ # Task files directory
|
||||
│ │ ├── tasks.json # Main task database
|
||||
│ │ ├── task-1.md # Individual task files
|
||||
│ │ └── task-2.md
|
||||
│ ├── docs/ # Documentation directory
|
||||
│ │ ├── prd.txt # Product requirements
|
||||
│ ├── reports/ # Analysis reports directory
|
||||
│ │ └── task-complexity-report.json
|
||||
│ ├── templates/ # Template files
|
||||
│ │ └── example_prd.txt # Example PRD template
|
||||
│ └── config.json # AI models & settings
|
||||
├── .claude/
|
||||
│ ├── settings.json # Claude Code configuration
|
||||
│ └── commands/ # Custom slash commands
|
||||
├── .env # API keys
|
||||
├── .mcp.json # MCP configuration
|
||||
└── CLAUDE.md # This file - auto-loaded by Claude Code
|
||||
```
|
||||
|
||||
## MCP Integration
|
||||
|
||||
Task Master provides an MCP server that Claude Code can connect to. Configure in `.mcp.json`:
|
||||
|
||||
```json
|
||||
{
|
||||
"mcpServers": {
|
||||
"task-master-ai": {
|
||||
"command": "npx",
|
||||
"args": ["-y", "task-master-ai"],
|
||||
"env": {
|
||||
"ANTHROPIC_API_KEY": "your_key_here",
|
||||
"PERPLEXITY_API_KEY": "your_key_here",
|
||||
"OPENAI_API_KEY": "OPENAI_API_KEY_HERE",
|
||||
"GOOGLE_API_KEY": "GOOGLE_API_KEY_HERE",
|
||||
"XAI_API_KEY": "XAI_API_KEY_HERE",
|
||||
"OPENROUTER_API_KEY": "OPENROUTER_API_KEY_HERE",
|
||||
"MISTRAL_API_KEY": "MISTRAL_API_KEY_HERE",
|
||||
"AZURE_OPENAI_API_KEY": "AZURE_OPENAI_API_KEY_HERE",
|
||||
"OLLAMA_API_KEY": "OLLAMA_API_KEY_HERE"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Essential MCP Tools
|
||||
|
||||
```javascript
|
||||
help; // = shows available taskmaster commands
|
||||
// Project setup
|
||||
initialize_project; // = task-master init
|
||||
parse_prd; // = task-master parse-prd
|
||||
|
||||
// Daily workflow
|
||||
get_tasks; // = task-master list
|
||||
next_task; // = task-master next
|
||||
get_task; // = task-master show <id>
|
||||
set_task_status; // = task-master set-status
|
||||
|
||||
// Task management
|
||||
add_task; // = task-master add-task
|
||||
expand_task; // = task-master expand
|
||||
update_task; // = task-master update-task
|
||||
update_subtask; // = task-master update-subtask
|
||||
update; // = task-master update
|
||||
|
||||
// Analysis
|
||||
analyze_project_complexity; // = task-master analyze-complexity
|
||||
complexity_report; // = task-master complexity-report
|
||||
```
|
||||
|
||||
## Claude Code Workflow Integration
|
||||
|
||||
### Standard Development Workflow
|
||||
|
||||
#### 1. Project Initialization
|
||||
|
||||
```bash
|
||||
# Initialize Task Master
|
||||
task-master init
|
||||
|
||||
# Create or obtain PRD, then parse it
|
||||
task-master parse-prd .taskmaster/docs/prd.txt
|
||||
|
||||
# Analyze complexity and expand tasks
|
||||
task-master analyze-complexity --research
|
||||
task-master expand --all --research
|
||||
```
|
||||
|
||||
If tasks already exist, another PRD can be parsed (with new information only!) using `parse-prd` with the `--append` flag. This will add the generated tasks to the existing list of tasks.
|
||||
|
||||
#### 2. Daily Development Loop
|
||||
|
||||
```bash
|
||||
# Start each session
|
||||
task-master next # Find next available task
|
||||
task-master show <id> # Review task details
|
||||
|
||||
# During implementation, log implementation context into the relevant tasks and subtasks
|
||||
task-master update-subtask --id=<id> --prompt="implementation notes..."
|
||||
|
||||
# Complete tasks
|
||||
task-master set-status --id=<id> --status=done
|
||||
```
|
||||
|
||||
#### 3. Multi-Claude Workflows
|
||||
|
||||
For complex projects, use multiple Claude Code sessions:
|
||||
|
||||
```bash
|
||||
# Terminal 1: Main implementation
|
||||
cd project && claude
|
||||
|
||||
# Terminal 2: Testing and validation
|
||||
cd project-test-worktree && claude
|
||||
|
||||
# Terminal 3: Documentation updates
|
||||
cd project-docs-worktree && claude
|
||||
```
|
||||
|
||||
### Custom Slash Commands
|
||||
|
||||
Create `.claude/commands/taskmaster-next.md`:
|
||||
|
||||
```markdown
|
||||
Find the next available Task Master task and show its details.
|
||||
|
||||
Steps:
|
||||
|
||||
1. Run `task-master next` to get the next task
|
||||
2. If a task is available, run `task-master show <id>` for full details
|
||||
3. Provide a summary of what needs to be implemented
|
||||
4. Suggest the first implementation step
|
||||
```
|
||||
|
||||
Create `.claude/commands/taskmaster-complete.md`:
|
||||
|
||||
```markdown
|
||||
Complete a Task Master task: $ARGUMENTS
|
||||
|
||||
Steps:
|
||||
|
||||
1. Review the current task with `task-master show $ARGUMENTS`
|
||||
2. Verify all implementation is complete
|
||||
3. Run any tests related to this task
|
||||
4. Mark as complete: `task-master set-status --id=$ARGUMENTS --status=done`
|
||||
5. Show the next available task with `task-master next`
|
||||
```
|
||||
|
||||
## Tool Allowlist Recommendations
|
||||
|
||||
Add to `.claude/settings.json`:
|
||||
|
||||
```json
|
||||
{
|
||||
"allowedTools": [
|
||||
"Edit",
|
||||
"Bash(task-master *)",
|
||||
"Bash(git commit:*)",
|
||||
"Bash(git add:*)",
|
||||
"Bash(npm run *)",
|
||||
"mcp__task_master_ai__*"
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
## Configuration & Setup
|
||||
|
||||
### API Keys Required
|
||||
|
||||
At least **one** of these API keys must be configured:
|
||||
|
||||
- `ANTHROPIC_API_KEY` (Claude models) - **Recommended**
|
||||
- `PERPLEXITY_API_KEY` (Research features) - **Highly recommended**
|
||||
- `OPENAI_API_KEY` (GPT models)
|
||||
- `GOOGLE_API_KEY` (Gemini models)
|
||||
- `MISTRAL_API_KEY` (Mistral models)
|
||||
- `OPENROUTER_API_KEY` (Multiple models)
|
||||
- `XAI_API_KEY` (Grok models)
|
||||
|
||||
An API key is required for any provider used across any of the 3 roles defined in the `models` command.
|
||||
|
||||
### Model Configuration
|
||||
|
||||
```bash
|
||||
# Interactive setup (recommended)
|
||||
task-master models --setup
|
||||
|
||||
# Set specific models
|
||||
task-master models --set-main claude-3-5-sonnet-20241022
|
||||
task-master models --set-research perplexity-llama-3.1-sonar-large-128k-online
|
||||
task-master models --set-fallback gpt-4o-mini
|
||||
```
|
||||
|
||||
## Task Structure & IDs
|
||||
|
||||
### Task ID Format
|
||||
|
||||
- Main tasks: `1`, `2`, `3`, etc.
|
||||
- Subtasks: `1.1`, `1.2`, `2.1`, etc.
|
||||
- Sub-subtasks: `1.1.1`, `1.1.2`, etc.
|
||||
|
||||
### Task Status Values
|
||||
|
||||
- `pending` - Ready to work on
|
||||
- `in-progress` - Currently being worked on
|
||||
- `done` - Completed and verified
|
||||
- `deferred` - Postponed
|
||||
- `cancelled` - No longer needed
|
||||
- `blocked` - Waiting on external factors
|
||||
|
||||
### Task Fields
|
||||
|
||||
```json
|
||||
{
|
||||
"id": "1.2",
|
||||
"title": "Implement user authentication",
|
||||
"description": "Set up JWT-based auth system",
|
||||
"status": "pending",
|
||||
"priority": "high",
|
||||
"dependencies": ["1.1"],
|
||||
"details": "Use bcrypt for hashing, JWT for tokens...",
|
||||
"testStrategy": "Unit tests for auth functions, integration tests for login flow",
|
||||
"subtasks": []
|
||||
}
|
||||
```
|
||||
|
||||
## Claude Code Best Practices with Task Master
|
||||
|
||||
### Context Management
|
||||
|
||||
- Use `/clear` between different tasks to maintain focus
|
||||
- This CLAUDE.md file is automatically loaded for context
|
||||
- Use `task-master show <id>` to pull specific task context when needed
|
||||
|
||||
### Iterative Implementation
|
||||
|
||||
1. `task-master show <subtask-id>` - Understand requirements
|
||||
2. Explore codebase and plan implementation
|
||||
3. `task-master update-subtask --id=<id> --prompt="detailed plan"` - Log plan
|
||||
4. `task-master set-status --id=<id> --status=in-progress` - Start work
|
||||
5. Implement code following logged plan
|
||||
6. `task-master update-subtask --id=<id> --prompt="what worked/didn't work"` - Log progress
|
||||
7. `task-master set-status --id=<id> --status=done` - Complete task
|
||||
|
||||
### Complex Workflows with Checklists
|
||||
|
||||
For large migrations or multi-step processes:
|
||||
|
||||
1. Create a markdown PRD file describing the new changes: `touch task-migration-checklist.md` (PRDs can be .txt or .md)
|
||||
2. Use Taskmaster to parse the new PRD with `task-master parse-prd --append` (also available in MCP)
|
||||
3. Use Taskmaster to expand the newly generated tasks into subtasks. Consider using `analyze-complexity` with the correct `--from` and `--to` IDs (the new IDs) to identify the ideal number of subtasks for each task, then expand them.
|
||||
4. Work through items systematically, checking them off as completed
|
||||
5. Use `task-master update-subtask` to log progress on each task/subtask, and update or research them before/during implementation if you get stuck
|
||||
|
||||
### Git Integration
|
||||
|
||||
Task Master works well with `gh` CLI:
|
||||
|
||||
```bash
|
||||
# Create PR for completed task
|
||||
gh pr create --title "Complete task 1.2: User authentication" --body "Implements JWT auth system as specified in task 1.2"
|
||||
|
||||
# Reference task in commits
|
||||
git commit -m "feat: implement JWT auth (task 1.2)"
|
||||
```
|
||||
|
||||
### Parallel Development with Git Worktrees
|
||||
|
||||
```bash
|
||||
# Create worktrees for parallel task development
|
||||
git worktree add ../project-auth feature/auth-system
|
||||
git worktree add ../project-api feature/api-refactor
|
||||
|
||||
# Run Claude Code in each worktree
|
||||
cd ../project-auth && claude # Terminal 1: Auth work
|
||||
cd ../project-api && claude # Terminal 2: API work
|
||||
```
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### AI Commands Failing
|
||||
|
||||
```bash
|
||||
# Check API keys are configured
|
||||
cat .env # For CLI usage
|
||||
|
||||
# Verify model configuration
|
||||
task-master models
|
||||
|
||||
# Test with different model
|
||||
task-master models --set-fallback gpt-4o-mini
|
||||
```
|
||||
|
||||
### MCP Connection Issues
|
||||
|
||||
- Check `.mcp.json` configuration
|
||||
- Verify Node.js installation
|
||||
- Use `--mcp-debug` flag when starting Claude Code
|
||||
- Use CLI as fallback if MCP unavailable
|
||||
|
||||
### Task File Sync Issues
|
||||
|
||||
```bash
|
||||
# Regenerate task files from tasks.json
|
||||
task-master generate
|
||||
|
||||
# Fix dependency issues
|
||||
task-master fix-dependencies
|
||||
```
|
||||
|
||||
DO NOT RE-INITIALIZE. That will not do anything beyond re-adding the same Taskmaster core files.
|
||||
|
||||
## Important Notes
|
||||
|
||||
### AI-Powered Operations
|
||||
|
||||
These commands make AI calls and may take up to a minute:
|
||||
|
||||
- `parse_prd` / `task-master parse-prd`
|
||||
- `analyze_project_complexity` / `task-master analyze-complexity`
|
||||
- `expand_task` / `task-master expand`
|
||||
- `expand_all` / `task-master expand --all`
|
||||
- `add_task` / `task-master add-task`
|
||||
- `update` / `task-master update`
|
||||
- `update_task` / `task-master update-task`
|
||||
- `update_subtask` / `task-master update-subtask`
|
||||
|
||||
### File Management
|
||||
|
||||
- Never manually edit `tasks.json` - use commands instead
|
||||
- Never manually edit `.taskmaster/config.json` - use `task-master models`
|
||||
- Task markdown files in `tasks/` are auto-generated
|
||||
- Run `task-master generate` after manual changes to tasks.json
|
||||
|
||||
### Claude Code Session Management
|
||||
|
||||
- Use `/clear` frequently to maintain focused context
|
||||
- Create custom slash commands for repeated Task Master workflows
|
||||
- Configure tool allowlist to streamline permissions
|
||||
- Use headless mode for automation: `claude -p "task-master next"`
|
||||
|
||||
### Multi-Task Updates
|
||||
|
||||
- Use `update --from=<id>` to update multiple future tasks
|
||||
- Use `update-task --id=<id>` for single task updates
|
||||
- Use `update-subtask --id=<id>` for implementation logging
|
||||
|
||||
### Research Mode
|
||||
|
||||
- Add `--research` flag for research-based AI enhancement
|
||||
- Requires a research model API key like Perplexity (`PERPLEXITY_API_KEY`) in environment
|
||||
- Provides more informed task creation and updates
|
||||
- Recommended for complex technical tasks
|
||||
|
||||
---
|
||||
|
||||
_This guide ensures Claude Code has immediate access to Task Master's essential functionality for agentic development workflows._
|
||||
@@ -2,8 +2,8 @@
|
||||
"models": {
|
||||
"main": {
|
||||
"provider": "anthropic",
|
||||
"modelId": "claude-3-7-sonnet-20250219",
|
||||
"maxTokens": 120000,
|
||||
"modelId": "claude-sonnet-4-20250514",
|
||||
"maxTokens": 64000,
|
||||
"temperature": 0.2
|
||||
},
|
||||
"research": {
|
||||
@@ -14,8 +14,8 @@
|
||||
},
|
||||
"fallback": {
|
||||
"provider": "anthropic",
|
||||
"modelId": "claude-3-5-sonnet-20241022",
|
||||
"maxTokens": 8192,
|
||||
"modelId": "claude-3-7-sonnet-20250219",
|
||||
"maxTokens": 120000,
|
||||
"temperature": 0.2
|
||||
}
|
||||
},
|
||||
@@ -29,9 +29,15 @@
|
||||
"ollamaBaseURL": "http://localhost:11434/api",
|
||||
"bedrockBaseURL": "https://bedrock.us-east-1.amazonaws.com",
|
||||
"responseLanguage": "English",
|
||||
"enableCodebaseAnalysis": true,
|
||||
"userId": "1234567890",
|
||||
"azureBaseURL": "https://your-endpoint.azure.com/",
|
||||
"defaultTag": "master"
|
||||
},
|
||||
"claudeCode": {}
|
||||
"claudeCode": {},
|
||||
"grokCli": {
|
||||
"timeout": 120000,
|
||||
"workingDirectory": null,
|
||||
"defaultModel": "grok-4-latest"
|
||||
}
|
||||
}
|
||||
|
||||
188
.taskmaster/docs/MIGRATION-ROADMAP.md
Normal file
@@ -0,0 +1,188 @@
|
||||
# Task Master Migration Roadmap
|
||||
|
||||
## Overview
|
||||
Gradual migration from scripts-based architecture to a clean monorepo with separated concerns.
|
||||
|
||||
## Architecture Vision
|
||||
|
||||
```
|
||||
┌─────────────────────────────────────────────────┐
|
||||
│ User Interfaces │
|
||||
├──────────┬──────────┬──────────┬────────────────┤
|
||||
│ @tm/cli │ @tm/mcp │ @tm/ext │ @tm/web │
|
||||
│ (CLI) │ (MCP) │ (VSCode)│ (Future) │
|
||||
└──────────┴──────────┴──────────┴────────────────┘
|
||||
│
|
||||
▼
|
||||
┌──────────────────────┐
|
||||
│ @tm/core │
|
||||
│ (Business Logic) │
|
||||
└──────────────────────┘
|
||||
```
|
||||
|
||||
## Migration Phases
|
||||
|
||||
### Phase 1: Core Extraction ✅ (In Progress)
|
||||
**Goal**: Move all business logic to @tm/core
|
||||
|
||||
- [x] Create @tm/core package structure
|
||||
- [x] Move types and interfaces
|
||||
- [x] Implement TaskMasterCore facade
|
||||
- [x] Move storage adapters
|
||||
- [x] Move task services
|
||||
- [ ] Move AI providers
|
||||
- [ ] Move parser logic
|
||||
- [ ] Complete test coverage
|
||||
|
||||
### Phase 2: CLI Package Creation 🚧 (Started)
|
||||
**Goal**: Create @tm/cli as a thin presentation layer
|
||||
|
||||
- [x] Create @tm/cli package structure
|
||||
- [x] Implement Command interface pattern
|
||||
- [x] Create CommandRegistry
|
||||
- [x] Build legacy bridge/adapter
|
||||
- [x] Migrate list-tasks command
|
||||
- [ ] Migrate remaining commands one by one
|
||||
- [ ] Remove UI logic from core
|
||||
|
||||
### Phase 3: Transitional Integration
|
||||
**Goal**: Use new packages in existing scripts without breaking changes
|
||||
|
||||
```javascript
|
||||
// scripts/modules/commands.js gradually adopts new commands
|
||||
import { ListTasksCommand } from '@tm/cli';
|
||||
const listCommand = new ListTasksCommand();
|
||||
|
||||
// Old interface remains the same
|
||||
programInstance
|
||||
.command('list')
|
||||
.action(async (options) => {
|
||||
// Use new command internally
|
||||
const result = await listCommand.execute(convertOptions(options));
|
||||
});
|
||||
```
|
||||
|
||||
### Phase 4: MCP Package
|
||||
**Goal**: Separate MCP server as its own package
|
||||
|
||||
- [ ] Create @tm/mcp package
|
||||
- [ ] Move MCP server code
|
||||
- [ ] Use @tm/core for all logic
|
||||
- [ ] MCP becomes a thin RPC layer
|
||||
|
||||
### Phase 5: Complete Migration
|
||||
**Goal**: Remove old scripts, pure monorepo
|
||||
|
||||
- [ ] All commands migrated to @tm/cli
|
||||
- [ ] Remove scripts/modules/task-manager/*
|
||||
- [ ] Remove scripts/modules/commands.js
|
||||
- [ ] Update bin/task-master.js to use @tm/cli
|
||||
- [ ] Clean up dependencies
|
||||
|
||||
## Current Transitional Strategy
|
||||
|
||||
### 1. Adapter Pattern (commands-adapter.js)
|
||||
```javascript
|
||||
// Checks if new CLI is available and uses it
|
||||
// Falls back to legacy implementation if not
|
||||
export async function listTasksAdapter(...args) {
|
||||
if (cliAvailable) {
|
||||
return useNewImplementation(...args);
|
||||
}
|
||||
return useLegacyImplementation(...args);
|
||||
}
|
||||
```
|
||||
|
||||
### 2. Command Bridge Pattern
|
||||
```javascript
|
||||
// Allows new commands to work in old code
|
||||
const bridge = new CommandBridge(new ListTasksCommand());
|
||||
const data = await bridge.run(legacyOptions); // Legacy style
|
||||
const result = await bridge.execute(newOptions); // New style
|
||||
```
|
||||
|
||||
### 3. Gradual File Migration
|
||||
Instead of big-bang refactoring:
|
||||
1. Create new implementation in @tm/cli
|
||||
2. Add adapter in commands-adapter.js
|
||||
3. Update commands.js to use adapter
|
||||
4. Test both paths work
|
||||
5. Eventually remove adapter when all migrated
|
||||
|
||||
## Benefits of This Approach
|
||||
|
||||
1. **No Breaking Changes**: Existing CLI continues to work
|
||||
2. **Incremental PRs**: Each command can be migrated separately
|
||||
3. **Parallel Development**: New features can use new architecture
|
||||
4. **Easy Rollback**: Can disable the new implementation if issues arise
|
||||
5. **Clear Separation**: Business logic (core) vs presentation (cli/mcp/etc)
|
||||
|
||||
## Example PR Sequence
|
||||
|
||||
### PR 1: Core Package Setup ✅
|
||||
- Create @tm/core
|
||||
- Move types and interfaces
|
||||
- Basic TaskMasterCore implementation
|
||||
|
||||
### PR 2: CLI Package Foundation ✅
|
||||
- Create @tm/cli
|
||||
- Command interface and registry
|
||||
- Legacy bridge utilities
|
||||
|
||||
### PR 3: First Command Migration
|
||||
- Migrate list-tasks to new system
|
||||
- Add adapter in scripts
|
||||
- Test both implementations
|
||||
|
||||
### PR 4-N: Migrate Commands One by One
|
||||
- Each PR migrates 1-2 related commands
|
||||
- Small, reviewable changes
|
||||
- Continuous delivery
|
||||
|
||||
### Final PR: Cleanup
|
||||
- Remove legacy implementations
|
||||
- Remove adapters
|
||||
- Update documentation
|
||||
|
||||
## Testing Strategy
|
||||
|
||||
### Dual Testing During Migration
|
||||
```javascript
|
||||
describe('List Tasks', () => {
|
||||
it('works with legacy implementation', async () => {
|
||||
// Force legacy
|
||||
const result = await legacyListTasks(...);
|
||||
expect(result).toBeDefined();
|
||||
});
|
||||
|
||||
it('works with new implementation', async () => {
|
||||
// Force new
|
||||
const command = new ListTasksCommand();
|
||||
const result = await command.execute(...);
|
||||
expect(result.success).toBe(true);
|
||||
});
|
||||
|
||||
it('adapter chooses correctly', async () => {
|
||||
// Let adapter decide
|
||||
const result = await listTasksAdapter(...);
|
||||
expect(result).toBeDefined();
|
||||
});
|
||||
});
|
||||
```
|
||||
|
||||
## Success Metrics
|
||||
|
||||
- [ ] All commands migrated without breaking changes
|
||||
- [ ] Test coverage maintained or improved
|
||||
- [ ] Performance maintained or improved
|
||||
- [ ] Cleaner, more maintainable codebase
|
||||
- [ ] Easy to add new interfaces (web, desktop, etc.)
|
||||
|
||||
## Notes for Contributors
|
||||
|
||||
1. **Keep PRs Small**: Migrate one command at a time
|
||||
2. **Test Both Paths**: Ensure legacy and new both work
|
||||
3. **Document Changes**: Update this roadmap as you go
|
||||
4. **Communicate**: Discuss in PRs if architecture needs adjustment
|
||||
|
||||
This is a living document - update as the migration progresses!
|
||||
Some files were not shown because too many files have changed in this diff.