Compare commits
15 Commits
fix/tasksP ... chore/merg
| Author | SHA1 | Date |
| --- | --- | --- |
|  | 458496e3b6 |  |
|  | fb92693d81 |  |
|  | f6ba4a36ee |  |
|  | baf9bd545a |  |
|  | fbea48d8ec |  |
|  | d0fe7dc25a |  |
|  | f380b8e86c |  |
|  | bd89061a1d |  |
|  | 7d5ebf05e3 |  |
|  | 21392a1117 |  |
|  | 3e61d26235 |  |
|  | dc5de53dcd |  |
|  | 4312d3bd67 |  |
|  | 0253f3ed87 |  |
|  | a65ad0a47c |  |
@@ -1,5 +0,0 @@
---
"task-master-ai": patch
---

Recover from `@anthropic-ai/claude-code` JSON truncation bug that caused Task Master to crash when handling large (>8 kB) structured responses. The CLI/SDK still truncates, but Task Master now detects the error, preserves buffered text, and returns a usable response instead of throwing.
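A minimal sketch of the detect-and-recover idea described above, assuming a hypothetical `parseStructuredResponse` helper (the name and error heuristic are illustrative, not the actual Task Master implementation):

```js
// If the SDK truncates a large structured response mid-JSON, keep the
// buffered text instead of letting the SyntaxError crash the caller.
function parseStructuredResponse(bufferedText) {
	try {
		return { ok: true, data: JSON.parse(bufferedText) };
	} catch (error) {
		if (error instanceof SyntaxError) {
			// Truncated JSON from the CLI/SDK: preserve what was buffered so the
			// caller can still return a usable response instead of throwing.
			return { ok: false, partialText: bufferedText, error: error.message };
		}
		throw error; // Unrelated errors still propagate.
	}
}
```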
@@ -1,5 +0,0 @@
---
"task-master-ai": patch
---

Update dependency ai-sdk-provider-gemini-cli to 0.0.4 to address a breaking change Google made to the Gemini CLI, and accept 'api-key' in addition to 'gemini-api-key' for better AI SDK compatibility.
@@ -1,9 +0,0 @@
---
"task-master-ai": minor
---

Add support for xAI Grok 4 model

- Add grok-4 model to xAI provider with $3/$15 per 1M token pricing
- Enable main, fallback, and research roles for grok-4
- Max tokens set to 131,072 (matching other xAI models)
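Judging from the supported-models.json structure shown at the end of this diff, the new grok-4 entry presumably looks roughly like this (a sketch; the max-tokens field name and the unset SWE score, shown as "—" in the model tables, are assumptions):

```json
{
	"id": "grok-4",
	"swe_score": null,
	"cost_per_1m_tokens": { "input": 3, "output": 15 },
	"allowed_roles": ["main", "fallback", "research"],
	"max_tokens": 131072
}
```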
@@ -1,8 +0,0 @@
---
"task-master-ai": minor
---

Add stricter validation and clearer feedback for task priority when adding new tasks

- If a task priority is invalid, it now defaults to medium
- Task priority is now case-insensitive, so HIGH and high are treated as the same value
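A sketch of the described normalization (helper and names are illustrative, not the actual add-task code):

```js
const VALID_PRIORITIES = ['high', 'medium', 'low'];

// Case-insensitive priority check: 'HIGH' and 'high' normalize to the same
// value; anything unrecognized falls back to 'medium' with a warning.
function normalizePriority(input, log) {
	const candidate = String(input ?? '').toLowerCase().trim();
	if (VALID_PRIORITIES.includes(candidate)) return candidate;
	log?.('warn', `Invalid priority "${input}" - defaulting to medium`);
	return 'medium';
}
```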
@@ -1,5 +0,0 @@
---
"task-master-ai": minor
---

Add support for MCP Sampling as an AI provider; it requires no API key and uses the client's LLM provider.
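For illustration, pointing a role at the MCP provider in `.taskmaster/config.json` would presumably look like this (a sketch based on the config structure shown later in this diff; `mcp-sampling` is the model id the tables list for this provider):

```json
"main": {
	"provider": "mcp",
	"modelId": "mcp-sampling",
	"temperature": 0.2
}
```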
@@ -1,5 +0,0 @@
---
"task-master-ai": patch
---

Unify and streamline profile system architecture for improved maintainability
@@ -1,5 +0,0 @@
---
"task-master-ai": minor
---

Added Groq provider support
@@ -1,21 +1,21 @@
{
	"models": {
		"main": {
			"provider": "groq",
			"modelId": "llama-3.1-8b-instant",
			"maxTokens": 131072,
			"provider": "anthropic",
			"modelId": "claude-3-7-sonnet-20250219",
			"maxTokens": 120000,
			"temperature": 0.2
		},
		"research": {
			"provider": "groq",
			"modelId": "llama-3.3-70b-versatile",
			"maxTokens": 32768,
			"provider": "perplexity",
			"modelId": "sonar",
			"maxTokens": 8700,
			"temperature": 0.1
		},
		"fallback": {
			"provider": "anthropic",
			"modelId": "claude-3-7-sonnet-20250219",
			"maxTokens": 128000,
			"modelId": "claude-3-5-sonnet-20241022",
			"maxTokens": 8192,
			"temperature": 0.2
		}
	},
@@ -1,23 +0,0 @@
# Task ID: 1
# Title: Implement TTS Flag for Taskmaster Commands
# Status: pending
# Dependencies: 16 (Not found)
# Priority: medium
# Description: Add text-to-speech functionality to taskmaster commands with configurable voice options and audio output settings.
# Details:
Implement TTS functionality including:
- Add --tts flag to all relevant taskmaster commands (list, show, generate, etc.)
- Integrate with system TTS engines (Windows SAPI, macOS say command, Linux espeak/festival)
- Create TTS configuration options in the configuration management system
- Add voice selection options (male/female, different languages if available)
- Implement audio output settings (volume, speed, pitch)
- Add TTS-specific error handling for cases where TTS is unavailable
- Create fallback behavior when TTS fails (silent failure or text output)
- Support for reading task titles, descriptions, and status updates aloud
- Add option to read entire task lists or individual task details
- Implement TTS for command confirmations and error messages
- Create TTS output formatting to make spoken text more natural (removing markdown, formatting numbers/dates appropriately)
- Add configuration option to enable/disable TTS globally

# Test Strategy:
Test TTS functionality across different operating systems (Windows, macOS, Linux). Verify that the --tts flag works with all major commands. Test voice configuration options and ensure audio output settings are properly applied. Test error handling when TTS services are unavailable. Verify that text formatting for speech is natural and understandable. Test with various task content types including special characters, code snippets, and long descriptions. Ensure TTS can be disabled and enabled through configuration.
File diff suppressed because one or more lines are too long

.vscode/settings.json (vendored, new file, 14 lines)
@@ -0,0 +1,14 @@
{
	"json.schemas": [
		{
			"fileMatch": ["src/prompts/*.json"],
			"url": "./src/prompts/schemas/prompt-template.schema.json"
		}
	],
	"files.associations": {
		"src/prompts/*.json": "json"
	},

	"json.format.enable": true,
	"json.validate.enable": true
}
CHANGELOG.md (50 lines changed)
@@ -1,5 +1,55 @@
# task-master-ai

## 0.20.0

### Minor Changes

- [#950](https://github.com/eyaltoledano/claude-task-master/pull/950) [`699e9ee`](https://github.com/eyaltoledano/claude-task-master/commit/699e9eefb5d687b256e9402d686bdd5e3a358b4a) Thanks [@ben-vargas](https://github.com/ben-vargas)! - Add support for xAI Grok 4 model
  - Add grok-4 model to xAI provider with $3/$15 per 1M token pricing
  - Enable main, fallback, and research roles for grok-4
  - Max tokens set to 131,072 (matching other xAI models)

- [#946](https://github.com/eyaltoledano/claude-task-master/pull/946) [`5f009a5`](https://github.com/eyaltoledano/claude-task-master/commit/5f009a5e1fc10e37be26f5135df4b7f44a9c5320) Thanks [@Crunchyman-ralph](https://github.com/Crunchyman-ralph)! - Add stricter validation and clearer feedback for task priority when adding new tasks
  - If a task priority is invalid, it now defaults to medium
  - Task priority is now case-insensitive, so HIGH and high are treated as the same value

- [#863](https://github.com/eyaltoledano/claude-task-master/pull/863) [`b530657`](https://github.com/eyaltoledano/claude-task-master/commit/b53065713c8da0ae6f18eb2655397aa975004923) Thanks [@OrenMe](https://github.com/OrenMe)! - Add support for MCP Sampling as an AI provider; it requires no API key and uses the client's LLM provider.

- [#930](https://github.com/eyaltoledano/claude-task-master/pull/930) [`98d1c97`](https://github.com/eyaltoledano/claude-task-master/commit/98d1c974361a56ddbeb772b1272986b9d3913459) Thanks [@OmarElKadri](https://github.com/OmarElKadri)! - Added Groq provider support

### Patch Changes

- [#958](https://github.com/eyaltoledano/claude-task-master/pull/958) [`6c88a4a`](https://github.com/eyaltoledano/claude-task-master/commit/6c88a4a749083e3bd2d073a9240799771774495a) Thanks [@Crunchyman-ralph](https://github.com/Crunchyman-ralph)! - Recover from `@anthropic-ai/claude-code` JSON truncation bug that caused Task Master to crash when handling large (>8 kB) structured responses. The CLI/SDK still truncates, but Task Master now detects the error, preserves buffered text, and returns a usable response instead of throwing.

- [#958](https://github.com/eyaltoledano/claude-task-master/pull/958) [`3334e40`](https://github.com/eyaltoledano/claude-task-master/commit/3334e409ae659d5223bb136ae23fd22c5e219073) Thanks [@Crunchyman-ralph](https://github.com/Crunchyman-ralph)! - Update dependency ai-sdk-provider-gemini-cli to 0.0.4 to address a breaking change Google made to the Gemini CLI, and accept 'api-key' in addition to 'gemini-api-key' for better AI SDK compatibility.

- [#853](https://github.com/eyaltoledano/claude-task-master/pull/853) [`95c299d`](https://github.com/eyaltoledano/claude-task-master/commit/95c299df642bd8e6d75f8fa5110ac705bcc72edf) Thanks [@joedanz](https://github.com/joedanz)! - Unify and streamline profile system architecture for improved maintainability

## 0.20.0-rc.0

### Minor Changes

- [#950](https://github.com/eyaltoledano/claude-task-master/pull/950) [`699e9ee`](https://github.com/eyaltoledano/claude-task-master/commit/699e9eefb5d687b256e9402d686bdd5e3a358b4a) Thanks [@ben-vargas](https://github.com/ben-vargas)! - Add support for xAI Grok 4 model
  - Add grok-4 model to xAI provider with $3/$15 per 1M token pricing
  - Enable main, fallback, and research roles for grok-4
  - Max tokens set to 131,072 (matching other xAI models)

- [#946](https://github.com/eyaltoledano/claude-task-master/pull/946) [`5f009a5`](https://github.com/eyaltoledano/claude-task-master/commit/5f009a5e1fc10e37be26f5135df4b7f44a9c5320) Thanks [@Crunchyman-ralph](https://github.com/Crunchyman-ralph)! - Add stricter validation and clearer feedback for task priority when adding new tasks
  - If a task priority is invalid, it now defaults to medium
  - Task priority is now case-insensitive, so HIGH and high are treated as the same value

- [#863](https://github.com/eyaltoledano/claude-task-master/pull/863) [`b530657`](https://github.com/eyaltoledano/claude-task-master/commit/b53065713c8da0ae6f18eb2655397aa975004923) Thanks [@OrenMe](https://github.com/OrenMe)! - Add support for MCP Sampling as an AI provider; it requires no API key and uses the client's LLM provider.

- [#930](https://github.com/eyaltoledano/claude-task-master/pull/930) [`98d1c97`](https://github.com/eyaltoledano/claude-task-master/commit/98d1c974361a56ddbeb772b1272986b9d3913459) Thanks [@OmarElKadri](https://github.com/OmarElKadri)! - Added Groq provider support

### Patch Changes

- [#916](https://github.com/eyaltoledano/claude-task-master/pull/916) [`6c88a4a`](https://github.com/eyaltoledano/claude-task-master/commit/6c88a4a749083e3bd2d073a9240799771774495a) Thanks [@Crunchyman-ralph](https://github.com/Crunchyman-ralph)! - Recover from `@anthropic-ai/claude-code` JSON truncation bug that caused Task Master to crash when handling large (>8 kB) structured responses. The CLI/SDK still truncates, but Task Master now detects the error, preserves buffered text, and returns a usable response instead of throwing.

- [#916](https://github.com/eyaltoledano/claude-task-master/pull/916) [`3334e40`](https://github.com/eyaltoledano/claude-task-master/commit/3334e409ae659d5223bb136ae23fd22c5e219073) Thanks [@Crunchyman-ralph](https://github.com/Crunchyman-ralph)! - Update dependency ai-sdk-provider-gemini-cli to 0.0.4 to address a breaking change Google made to the Gemini CLI, and accept 'api-key' in addition to 'gemini-api-key' for better AI SDK compatibility.

- [#853](https://github.com/eyaltoledano/claude-task-master/pull/853) [`95c299d`](https://github.com/eyaltoledano/claude-task-master/commit/95c299df642bd8e6d75f8fa5110ac705bcc72edf) Thanks [@joedanz](https://github.com/joedanz)! - Unify and streamline profile system architecture for improved maintainability

## 0.19.0

### Minor Changes

@@ -4,30 +4,7 @@ Taskmaster uses two primary methods for configuration:

1. **`.taskmaster/config.json` File (Recommended - New Structure)**

- This JSON file stores most configuration settings, including A5. **Usage Requirements**:
8. **Troubleshooting**:
- "MCP provider requires session context" → Ensure running in MCP environment
- See the [MCP Provider Guide](./mcp-provider-guide.md) for detailed troubleshootingust be running in an MCP context (session must be available)
- Session must provide `clientCapabilities.sampling` capability

6. **Best Practices**:
- Always configure a non-MCP fallback provider
- Use `mcp` for main/research roles when in MCP environments
- Test sampling capability before production use

7. **Setup Commands**:
```bash
# Set MCP provider for main role
task-master models set-main --provider mcp --model claude-3-5-sonnet-20241022

# Set MCP provider for research role
task-master models set-research --provider mcp --model claude-3-opus-20240229

# Verify configuration
task-master models list
```

8. **Troubleshooting**:lections, parameters, logging levels, and project defaults.
- This JSON file stores most configuration settings, including AI model selections, parameters, logging levels, and project defaults.
- **Location:** This file is created in the `.taskmaster/` directory when you run the `task-master models --setup` interactive setup or initialize a new project with `task-master init`.
- **Migration:** Existing projects with `.taskmasterconfig` in the root will continue to work, but should be migrated to the new structure using `task-master migrate`.
- **Management:** Use the `task-master models --setup` command (or `models` MCP tool) to interactively create and manage this file. You can also set specific models directly using `task-master models --set-<role>=<model_id>`, adding `--ollama` or `--openrouter` flags for custom models. Manual editing is possible but not recommended unless you understand the structure; the direct-set syntax is sketched just below.
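For example, the direct-set syntax described above might be invoked like this (model ids are taken from the tables in docs/models.md; treat the exact invocations as a sketch):

```bash
# Set the main model directly
task-master models --set-main=claude-3-7-sonnet-20250219

# Use a custom Ollama model for the fallback role
task-master models --set-fallback=llama3.3:latest --ollama
```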
@@ -68,11 +45,12 @@ Taskmaster uses two primary methods for configuration:
"azureBaseURL": "https://your-endpoint.azure.com/openai/deployments",
"vertexProjectId": "your-gcp-project-id",
"vertexLocation": "us-central1",
"responseLanguage": "English"
"responseLanguage": "English"
}
}
```

> For MCP-specific setup and troubleshooting, see [Provider-Specific Configuration](#provider-specific-configuration).

2. **Legacy `.taskmasterconfig` File (Backward Compatibility)**
@@ -198,8 +176,6 @@ node scripts/init.js

### MCP (Model Context Protocol) Provider

The MCP provider enables Task Master to use MCP servers as AI providers. This is particularly useful when running Task Master within MCP-compatible development environments like Claude Desktop or Cursor.

1. **Prerequisites**:
- An active MCP session with sampling capability
- MCP client with sampling support (e.g. VS Code)

@@ -238,12 +214,24 @@ The MCP provider enables Task Master to use MCP servers as AI providers. This is
- Must be running in an MCP context (session must be available)
- Session must provide `clientCapabilities.sampling` capability

5. **Best Practices**:
6. **Best Practices**:
- Always configure a non-MCP fallback provider
- Use `mcp` for main/research roles when in MCP environments
- Test sampling capability before production use

6. **Troubleshooting**:
7. **Setup Commands**:
```bash
# Set MCP provider for main role
task-master models set-main --provider mcp --model claude-3-5-sonnet-20241022

# Set MCP provider for research role
task-master models set-research --provider mcp --model claude-3-opus-20240229

# Verify configuration
task-master models list
```

8. **Troubleshooting**:
- "MCP provider requires session context" → Ensure running in MCP environment
- See the [MCP Provider Guide](./mcp-provider-guide.md) for detailed troubleshooting
docs/models.md (192 lines changed)
@@ -4,21 +4,15 @@

| Provider | Model Name | SWE Score | Input Cost | Output Cost |
| --- | --- | --- | --- | --- |
| bedrock | us.anthropic.claude-3-haiku-20240307-v1:0 | 0.4 | 0.25 | 1.25 |
| bedrock | us.anthropic.claude-3-opus-20240229-v1:0 | 0.725 | 15 | 75 |
| bedrock | us.anthropic.claude-3-5-sonnet-20240620-v1:0 | 0.49 | 3 | 15 |
| bedrock | us.anthropic.claude-3-5-sonnet-20241022-v2:0 | 0.49 | 3 | 15 |
| bedrock | us.anthropic.claude-3-7-sonnet-20250219-v1:0 | 0.623 | 3 | 15 |
| bedrock | us.anthropic.claude-3-5-haiku-20241022-v1:0 | 0.4 | 0.8 | 4 |
| bedrock | us.anthropic.claude-opus-4-20250514-v1:0 | 0.725 | 15 | 75 |
| bedrock | us.anthropic.claude-sonnet-4-20250514-v1:0 | 0.727 | 3 | 15 |
| anthropic | claude-sonnet-4-20250514 | 0.727 | 3 | 15 |
| anthropic | claude-opus-4-20250514 | 0.725 | 15 | 75 |
| anthropic | claude-3-7-sonnet-20250219 | 0.623 | 3 | 15 |
| anthropic | claude-3-5-sonnet-20241022 | 0.49 | 3 | 15 |
| azure | gpt-4o | 0.332 | 2.5 | 10 |
| azure | gpt-4o-mini | 0.3 | 0.15 | 0.6 |
| azure | gpt-4-1 | — | 2 | 10 |
| claude-code | opus | 0.725 | 0 | 0 |
| claude-code | sonnet | 0.727 | 0 | 0 |
| mcp | mcp-sampling | — | 0 | 0 |
| gemini-cli | gemini-2.5-pro | 0.72 | 0 | 0 |
| gemini-cli | gemini-2.5-flash | 0.71 | 0 | 0 |
| openai | gpt-4o | 0.332 | 2.5 | 10 |
| openai | o1 | 0.489 | 15 | 60 |
| openai | o3 | 0.5 | 2 | 8 |
@@ -35,19 +29,21 @@
| google | gemini-2.5-flash-preview-04-17 | 0.604 | — | — |
| google | gemini-2.0-flash | 0.518 | 0.15 | 0.6 |
| google | gemini-2.0-flash-lite | — | — | — |
| perplexity | sonar-pro | — | 3 | 15 |
| perplexity | sonar-reasoning-pro | 0.211 | 2 | 8 |
| perplexity | sonar-reasoning | 0.211 | 1 | 5 |
| xai | grok-3 | — | 3 | 15 |
| xai | grok-3-fast | — | 5 | 25 |
| xai | grok-4 | — | 3 | 15 |
| ollama | devstral:latest | — | 0 | 0 |
| ollama | qwen3:latest | — | 0 | 0 |
| ollama | qwen3:14b | — | 0 | 0 |
| ollama | qwen3:32b | — | 0 | 0 |
| ollama | mistral-small3.1:latest | — | 0 | 0 |
| ollama | llama3.3:latest | — | 0 | 0 |
| ollama | phi4:latest | — | 0 | 0 |
| groq | llama-3.3-70b-versatile | 0.55 | 0.59 | 0.79 |
| groq | llama-3.1-8b-instant | 0.32 | 0.05 | 0.08 |
| groq | llama-4-scout | 0.45 | 0.11 | 0.34 |
| groq | llama-4-maverick | 0.52 | 0.5 | 0.77 |
| groq | mixtral-8x7b-32768 | 0.35 | 0.24 | 0.24 |
| groq | qwen-qwq-32b-preview | 0.4 | 0.18 | 0.18 |
| groq | deepseek-r1-distill-llama-70b | 0.52 | 0.75 | 0.99 |
| groq | gemma2-9b-it | 0.3 | 0.2 | 0.2 |
| groq | whisper-large-v3 | — | 0.11 | 0 |
| perplexity | sonar-pro | — | 3 | 15 |
| perplexity | sonar-reasoning-pro | 0.211 | 2 | 8 |
| perplexity | sonar-reasoning | 0.211 | 1 | 5 |
| openrouter | google/gemini-2.5-flash-preview-05-20 | — | 0.15 | 0.6 |
| openrouter | google/gemini-2.5-flash-preview-05-20:thinking | — | 0.15 | 3.5 |
| openrouter | google/gemini-2.5-pro-exp-03-25 | — | 0 | 0 |
@@ -73,57 +69,16 @@
| openrouter | mistralai/devstral-small | — | 0.1 | 0.3 |
| openrouter | mistralai/mistral-nemo | — | 0.03 | 0.07 |
| openrouter | thudm/glm-4-32b:free | — | 0 | 0 |
| groq | llama-3.3-70b-versatile | 0.55 | 0.59 | 0.79 |
| groq | llama-3.1-8b-instant | 0.32 | 0.05 | 0.08 |
| groq | llama-4-scout | 0.45 | 0.11 | 0.34 |
| groq | llama-4-maverick | 0.52 | 0.5 | 0.77 |
| groq | mixtral-8x7b-32768 | 0.35 | 0.24 | 0.24 |
| groq | qwen-qwq-32b-preview | 0.4 | 0.18 | 0.18 |
| groq | deepseek-r1-distill-llama-70b | 0.52 | 0.75 | 0.99 |
| groq | gemma2-9b-it | 0.3 | 0.2 | 0.2 |
| groq | whisper-large-v3 | — | 0.11 | 0 |
| claude-code | opus | 0.725 | 0 | 0 |
| claude-code | sonnet | 0.727 | 0 | 0 |
| mcp | mcp-sampling | — | 0 | 0 |
| gemini-cli | gemini-2.5-pro | 0.72 | 0 | 0 |
| gemini-cli | gemini-2.5-flash | 0.71 | 0 | 0 |

## Research Models

| Provider | Model Name | SWE Score | Input Cost | Output Cost |
| --- | --- | --- | --- | --- |
| bedrock | us.anthropic.claude-3-opus-20240229-v1:0 | 0.725 | 15 | 75 |
| bedrock | us.anthropic.claude-3-5-sonnet-20240620-v1:0 | 0.49 | 3 | 15 |
| bedrock | us.anthropic.claude-3-5-sonnet-20241022-v2:0 | 0.49 | 3 | 15 |
| bedrock | us.anthropic.claude-3-7-sonnet-20250219-v1:0 | 0.623 | 3 | 15 |
| bedrock | us.anthropic.claude-opus-4-20250514-v1:0 | 0.725 | 15 | 75 |
| bedrock | us.anthropic.claude-sonnet-4-20250514-v1:0 | 0.727 | 3 | 15 |
| bedrock | us.deepseek.r1-v1:0 | — | 1.35 | 5.4 |
| openai | gpt-4o-search-preview | 0.33 | 2.5 | 10 |
| openai | gpt-4o-mini-search-preview | 0.3 | 0.15 | 0.6 |
| perplexity | sonar-pro | — | 3 | 15 |
| perplexity | sonar | — | 1 | 1 |
| perplexity | deep-research | 0.211 | 2 | 8 |
| perplexity | sonar-reasoning-pro | 0.211 | 2 | 8 |
| perplexity | sonar-reasoning | 0.211 | 1 | 5 |
| xai | grok-3 | — | 3 | 15 |
| xai | grok-3-fast | — | 5 | 25 |
| xai | grok-4 | — | 3 | 15 |
| groq | llama-3.3-70b-versatile | 0.55 | 0.59 | 0.79 |
| groq | llama-4-scout | 0.45 | 0.11 | 0.34 |
| groq | llama-4-maverick | 0.52 | 0.5 | 0.77 |
| groq | qwen-qwq-32b-preview | 0.4 | 0.18 | 0.18 |
| groq | deepseek-r1-distill-llama-70b | 0.52 | 0.75 | 0.99 |
| claude-code | opus | 0.725 | 0 | 0 |
| claude-code | sonnet | 0.727 | 0 | 0 |
| mcp | mcp-sampling | — | 0 | 0 |
| gemini-cli | gemini-2.5-pro | 0.72 | 0 | 0 |
| gemini-cli | gemini-2.5-flash | 0.71 | 0 | 0 |

## Fallback Models

| Provider | Model Name | SWE Score | Input Cost | Output Cost |
| --- | --- | --- | --- | --- |
| ollama | devstral:latest | — | 0 | 0 |
| ollama | qwen3:latest | — | 0 | 0 |
| ollama | qwen3:14b | — | 0 | 0 |
| ollama | qwen3:32b | — | 0 | 0 |
| ollama | mistral-small3.1:latest | — | 0 | 0 |
| ollama | llama3.3:latest | — | 0 | 0 |
| ollama | phi4:latest | — | 0 | 0 |
| azure | gpt-4o | 0.332 | 2.5 | 10 |
| azure | gpt-4o-mini | 0.3 | 0.15 | 0.6 |
| azure | gpt-4-1 | — | 2 | 10 |
| bedrock | us.anthropic.claude-3-haiku-20240307-v1:0 | 0.4 | 0.25 | 1.25 |
| bedrock | us.anthropic.claude-3-opus-20240229-v1:0 | 0.725 | 15 | 75 |
| bedrock | us.anthropic.claude-3-5-sonnet-20240620-v1:0 | 0.49 | 3 | 15 |
@@ -132,13 +87,52 @@
| bedrock | us.anthropic.claude-3-5-haiku-20241022-v1:0 | 0.4 | 0.8 | 4 |
| bedrock | us.anthropic.claude-opus-4-20250514-v1:0 | 0.725 | 15 | 75 |
| bedrock | us.anthropic.claude-sonnet-4-20250514-v1:0 | 0.727 | 3 | 15 |

## Research Models

| Provider | Model Name | SWE Score | Input Cost | Output Cost |
| --- | --- | --- | --- | --- |
| claude-code | opus | 0.725 | 0 | 0 |
| claude-code | sonnet | 0.727 | 0 | 0 |
| mcp | mcp-sampling | — | 0 | 0 |
| gemini-cli | gemini-2.5-pro | 0.72 | 0 | 0 |
| gemini-cli | gemini-2.5-flash | 0.71 | 0 | 0 |
| openai | gpt-4o-search-preview | 0.33 | 2.5 | 10 |
| openai | gpt-4o-mini-search-preview | 0.3 | 0.15 | 0.6 |
| xai | grok-3 | — | 3 | 15 |
| xai | grok-3-fast | — | 5 | 25 |
| xai | grok-4 | — | 3 | 15 |
| groq | llama-3.3-70b-versatile | 0.55 | 0.59 | 0.79 |
| groq | llama-4-scout | 0.45 | 0.11 | 0.34 |
| groq | llama-4-maverick | 0.52 | 0.5 | 0.77 |
| groq | qwen-qwq-32b-preview | 0.4 | 0.18 | 0.18 |
| groq | deepseek-r1-distill-llama-70b | 0.52 | 0.75 | 0.99 |
| perplexity | sonar-pro | — | 3 | 15 |
| perplexity | sonar | — | 1 | 1 |
| perplexity | deep-research | 0.211 | 2 | 8 |
| perplexity | sonar-reasoning-pro | 0.211 | 2 | 8 |
| perplexity | sonar-reasoning | 0.211 | 1 | 5 |
| bedrock | us.anthropic.claude-3-opus-20240229-v1:0 | 0.725 | 15 | 75 |
| bedrock | us.anthropic.claude-3-5-sonnet-20240620-v1:0 | 0.49 | 3 | 15 |
| bedrock | us.anthropic.claude-3-5-sonnet-20241022-v2:0 | 0.49 | 3 | 15 |
| bedrock | us.anthropic.claude-3-7-sonnet-20250219-v1:0 | 0.623 | 3 | 15 |
| bedrock | us.anthropic.claude-opus-4-20250514-v1:0 | 0.725 | 15 | 75 |
| bedrock | us.anthropic.claude-sonnet-4-20250514-v1:0 | 0.727 | 3 | 15 |
| bedrock | us.deepseek.r1-v1:0 | — | 1.35 | 5.4 |

## Fallback Models

| Provider | Model Name | SWE Score | Input Cost | Output Cost |
| --- | --- | --- | --- | --- |
| anthropic | claude-sonnet-4-20250514 | 0.727 | 3 | 15 |
| anthropic | claude-opus-4-20250514 | 0.725 | 15 | 75 |
| anthropic | claude-3-7-sonnet-20250219 | 0.623 | 3 | 15 |
| anthropic | claude-3-5-sonnet-20241022 | 0.49 | 3 | 15 |
| azure | gpt-4o | 0.332 | 2.5 | 10 |
| azure | gpt-4o-mini | 0.3 | 0.15 | 0.6 |
| azure | gpt-4-1 | — | 2 | 10 |
| claude-code | opus | 0.725 | 0 | 0 |
| claude-code | sonnet | 0.727 | 0 | 0 |
| mcp | mcp-sampling | — | 0 | 0 |
| gemini-cli | gemini-2.5-pro | 0.72 | 0 | 0 |
| gemini-cli | gemini-2.5-flash | 0.71 | 0 | 0 |
| openai | gpt-4o | 0.332 | 2.5 | 10 |
| openai | o3 | 0.5 | 2 | 8 |
| openai | o4-mini | 0.45 | 1.1 | 4.4 |
@@ -147,18 +141,18 @@
| google | gemini-2.5-flash-preview-04-17 | 0.604 | — | — |
| google | gemini-2.0-flash | 0.518 | 0.15 | 0.6 |
| google | gemini-2.0-flash-lite | — | — | — |
| perplexity | sonar-reasoning-pro | 0.211 | 2 | 8 |
| perplexity | sonar-reasoning | 0.211 | 1 | 5 |
| xai | grok-3 | — | 3 | 15 |
| xai | grok-3-fast | — | 5 | 25 |
| xai | grok-4 | — | 3 | 15 |
| ollama | devstral:latest | — | 0 | 0 |
| ollama | qwen3:latest | — | 0 | 0 |
| ollama | qwen3:14b | — | 0 | 0 |
| ollama | qwen3:32b | — | 0 | 0 |
| ollama | mistral-small3.1:latest | — | 0 | 0 |
| ollama | llama3.3:latest | — | 0 | 0 |
| ollama | phi4:latest | — | 0 | 0 |
| groq | llama-3.3-70b-versatile | 0.55 | 0.59 | 0.79 |
| groq | llama-3.1-8b-instant | 0.32 | 0.05 | 0.08 |
| groq | llama-4-scout | 0.45 | 0.11 | 0.34 |
| groq | llama-4-maverick | 0.52 | 0.5 | 0.77 |
| groq | mixtral-8x7b-32768 | 0.35 | 0.24 | 0.24 |
| groq | qwen-qwq-32b-preview | 0.4 | 0.18 | 0.18 |
| groq | gemma2-9b-it | 0.3 | 0.2 | 0.2 |
| perplexity | sonar-reasoning-pro | 0.211 | 2 | 8 |
| perplexity | sonar-reasoning | 0.211 | 1 | 5 |
| openrouter | google/gemini-2.5-flash-preview-05-20 | — | 0.15 | 0.6 |
| openrouter | google/gemini-2.5-flash-preview-05-20:thinking | — | 0.15 | 3.5 |
| openrouter | google/gemini-2.5-pro-exp-03-25 | — | 0 | 0 |
@@ -182,15 +176,21 @@
| openrouter | mistralai/mistral-small-3.1-24b-instruct | — | 0.1 | 0.3 |
| openrouter | mistralai/mistral-nemo | — | 0.03 | 0.07 |
| openrouter | thudm/glm-4-32b:free | — | 0 | 0 |
| groq | llama-3.3-70b-versatile | 0.55 | 0.59 | 0.79 |
| groq | llama-3.1-8b-instant | 0.32 | 0.05 | 0.08 |
| groq | llama-4-scout | 0.45 | 0.11 | 0.34 |
| groq | llama-4-maverick | 0.52 | 0.5 | 0.77 |
| groq | mixtral-8x7b-32768 | 0.35 | 0.24 | 0.24 |
| groq | qwen-qwq-32b-preview | 0.4 | 0.18 | 0.18 |
| groq | gemma2-9b-it | 0.3 | 0.2 | 0.2 |
| claude-code | opus | 0.725 | 0 | 0 |
| claude-code | sonnet | 0.727 | 0 | 0 |
| mcp | mcp-sampling | — | 0 | 0 |
| gemini-cli | gemini-2.5-pro | 0.72 | 0 | 0 |
| gemini-cli | gemini-2.5-flash | 0.71 | 0 | 0 |
| ollama | devstral:latest | — | 0 | 0 |
| ollama | qwen3:latest | — | 0 | 0 |
| ollama | qwen3:14b | — | 0 | 0 |
| ollama | qwen3:32b | — | 0 | 0 |
| ollama | mistral-small3.1:latest | — | 0 | 0 |
| ollama | llama3.3:latest | — | 0 | 0 |
| ollama | phi4:latest | — | 0 | 0 |
| azure | gpt-4o | 0.332 | 2.5 | 10 |
| azure | gpt-4o-mini | 0.3 | 0.15 | 0.6 |
| azure | gpt-4-1 | — | 2 | 10 |
| bedrock | us.anthropic.claude-3-haiku-20240307-v1:0 | 0.4 | 0.25 | 1.25 |
| bedrock | us.anthropic.claude-3-opus-20240229-v1:0 | 0.725 | 15 | 75 |
| bedrock | us.anthropic.claude-3-5-sonnet-20240620-v1:0 | 0.49 | 3 | 15 |
| bedrock | us.anthropic.claude-3-5-sonnet-20241022-v2:0 | 0.49 | 3 | 15 |
| bedrock | us.anthropic.claude-3-7-sonnet-20250219-v1:0 | 0.623 | 3 | 15 |
| bedrock | us.anthropic.claude-3-5-haiku-20241022-v1:0 | 0.4 | 0.8 | 4 |
| bedrock | us.anthropic.claude-opus-4-20250514-v1:0 | 0.725 | 15 | 75 |
| bedrock | us.anthropic.claude-sonnet-4-20250514-v1:0 | 0.727 | 3 | 15 |
@@ -125,8 +125,7 @@ export async function addTaskDirect(args, log, context = {}) {
},
'json', // outputFormat
manualTaskData, // Pass the manual task data
false, // research flag is false for manual creation
projectRoot // Pass projectRoot
false // research flag is false for manual creation
);
newTaskId = result.newTaskId;
telemetryData = result.telemetryData;
package-lock.json (generated, 2940 lines changed)
File diff suppressed because it is too large
@@ -1,6 +1,6 @@
{
"name": "task-master-ai",
"version": "0.19.0",
"version": "0.20.0",
"description": "A task management system for ambitious AI-driven development that doesn't overwhelm and confuse Cursor.",
"main": "index.js",
"type": "module",
@@ -54,6 +54,8 @@
"@inquirer/search": "^3.0.15",
"@openrouter/ai-sdk-provider": "^0.4.5",
"ai": "^4.3.10",
"ajv": "^8.17.1",
"ajv-formats": "^3.0.1",
"boxen": "^8.0.1",
"chalk": "^5.4.1",
"cli-highlight": "^2.1.11",
@@ -805,7 +805,7 @@ function registerCommands(programInstance) {
'-i, --input <file>',
'Path to the PRD file (alternative to positional argument)'
)
.option('-o, --output <file>', 'Output file path', TASKMASTER_TASKS_FILE)
.option('-o, --output <file>', 'Output file path')
.option(
'-n, --num-tasks <number>',
'Number of tasks to generate',
@@ -825,14 +825,18 @@ function registerCommands(programInstance) {
// Initialize TaskMaster
let taskMaster;
try {
taskMaster = initTaskMaster({
prdPath: file || options.input || true,
tasksPath: options.output || true
});
const initOptions = {
prdPath: file || options.input || true
};
// Only include tasksPath if output is explicitly specified
if (options.output) {
initOptions.tasksPath = options.output;
}
taskMaster = initTaskMaster(initOptions);
} catch (error) {
console.log(
boxen(
`${chalk.white.bold('Parse PRD Help')}\n\n${chalk.cyan('Usage:')}\n task-master parse-prd <prd-file.txt> [options]\n\n${chalk.cyan('Options:')}\n -i, --input <file> Path to the PRD file (alternative to positional argument)\n -o, --output <file> Output file path (default: "${TASKMASTER_TASKS_FILE}")\n -n, --num-tasks <number> Number of tasks to generate (default: 10)\n -f, --force Skip confirmation when overwriting existing tasks\n --append Append new tasks to existing tasks.json instead of overwriting\n -r, --research Use Perplexity AI for research-backed task generation\n\n${chalk.cyan('Example:')}\n task-master parse-prd requirements.txt --num-tasks 15\n task-master parse-prd --input=requirements.txt\n task-master parse-prd --force\n task-master parse-prd requirements_v2.txt --append\n task-master parse-prd requirements.txt --research\n\n${chalk.yellow('Note: This command will:')}\n 1. Look for a PRD file at ${TASKMASTER_DOCS_DIR}/PRD.md by default\n 2. Use the file specified by --input or positional argument if provided\n 3. Generate tasks from the PRD and either:\n - Overwrite any existing tasks.json file (default)\n - Append to existing tasks.json if --append is used`,
`${chalk.white.bold('Parse PRD Help')}\n\n${chalk.cyan('Usage:')}\n task-master parse-prd <prd-file.txt> [options]\n\n${chalk.cyan('Options:')}\n -i, --input <file> Path to the PRD file (alternative to positional argument)\n -o, --output <file> Output file path (default: .taskmaster/tasks/tasks.json)\n -n, --num-tasks <number> Number of tasks to generate (default: 10)\n -f, --force Skip confirmation when overwriting existing tasks\n --append Append new tasks to existing tasks.json instead of overwriting\n -r, --research Use Perplexity AI for research-backed task generation\n\n${chalk.cyan('Example:')}\n task-master parse-prd requirements.txt --num-tasks 15\n task-master parse-prd --input=requirements.txt\n task-master parse-prd --force\n task-master parse-prd requirements_v2.txt --append\n task-master parse-prd requirements.txt --research\n\n${chalk.yellow('Note: This command will:')}\n 1. Look for a PRD file at ${TASKMASTER_DOCS_DIR}/PRD.md by default\n 2. Use the file specified by --input or positional argument if provided\n 3. Generate tasks from the PRD and either:\n - Overwrite any existing tasks.json file (default)\n - Append to existing tasks.json if --append is used`,
{ padding: 1, borderColor: 'blue', borderStyle: 'round' }
)
);
@@ -912,18 +916,17 @@ function registerCommands(programInstance) {
}

spinner = ora('Parsing PRD and generating tasks...\n').start();
await parsePRD(
taskMaster.getPrdPath(),
taskMaster.getTasksPath(),
numTasks,
{
append: useAppend,
force: useForce,
research: research,
projectRoot: taskMaster.getProjectRoot(),
tag: tag
}
);
// Handle case where getTasksPath() returns null
const outputPath =
taskMaster.getTasksPath() ||
path.join(taskMaster.getProjectRoot(), TASKMASTER_TASKS_FILE);
await parsePRD(taskMaster.getPrdPath(), outputPath, numTasks, {
append: useAppend,
force: useForce,
research: research,
projectRoot: taskMaster.getProjectRoot(),
tag: tag
});
spinner.succeed('Tasks generated successfully!');
} catch (error) {
if (spinner) {
@@ -1497,10 +1500,16 @@ function registerCommands(programInstance) {
.option('--tag <tag>', 'Specify tag context for task operations')
.action(async (options) => {
// Initialize TaskMaster
const taskMaster = initTaskMaster({
tasksPath: options.file || true,
complexityReportPath: options.report || false
});
const initOptions = {
tasksPath: options.file || true
};

// Only pass complexityReportPath if user provided a custom path
if (options.report && options.report !== COMPLEXITY_REPORT_FILE) {
initOptions.complexityReportPath = options.report;
}

const taskMaster = initTaskMaster(initOptions);

const statusFilter = options.status;
const withSubtasks = options.withSubtasks || false;
@@ -1631,11 +1640,7 @@ function registerCommands(programInstance) {
.description(
`Analyze tasks and generate expansion recommendations${chalk.reset('')}`
)
.option(
'-o, --output <file>',
'Output file path for the report',
COMPLEXITY_REPORT_FILE
)
.option('-o, --output <file>', 'Output file path for the report')
.option(
'-m, --model <model>',
'LLM model to use for analysis (defaults to configured model)'
@@ -1663,10 +1668,14 @@ function registerCommands(programInstance) {
.option('--tag <tag>', 'Specify tag context for task operations')
.action(async (options) => {
// Initialize TaskMaster
const taskMaster = initTaskMaster({
tasksPath: options.file || true,
complexityReportPath: options.output || true
});
const initOptions = {
tasksPath: options.file || true // Tasks file is required to analyze
};
// Only include complexityReportPath if output is explicitly specified
if (options.output) {
initOptions.complexityReportPath = options.output;
}
const taskMaster = initTaskMaster(initOptions);

const tag = options.tag;
const modelOverride = options.model;
@@ -1681,11 +1690,13 @@ function registerCommands(programInstance) {
displayCurrentTagIndicator(targetTag);

// Tag-aware output file naming: master -> task-complexity-report.json, other tags -> task-complexity-report_tagname.json
const baseOutputPath = taskMaster.getComplexityReportPath();
const baseOutputPath =
taskMaster.getComplexityReportPath() ||
path.join(taskMaster.getProjectRoot(), COMPLEXITY_REPORT_FILE);
const outputPath =
options.output === COMPLEXITY_REPORT_FILE && targetTag !== 'master'
? baseOutputPath.replace('.json', `_${targetTag}.json`)
: baseOutputPath;
: options.output || baseOutputPath;

console.log(
chalk.blue(
@@ -1765,6 +1776,11 @@ function registerCommands(programInstance) {
)
.option('--tag <tag>', 'Specify tag context for task operations')
.action(async (prompt, options) => {
// Initialize TaskMaster
const taskMaster = initTaskMaster({
tasksPath: options.file || true
});

// Parameter validation
if (!prompt || typeof prompt !== 'string' || prompt.trim().length === 0) {
console.error(
@@ -2206,6 +2222,8 @@ ${result.result}
tasksPath: options.file || true
});

const projectRoot = taskMaster.getProjectRoot();

// Show current tag context
displayCurrentTagIndicator(
options.tag || getCurrentTag(taskMaster.getProjectRoot()) || 'master'
@@ -3455,8 +3473,11 @@ Examples:
.action(async (options) => {
// Initialize TaskMaster
const taskMaster = initTaskMaster({
tasksPath: options.file || true
tasksPath: options.file || false
});

const projectRoot = taskMaster.getProjectRoot();

// Validate flags: cannot use multiple provider flags simultaneously
const providerFlags = [
options.openrouter,
@@ -3485,7 +3506,7 @@ Examples:
// Action 1: Run Interactive Setup
console.log(chalk.blue('Starting interactive model setup...')); // Added feedback
try {
await runInteractiveSetup(projectRoot);
await runInteractiveSetup(taskMaster.getProjectRoot());
// runInteractiveSetup logs its own completion/error messages
} catch (setupError) {
console.error(
@@ -4,7 +4,10 @@ import chalk from 'chalk';
import { z } from 'zod';
import { fileURLToPath } from 'url';
import { log, findProjectRoot, resolveEnvVariable, isEmpty } from './utils.js';
import { LEGACY_CONFIG_FILE } from '../../src/constants/paths.js';
import {
LEGACY_CONFIG_FILE,
TASKMASTER_DIR
} from '../../src/constants/paths.js';
import { findConfigPath } from '../../src/utils/path-utils.js';
import {
VALIDATED_PROVIDERS,
@@ -99,17 +102,30 @@ function _loadAndValidateConfig(explicitRoot = null) {
if (rootToUse) {
configSource = `found root (${rootToUse})`;
} else {
// No root found, return defaults immediately
return defaults;
// No root found, use current working directory as fallback
// This prevents infinite loops during initialization
rootToUse = process.cwd();
configSource = `current directory (${rootToUse}) - no project markers found`;
}
}
// ---> End find project root logic <---

// --- Find configuration file using centralized path utility ---
const configPath = findConfigPath(null, { projectRoot: rootToUse });
// --- Find configuration file ---
let configPath = null;
let config = { ...defaults }; // Start with a deep copy of defaults
let configExists = false;

// During initialization (no project markers), skip config file search entirely
const hasProjectMarkers =
fs.existsSync(path.join(rootToUse, TASKMASTER_DIR)) ||
fs.existsSync(path.join(rootToUse, LEGACY_CONFIG_FILE));

if (hasProjectMarkers) {
// Only try to find config if we have project markers
// This prevents the repeated warnings during init
configPath = findConfigPath(null, { projectRoot: rootToUse });
}

if (configPath) {
configExists = true;
const isLegacy = configPath.endsWith(LEGACY_CONFIG_FILE);
@@ -199,11 +215,22 @@ function _loadAndValidateConfig(explicitRoot = null) {
)
);
} else {
console.warn(
chalk.yellow(
`Warning: Configuration file not found at derived root (${rootToUse}). Using defaults.`
)
// Don't warn about missing config during initialization
// Only warn if this looks like an existing project (has .taskmaster dir or legacy config marker)
const hasTaskmasterDir = fs.existsSync(
path.join(rootToUse, TASKMASTER_DIR)
);
const hasLegacyMarker = fs.existsSync(
path.join(rootToUse, LEGACY_CONFIG_FILE)
);

if (hasTaskmasterDir || hasLegacyMarker) {
console.warn(
chalk.yellow(
`Warning: Configuration file not found at derived root (${rootToUse}). Using defaults.`
)
);
}
}
// Keep config as defaults
config = { ...defaults };
@@ -4,7 +4,8 @@
*/

// Export all modules
export * from './utils.js';
export * from './ui.js';
export * from './task-manager.js';
export * from './utils.js';
export * from './commands.js';
export * from './task-manager.js';
export * from './prompt-manager.js';
scripts/modules/prompt-manager.js (new file, 509 lines)
@@ -0,0 +1,509 @@
|
||||
import fs from 'fs';
|
||||
import path from 'path';
|
||||
import { fileURLToPath } from 'url';
|
||||
import { log } from './utils.js';
|
||||
import Ajv from 'ajv';
|
||||
import addFormats from 'ajv-formats';
|
||||
|
||||
/**
|
||||
* Manages prompt templates for AI interactions
|
||||
*/
|
||||
export class PromptManager {
|
||||
constructor() {
|
||||
const __filename = fileURLToPath(import.meta.url);
|
||||
const __dirname = path.dirname(__filename);
|
||||
this.promptsDir = path.join(__dirname, '..', '..', 'src', 'prompts');
|
||||
this.cache = new Map();
|
||||
this.setupValidation();
|
||||
}
|
||||
|
||||
/**
|
||||
* Set up JSON schema validation
|
||||
* @private
|
||||
*/
|
||||
setupValidation() {
|
||||
this.ajv = new Ajv({ allErrors: true, strict: false });
|
||||
addFormats(this.ajv);
|
||||
|
||||
try {
|
||||
// Load schema from src/prompts/schemas
|
||||
const schemaPath = path.join(
|
||||
this.promptsDir,
|
||||
'schemas',
|
||||
'prompt-template.schema.json'
|
||||
);
|
||||
const schemaContent = fs.readFileSync(schemaPath, 'utf-8');
|
||||
const schema = JSON.parse(schemaContent);
|
||||
|
||||
this.validatePrompt = this.ajv.compile(schema);
|
||||
log('info', '✓ JSON schema validation enabled');
|
||||
} catch (error) {
|
||||
log('warn', `⚠ Schema validation disabled: ${error.message}`);
|
||||
this.validatePrompt = () => true; // Fallback to no validation
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Load a prompt template and render it with variables
|
||||
* @param {string} promptId - The prompt template ID
|
||||
* @param {Object} variables - Variables to inject into the template
|
||||
* @param {string} [variantKey] - Optional specific variant to use
|
||||
* @returns {{systemPrompt: string, userPrompt: string, metadata: Object}}
|
||||
*/
|
||||
loadPrompt(promptId, variables = {}, variantKey = null) {
|
||||
try {
|
||||
// Check cache first
|
||||
const cacheKey = `${promptId}-${JSON.stringify(variables)}-${variantKey}`;
|
||||
if (this.cache.has(cacheKey)) {
|
||||
return this.cache.get(cacheKey);
|
||||
}
|
||||
|
||||
// Load template
|
||||
const template = this.loadTemplate(promptId);
|
||||
|
||||
// Validate parameters if schema validation is available
|
||||
if (this.validatePrompt && this.validatePrompt !== true) {
|
||||
this.validateParameters(template, variables);
|
||||
}
|
||||
|
||||
// Select the variant - use specified key or select based on conditions
|
||||
const variant = variantKey
|
||||
? { ...template.prompts[variantKey], name: variantKey }
|
||||
: this.selectVariant(template, variables);
|
||||
|
||||
// Render the prompts with variables
|
||||
const rendered = {
|
||||
systemPrompt: this.renderTemplate(variant.system, variables),
|
||||
userPrompt: this.renderTemplate(variant.user, variables),
|
||||
metadata: {
|
||||
templateId: template.id,
|
||||
version: template.version,
|
||||
variant: variant.name || 'default',
|
||||
parameters: variables
|
||||
}
|
||||
};
|
||||
|
||||
// Cache the result
|
||||
this.cache.set(cacheKey, rendered);
|
||||
|
||||
return rendered;
|
||||
} catch (error) {
|
||||
log('error', `Failed to load prompt ${promptId}: ${error.message}`);
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Load a prompt template from disk
|
||||
* @private
|
||||
*/
|
||||
loadTemplate(promptId) {
|
||||
const templatePath = path.join(this.promptsDir, `${promptId}.json`);
|
||||
|
||||
try {
|
||||
const content = fs.readFileSync(templatePath, 'utf-8');
|
||||
const template = JSON.parse(content);
|
||||
|
||||
// Schema validation if available (do this first for detailed errors)
|
||||
if (this.validatePrompt && this.validatePrompt !== true) {
|
||||
const valid = this.validatePrompt(template);
|
||||
if (!valid) {
|
||||
const errors = this.validatePrompt.errors
|
||||
.map((err) => `${err.instancePath || 'root'}: ${err.message}`)
|
||||
.join(', ');
|
||||
throw new Error(`Schema validation failed: ${errors}`);
|
||||
}
|
||||
} else {
|
||||
// Fallback basic validation if no schema validation available
|
||||
if (!template.id || !template.prompts || !template.prompts.default) {
|
||||
throw new Error(
|
||||
'Invalid template structure: missing required fields (id, prompts.default)'
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
return template;
|
||||
} catch (error) {
|
||||
if (error.code === 'ENOENT') {
|
||||
throw new Error(`Prompt template '${promptId}' not found`);
|
||||
}
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Validate parameters against template schema
|
||||
* @private
|
||||
*/
|
||||
validateParameters(template, variables) {
|
||||
if (!template.parameters) return;
|
||||
|
||||
const errors = [];
|
||||
|
||||
for (const [paramName, paramConfig] of Object.entries(
|
||||
template.parameters
|
||||
)) {
|
||||
const value = variables[paramName];
|
||||
|
||||
// Check required parameters
|
||||
if (paramConfig.required && value === undefined) {
|
||||
errors.push(`Required parameter '${paramName}' missing`);
|
||||
continue;
|
||||
}
|
||||
|
||||
// Skip validation for undefined optional parameters
|
||||
if (value === undefined) continue;
|
||||
|
||||
// Type validation
|
||||
if (!this.validateParameterType(value, paramConfig.type)) {
|
||||
errors.push(
|
||||
`Parameter '${paramName}' expected ${paramConfig.type}, got ${typeof value}`
|
||||
);
|
||||
}
|
||||
|
||||
// Enum validation
|
||||
if (paramConfig.enum && !paramConfig.enum.includes(value)) {
|
||||
errors.push(
|
||||
`Parameter '${paramName}' must be one of: ${paramConfig.enum.join(', ')}`
|
||||
);
|
||||
}
|
||||
|
||||
// Pattern validation for strings
|
||||
if (paramConfig.pattern && typeof value === 'string') {
|
||||
const regex = new RegExp(paramConfig.pattern);
|
||||
if (!regex.test(value)) {
|
||||
errors.push(
|
||||
`Parameter '${paramName}' does not match required pattern: ${paramConfig.pattern}`
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
// Range validation for numbers
|
||||
if (typeof value === 'number') {
|
||||
if (paramConfig.minimum !== undefined && value < paramConfig.minimum) {
|
||||
errors.push(
|
||||
`Parameter '${paramName}' must be >= ${paramConfig.minimum}`
|
||||
);
|
||||
}
|
||||
if (paramConfig.maximum !== undefined && value > paramConfig.maximum) {
|
||||
errors.push(
|
||||
`Parameter '${paramName}' must be <= ${paramConfig.maximum}`
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (errors.length > 0) {
|
||||
throw new Error(`Parameter validation failed: ${errors.join('; ')}`);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Validate parameter type
|
||||
* @private
|
||||
*/
|
||||
validateParameterType(value, expectedType) {
|
||||
switch (expectedType) {
|
||||
case 'string':
|
||||
return typeof value === 'string';
|
||||
case 'number':
|
||||
return typeof value === 'number';
|
||||
case 'boolean':
|
||||
return typeof value === 'boolean';
|
||||
case 'array':
|
||||
return Array.isArray(value);
|
||||
case 'object':
|
||||
return (
|
||||
typeof value === 'object' && value !== null && !Array.isArray(value)
|
||||
);
|
||||
default:
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Select the best variant based on conditions
|
||||
* @private
|
||||
*/
|
||||
selectVariant(template, variables) {
|
||||
// Check each variant's condition
|
||||
for (const [name, variant] of Object.entries(template.prompts)) {
|
||||
if (name === 'default') continue;
|
||||
|
||||
if (
|
||||
variant.condition &&
|
||||
this.evaluateCondition(variant.condition, variables)
|
||||
) {
|
||||
return { ...variant, name };
|
||||
}
|
||||
}
|
||||
|
||||
// Fall back to default
|
||||
return { ...template.prompts.default, name: 'default' };
|
||||
}
|
||||
|
||||
/**
|
||||
* Evaluate a condition string
|
||||
* @private
|
||||
*/
|
||||
evaluateCondition(condition, variables) {
|
||||
try {
|
||||
// Create a safe evaluation context
|
||||
const context = { ...variables };
|
||||
|
||||
// Simple condition evaluation (can be enhanced)
|
||||
// For now, supports basic comparisons
|
||||
const func = new Function(...Object.keys(context), `return ${condition}`);
|
||||
return func(...Object.values(context));
|
||||
} catch (error) {
|
||||
log('warn', `Failed to evaluate condition: ${condition}`);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Render a template string with variables
|
||||
* @private
|
||||
*/
|
||||
renderTemplate(template, variables) {
|
||||
let rendered = template;
|
||||
|
||||
// Handle helper functions like (eq variable "value")
|
||||
rendered = rendered.replace(
|
||||
/\(eq\s+(\w+(?:\.\w+)*)\s+"([^"]+)"\)/g,
|
||||
(match, path, compareValue) => {
|
||||
const value = this.getNestedValue(variables, path);
|
||||
return value === compareValue ? 'true' : 'false';
|
||||
}
|
||||
);
|
||||
|
||||
// Handle not helper function like (not variable)
|
||||
rendered = rendered.replace(/\(not\s+(\w+(?:\.\w+)*)\)/g, (match, path) => {
|
||||
const value = this.getNestedValue(variables, path);
|
||||
return !value ? 'true' : 'false';
|
||||
});
|
||||
|
||||
// Handle gt (greater than) helper function like (gt variable 0)
|
||||
rendered = rendered.replace(
|
||||
/\(gt\s+(\w+(?:\.\w+)*)\s+(\d+(?:\.\d+)?)\)/g,
|
||||
(match, path, compareValue) => {
|
||||
const value = this.getNestedValue(variables, path);
|
||||
const numValue = parseFloat(compareValue);
|
||||
return typeof value === 'number' && value > numValue ? 'true' : 'false';
|
||||
}
|
||||
);
|
||||
|
||||
// Handle gte (greater than or equal) helper function like (gte variable 0)
|
||||
rendered = rendered.replace(
|
||||
/\(gte\s+(\w+(?:\.\w+)*)\s+(\d+(?:\.\d+)?)\)/g,
|
||||
(match, path, compareValue) => {
|
||||
const value = this.getNestedValue(variables, path);
|
||||
const numValue = parseFloat(compareValue);
|
||||
return typeof value === 'number' && value >= numValue
|
||||
? 'true'
|
||||
: 'false';
|
||||
}
|
||||
);
|
||||
|
||||
// Handle conditionals with else {{#if variable}}...{{else}}...{{/if}}
|
||||
rendered = rendered.replace(
|
||||
/\{\{#if\s+([^}]+)\}\}([\s\S]*?)(?:\{\{else\}\}([\s\S]*?))?\{\{\/if\}\}/g,
|
||||
(match, condition, trueContent, falseContent = '') => {
|
||||
// Handle boolean values and helper function results
|
||||
let value;
|
||||
if (condition === 'true') {
|
||||
value = true;
|
||||
} else if (condition === 'false') {
|
||||
value = false;
|
||||
} else {
|
||||
value = this.getNestedValue(variables, condition);
|
||||
}
|
||||
return value ? trueContent : falseContent;
|
||||
}
|
||||
);
|
||||
|
||||
// Handle each loops {{#each array}}...{{/each}}
|
||||
rendered = rendered.replace(
|
||||
/\{\{#each\s+(\w+(?:\.\w+)*)\}\}([\s\S]*?)\{\{\/each\}\}/g,
|
||||
(match, path, content) => {
|
||||
const array = this.getNestedValue(variables, path);
|
||||
if (!Array.isArray(array)) return '';
|
||||
|
||||
return array
|
||||
.map((item, index) => {
|
||||
// Create a context with item properties and special variables
|
||||
const itemContext = {
|
||||
...variables,
|
||||
...item,
|
||||
'@index': index,
|
||||
'@first': index === 0,
|
||||
'@last': index === array.length - 1
|
||||
};
|
||||
|
||||
// Recursively render the content with item context
|
||||
return this.renderTemplate(content, itemContext);
|
||||
})
|
||||
.join('');
|
||||
}
|
||||
);
|
||||
|
||||
// Handle json helper {{{json variable}}} (triple braces for raw output)
|
||||
rendered = rendered.replace(
|
||||
/\{\{\{json\s+(\w+(?:\.\w+)*)\}\}\}/g,
|
||||
(match, path) => {
|
||||
const value = this.getNestedValue(variables, path);
|
||||
return value !== undefined ? JSON.stringify(value, null, 2) : '';
|
||||
}
|
||||
);
|
||||
|
||||
// Handle variable substitution {{variable}}
|
||||
rendered = rendered.replace(/\{\{(\w+(?:\.\w+)*)\}\}/g, (match, path) => {
|
||||
const value = this.getNestedValue(variables, path);
|
||||
return value !== undefined ? value : '';
|
||||
});
|
||||
|
||||
return rendered;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get nested value from object using dot notation
|
||||
* @private
|
||||
*/
|
||||
getNestedValue(obj, path) {
|
||||
return path
|
||||
.split('.')
|
||||
.reduce(
|
||||
(current, key) =>
|
||||
current && current[key] !== undefined ? current[key] : undefined,
|
||||
obj
|
||||
);
|
||||
}
|
||||
|
||||
/**
|
||||
* Validate all prompt templates
|
||||
*/
|
||||
validateAllPrompts() {
|
||||
const results = { total: 0, errors: [], valid: [] };
|
||||
|
||||
try {
|
||||
const files = fs.readdirSync(this.promptsDir);
|
||||
const promptFiles = files.filter((file) => file.endsWith('.json'));
|
||||
|
||||
for (const file of promptFiles) {
|
||||
const promptId = file.replace('.json', '');
|
||||
results.total++;
|
||||
|
||||
try {
|
||||
this.loadTemplate(promptId);
|
||||
results.valid.push(promptId);
|
||||
} catch (error) {
|
||||
results.errors.push(`${promptId}: ${error.message}`);
|
||||
}
|
||||
}
|
||||
} catch (error) {
|
||||
results.errors.push(
|
||||
`Failed to read templates directory: ${error.message}`
|
||||
);
|
||||
}
|
||||
|
||||
return results;
|
||||
}
|
||||
|
||||
  /**
   * List all available prompt templates
   */
  listPrompts() {
    try {
      const files = fs.readdirSync(this.promptsDir);
      const prompts = [];

      for (const file of files) {
        if (!file.endsWith('.json')) continue;

        const promptId = file.replace('.json', '');
        try {
          const template = this.loadTemplate(promptId);
          prompts.push({
            id: template.id,
            description: template.description,
            version: template.version,
            parameters: template.parameters,
            tags: template.metadata?.tags || []
          });
        } catch (error) {
          log('warn', `Failed to load template ${promptId}: ${error.message}`);
        }
      }

      return prompts;
    } catch (error) {
      if (error.code === 'ENOENT') {
        // Templates directory doesn't exist yet
        return [];
      }
      throw error;
    }
  }

  /**
   * Validate template structure
   */
  validateTemplate(templatePath) {
    try {
      const content = fs.readFileSync(templatePath, 'utf-8');
      const template = JSON.parse(content);

      // Check required fields
      const required = ['id', 'version', 'description', 'prompts'];
      for (const field of required) {
        if (!template[field]) {
          return { valid: false, error: `Missing required field: ${field}` };
        }
      }

      // Check default prompt exists
      if (!template.prompts.default) {
        return { valid: false, error: 'Missing default prompt variant' };
      }

      // Check each variant has required fields
      for (const [name, variant] of Object.entries(template.prompts)) {
        if (!variant.system || !variant.user) {
          return {
            valid: false,
            error: `Variant '${name}' missing system or user prompt`
          };
        }
      }

      // Schema validation if available
      if (this.validatePrompt && this.validatePrompt !== true) {
        const valid = this.validatePrompt(template);
        if (!valid) {
          const errors = this.validatePrompt.errors
            .map((err) => `${err.instancePath || 'root'}: ${err.message}`)
            .join(', ');
          return { valid: false, error: `Schema validation failed: ${errors}` };
        }
      }

      return { valid: true };
    } catch (error) {
      return { valid: false, error: error.message };
    }
  }
}

// Singleton instance
let promptManager = null;

/**
 * Get or create the prompt manager instance
 * @returns {PromptManager}
 */
export function getPromptManager() {
  if (!promptManager) {
    promptManager = new PromptManager();
  }
  return promptManager;
}
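
// Usage sketch (illustrative, not part of the file): callers obtain the
// singleton and load a template by id, as the call sites updated later in
// this diff do. Parameter names here mirror the add-task example below.
//
//   const promptManager = getPromptManager();
//   const { systemPrompt, userPrompt } = await promptManager.loadPrompt(
//     'add-task',
//     { prompt, newTaskId, existingTasks: allTasks, gatheredContext }
//   );
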
@@ -1,89 +1,4 @@
{
  "bedrock": [
    {
      "id": "us.anthropic.claude-3-haiku-20240307-v1:0",
      "swe_score": 0.4,
      "cost_per_1m_tokens": {
        "input": 0.25,
        "output": 1.25
      },
      "allowed_roles": ["main", "fallback"]
    },
    {
      "id": "us.anthropic.claude-3-opus-20240229-v1:0",
      "swe_score": 0.725,
      "cost_per_1m_tokens": {
        "input": 15,
        "output": 75
      },
      "allowed_roles": ["main", "fallback", "research"]
    },
    {
      "id": "us.anthropic.claude-3-5-sonnet-20240620-v1:0",
      "swe_score": 0.49,
      "cost_per_1m_tokens": {
        "input": 3,
        "output": 15
      },
      "allowed_roles": ["main", "fallback", "research"]
    },
    {
      "id": "us.anthropic.claude-3-5-sonnet-20241022-v2:0",
      "swe_score": 0.49,
      "cost_per_1m_tokens": {
        "input": 3,
        "output": 15
      },
      "allowed_roles": ["main", "fallback", "research"]
    },
    {
      "id": "us.anthropic.claude-3-7-sonnet-20250219-v1:0",
      "swe_score": 0.623,
      "cost_per_1m_tokens": {
        "input": 3,
        "output": 15
      },
      "allowed_roles": ["main", "fallback", "research"],
      "max_tokens": 65536
    },
    {
      "id": "us.anthropic.claude-3-5-haiku-20241022-v1:0",
      "swe_score": 0.4,
      "cost_per_1m_tokens": {
        "input": 0.8,
        "output": 4
      },
      "allowed_roles": ["main", "fallback"]
    },
    {
      "id": "us.anthropic.claude-opus-4-20250514-v1:0",
      "swe_score": 0.725,
      "cost_per_1m_tokens": {
        "input": 15,
        "output": 75
      },
      "allowed_roles": ["main", "fallback", "research"]
    },
    {
      "id": "us.anthropic.claude-sonnet-4-20250514-v1:0",
      "swe_score": 0.727,
      "cost_per_1m_tokens": {
        "input": 3,
        "output": 15
      },
      "allowed_roles": ["main", "fallback", "research"]
    },
    {
      "id": "us.deepseek.r1-v1:0",
      "swe_score": 0,
      "cost_per_1m_tokens": {
        "input": 1.35,
        "output": 5.4
      },
      "allowed_roles": ["research"],
      "max_tokens": 65536
    }
  ],
  "anthropic": [
    {
      "id": "claude-sonnet-4-20250514",
@@ -126,36 +41,60 @@
      "max_tokens": 8192
    }
  ],
  "azure": [
  "claude-code": [
    {
      "id": "gpt-4o",
      "swe_score": 0.332,
      "id": "opus",
      "swe_score": 0.725,
      "cost_per_1m_tokens": {
        "input": 2.5,
        "output": 10.0
        "input": 0,
        "output": 0
      },
      "allowed_roles": ["main", "fallback"],
      "max_tokens": 16384
      "allowed_roles": ["main", "fallback", "research"],
      "max_tokens": 32000
    },
    {
      "id": "gpt-4o-mini",
      "swe_score": 0.3,
      "id": "sonnet",
      "swe_score": 0.727,
      "cost_per_1m_tokens": {
        "input": 0.15,
        "output": 0.6
        "input": 0,
        "output": 0
      },
      "allowed_roles": ["main", "fallback"],
      "max_tokens": 16384
      "allowed_roles": ["main", "fallback", "research"],
      "max_tokens": 64000
    }
  ],
  "mcp": [
    {
      "id": "mcp-sampling",
      "swe_score": null,
      "cost_per_1m_tokens": {
        "input": 0,
        "output": 0
      },
      "allowed_roles": ["main", "fallback", "research"],
      "max_tokens": 100000
    }
  ],
  "gemini-cli": [
    {
      "id": "gemini-2.5-pro",
      "swe_score": 0.72,
      "cost_per_1m_tokens": {
        "input": 0,
        "output": 0
      },
      "allowed_roles": ["main", "fallback", "research"],
      "max_tokens": 65536
    },
    {
      "id": "gpt-4-1",
      "swe_score": 0,
      "id": "gemini-2.5-flash",
      "swe_score": 0.71,
      "cost_per_1m_tokens": {
        "input": 2.0,
        "output": 10.0
        "input": 0,
        "output": 0
      },
      "allowed_roles": ["main", "fallback"],
      "max_tokens": 16384
      "allowed_roles": ["main", "fallback", "research"],
      "max_tokens": 65536
    }
  ],
  "openai": [
@@ -320,6 +259,133 @@
      "max_tokens": 1048000
    }
  ],
  "xai": [
    {
      "id": "grok-3",
      "name": "Grok 3",
      "swe_score": null,
      "cost_per_1m_tokens": {
        "input": 3,
        "output": 15
      },
      "allowed_roles": ["main", "fallback", "research"],
      "max_tokens": 131072
    },
    {
      "id": "grok-3-fast",
      "name": "Grok 3 Fast",
      "swe_score": 0,
      "cost_per_1m_tokens": {
        "input": 5,
        "output": 25
      },
      "allowed_roles": ["main", "fallback", "research"],
      "max_tokens": 131072
    },
    {
      "id": "grok-4",
      "name": "Grok 4",
      "swe_score": null,
      "cost_per_1m_tokens": {
        "input": 3,
        "output": 15
      },
      "allowed_roles": ["main", "fallback", "research"],
      "max_tokens": 131072
    }
  ],
  "groq": [
    {
      "id": "llama-3.3-70b-versatile",
      "swe_score": 0.55,
      "cost_per_1m_tokens": {
        "input": 0.59,
        "output": 0.79
      },
      "allowed_roles": ["main", "fallback", "research"],
      "max_tokens": 32768
    },
    {
      "id": "llama-3.1-8b-instant",
      "swe_score": 0.32,
      "cost_per_1m_tokens": {
        "input": 0.05,
        "output": 0.08
      },
      "allowed_roles": ["main", "fallback"],
      "max_tokens": 131072
    },
    {
      "id": "llama-4-scout",
      "swe_score": 0.45,
      "cost_per_1m_tokens": {
        "input": 0.11,
        "output": 0.34
      },
      "allowed_roles": ["main", "fallback", "research"],
      "max_tokens": 32768
    },
    {
      "id": "llama-4-maverick",
      "swe_score": 0.52,
      "cost_per_1m_tokens": {
        "input": 0.5,
        "output": 0.77
      },
      "allowed_roles": ["main", "fallback", "research"],
      "max_tokens": 32768
    },
    {
      "id": "mixtral-8x7b-32768",
      "swe_score": 0.35,
      "cost_per_1m_tokens": {
        "input": 0.24,
        "output": 0.24
      },
      "allowed_roles": ["main", "fallback"],
      "max_tokens": 32768
    },
    {
      "id": "qwen-qwq-32b-preview",
      "swe_score": 0.4,
      "cost_per_1m_tokens": {
        "input": 0.18,
        "output": 0.18
      },
      "allowed_roles": ["main", "fallback", "research"],
      "max_tokens": 32768
    },
    {
      "id": "deepseek-r1-distill-llama-70b",
      "swe_score": 0.52,
      "cost_per_1m_tokens": {
        "input": 0.75,
        "output": 0.99
      },
      "allowed_roles": ["main", "research"],
      "max_tokens": 8192
    },
    {
      "id": "gemma2-9b-it",
      "swe_score": 0.3,
      "cost_per_1m_tokens": {
        "input": 0.2,
        "output": 0.2
      },
      "allowed_roles": ["main", "fallback"],
      "max_tokens": 8192
    },
    {
      "id": "whisper-large-v3",
      "swe_score": 0,
      "cost_per_1m_tokens": {
        "input": 0.11,
        "output": 0
      },
      "allowed_roles": ["main"],
      "max_tokens": 0
    }
  ],
  "perplexity": [
    {
      "id": "sonar-pro",
@@ -372,106 +438,6 @@
      "max_tokens": 8700
    }
  ],
  "xai": [
    {
      "id": "grok-3",
      "name": "Grok 3",
      "swe_score": null,
      "cost_per_1m_tokens": {
        "input": 3,
        "output": 15
      },
      "allowed_roles": ["main", "fallback", "research"],
      "max_tokens": 131072
    },
    {
      "id": "grok-3-fast",
      "name": "Grok 3 Fast",
      "swe_score": 0,
      "cost_per_1m_tokens": {
        "input": 5,
        "output": 25
      },
      "allowed_roles": ["main", "fallback", "research"],
      "max_tokens": 131072
    },
    {
      "id": "grok-4",
      "name": "Grok 4",
      "swe_score": null,
      "cost_per_1m_tokens": {
        "input": 3,
        "output": 15
      },
      "allowed_roles": ["main", "fallback", "research"],
      "max_tokens": 131072
    }
  ],
  "ollama": [
    {
      "id": "devstral:latest",
      "swe_score": 0,
      "cost_per_1m_tokens": {
        "input": 0,
        "output": 0
      },
      "allowed_roles": ["main", "fallback"]
    },
    {
      "id": "qwen3:latest",
      "swe_score": 0,
      "cost_per_1m_tokens": {
        "input": 0,
        "output": 0
      },
      "allowed_roles": ["main", "fallback"]
    },
    {
      "id": "qwen3:14b",
      "swe_score": 0,
      "cost_per_1m_tokens": {
        "input": 0,
        "output": 0
      },
      "allowed_roles": ["main", "fallback"]
    },
    {
      "id": "qwen3:32b",
      "swe_score": 0,
      "cost_per_1m_tokens": {
        "input": 0,
        "output": 0
      },
      "allowed_roles": ["main", "fallback"]
    },
    {
      "id": "mistral-small3.1:latest",
      "swe_score": 0,
      "cost_per_1m_tokens": {
        "input": 0,
        "output": 0
      },
      "allowed_roles": ["main", "fallback"]
    },
    {
      "id": "llama3.3:latest",
      "swe_score": 0,
      "cost_per_1m_tokens": {
        "input": 0,
        "output": 0
      },
      "allowed_roles": ["main", "fallback"]
    },
    {
      "id": "phi4:latest",
      "swe_score": 0,
      "cost_per_1m_tokens": {
        "input": 0,
        "output": 0
      },
      "allowed_roles": ["main", "fallback"]
    }
  ],
  "openrouter": [
    {
      "id": "google/gemini-2.5-flash-preview-05-20",
@@ -724,151 +690,185 @@
      "max_tokens": 32768
    }
  ],
  "groq": [
  "ollama": [
    {
      "id": "llama-3.3-70b-versatile",
      "swe_score": 0.55,
      "cost_per_1m_tokens": {
        "input": 0.59,
        "output": 0.79
      },
      "allowed_roles": ["main", "fallback", "research"],
      "max_tokens": 32768
    },
    {
      "id": "llama-3.1-8b-instant",
      "swe_score": 0.32,
      "cost_per_1m_tokens": {
        "input": 0.05,
        "output": 0.08
      },
      "allowed_roles": ["main", "fallback"],
      "max_tokens": 131072
    },
    {
      "id": "llama-4-scout",
      "swe_score": 0.45,
      "cost_per_1m_tokens": {
        "input": 0.11,
        "output": 0.34
      },
      "allowed_roles": ["main", "fallback", "research"],
      "max_tokens": 32768
    },
    {
      "id": "llama-4-maverick",
      "swe_score": 0.52,
      "cost_per_1m_tokens": {
        "input": 0.5,
        "output": 0.77
      },
      "allowed_roles": ["main", "fallback", "research"],
      "max_tokens": 32768
    },
    {
      "id": "mixtral-8x7b-32768",
      "swe_score": 0.35,
      "cost_per_1m_tokens": {
        "input": 0.24,
        "output": 0.24
      },
      "allowed_roles": ["main", "fallback"],
      "max_tokens": 32768
    },
    {
      "id": "qwen-qwq-32b-preview",
      "swe_score": 0.4,
      "cost_per_1m_tokens": {
        "input": 0.18,
        "output": 0.18
      },
      "allowed_roles": ["main", "fallback", "research"],
      "max_tokens": 32768
    },
    {
      "id": "deepseek-r1-distill-llama-70b",
      "swe_score": 0.52,
      "cost_per_1m_tokens": {
        "input": 0.75,
        "output": 0.99
      },
      "allowed_roles": ["main", "research"],
      "max_tokens": 8192
    },
    {
      "id": "gemma2-9b-it",
      "swe_score": 0.3,
      "cost_per_1m_tokens": {
        "input": 0.2,
        "output": 0.2
      },
      "allowed_roles": ["main", "fallback"],
      "max_tokens": 8192
    },
    {
      "id": "whisper-large-v3",
      "id": "devstral:latest",
      "swe_score": 0,
      "cost_per_1m_tokens": {
        "input": 0.11,
        "output": 0
      },
      "allowed_roles": ["main"],
      "max_tokens": 0
    }
  ],
  "claude-code": [
    {
      "id": "opus",
      "swe_score": 0.725,
      "cost_per_1m_tokens": {
        "input": 0,
        "output": 0
      },
      "allowed_roles": ["main", "fallback", "research"],
      "max_tokens": 32000
      "allowed_roles": ["main", "fallback"]
    },
    {
      "id": "sonnet",
      "swe_score": 0.727,
      "id": "qwen3:latest",
      "swe_score": 0,
      "cost_per_1m_tokens": {
        "input": 0,
        "output": 0
      },
      "allowed_roles": ["main", "fallback", "research"],
      "max_tokens": 64000
    }
  ],
  "mcp": [
      "allowed_roles": ["main", "fallback"]
    },
    {
      "id": "mcp-sampling",
      "swe_score": null,
      "id": "qwen3:14b",
      "swe_score": 0,
      "cost_per_1m_tokens": {
        "input": 0,
        "output": 0
      },
      "allowed_roles": ["main", "fallback", "research"],
      "max_tokens": 100000
    }
  ],
  "gemini-cli": [
      "allowed_roles": ["main", "fallback"]
    },
    {
      "id": "gemini-2.5-pro",
      "swe_score": 0.72,
      "id": "qwen3:32b",
      "swe_score": 0,
      "cost_per_1m_tokens": {
        "input": 0,
        "output": 0
      },
      "allowed_roles": ["main", "fallback"]
    },
    {
      "id": "mistral-small3.1:latest",
      "swe_score": 0,
      "cost_per_1m_tokens": {
        "input": 0,
        "output": 0
      },
      "allowed_roles": ["main", "fallback"]
    },
    {
      "id": "llama3.3:latest",
      "swe_score": 0,
      "cost_per_1m_tokens": {
        "input": 0,
        "output": 0
      },
      "allowed_roles": ["main", "fallback"]
    },
    {
      "id": "phi4:latest",
      "swe_score": 0,
      "cost_per_1m_tokens": {
        "input": 0,
        "output": 0
      },
      "allowed_roles": ["main", "fallback"]
    }
  ],
  "azure": [
    {
      "id": "gpt-4o",
      "swe_score": 0.332,
      "cost_per_1m_tokens": {
        "input": 2.5,
        "output": 10.0
      },
      "allowed_roles": ["main", "fallback"],
      "max_tokens": 16384
    },
    {
      "id": "gpt-4o-mini",
      "swe_score": 0.3,
      "cost_per_1m_tokens": {
        "input": 0.15,
        "output": 0.6
      },
      "allowed_roles": ["main", "fallback"],
      "max_tokens": 16384
    },
    {
      "id": "gpt-4-1",
      "swe_score": 0,
      "cost_per_1m_tokens": {
        "input": 2.0,
        "output": 10.0
      },
      "allowed_roles": ["main", "fallback"],
      "max_tokens": 16384
    }
  ],
  "bedrock": [
    {
      "id": "us.anthropic.claude-3-haiku-20240307-v1:0",
      "swe_score": 0.4,
      "cost_per_1m_tokens": {
        "input": 0.25,
        "output": 1.25
      },
      "allowed_roles": ["main", "fallback"]
    },
    {
      "id": "us.anthropic.claude-3-opus-20240229-v1:0",
      "swe_score": 0.725,
      "cost_per_1m_tokens": {
        "input": 15,
        "output": 75
      },
      "allowed_roles": ["main", "fallback", "research"]
    },
    {
      "id": "us.anthropic.claude-3-5-sonnet-20240620-v1:0",
      "swe_score": 0.49,
      "cost_per_1m_tokens": {
        "input": 3,
        "output": 15
      },
      "allowed_roles": ["main", "fallback", "research"]
    },
    {
      "id": "us.anthropic.claude-3-5-sonnet-20241022-v2:0",
      "swe_score": 0.49,
      "cost_per_1m_tokens": {
        "input": 3,
        "output": 15
      },
      "allowed_roles": ["main", "fallback", "research"]
    },
    {
      "id": "us.anthropic.claude-3-7-sonnet-20250219-v1:0",
      "swe_score": 0.623,
      "cost_per_1m_tokens": {
        "input": 3,
        "output": 15
      },
      "allowed_roles": ["main", "fallback", "research"],
      "max_tokens": 65536
    },
    {
      "id": "gemini-2.5-flash",
      "swe_score": 0.71,
      "id": "us.anthropic.claude-3-5-haiku-20241022-v1:0",
      "swe_score": 0.4,
      "cost_per_1m_tokens": {
        "input": 0,
        "output": 0
        "input": 0.8,
        "output": 4
      },
      "allowed_roles": ["main", "fallback", "research"],
      "allowed_roles": ["main", "fallback"]
    },
    {
      "id": "us.anthropic.claude-opus-4-20250514-v1:0",
      "swe_score": 0.725,
      "cost_per_1m_tokens": {
        "input": 15,
        "output": 75
      },
      "allowed_roles": ["main", "fallback", "research"]
    },
    {
      "id": "us.anthropic.claude-sonnet-4-20250514-v1:0",
      "swe_score": 0.727,
      "cost_per_1m_tokens": {
        "input": 3,
        "output": 15
      },
      "allowed_roles": ["main", "fallback", "research"]
    },
    {
      "id": "us.deepseek.r1-v1:0",
      "swe_score": 0,
      "cost_per_1m_tokens": {
        "input": 1.35,
        "output": 5.4
      },
      "allowed_roles": ["research"],
      "max_tokens": 65536
    }
  ]

@@ -27,6 +27,7 @@ import {
} from '../utils.js';
import { generateObjectService } from '../ai-services-unified.js';
import { getDefaultPriority } from '../config-manager.js';
import { getPromptManager } from '../prompt-manager.js';
import ContextGatherer from '../utils/contextGatherer.js';
import generateTaskFiles from './generate-task-files.js';
import {
@@ -403,30 +404,6 @@ async function addTask(
    displayContextAnalysis(analysisData, prompt, gatheredContext.length);
  }

  // System Prompt - Enhanced for dependency awareness
  const systemPrompt =
    "You are a helpful assistant that creates well-structured tasks for a software development project. Generate a single new task based on the user's description, adhering strictly to the provided JSON schema. Pay special attention to dependencies between tasks, ensuring the new task correctly references any tasks it depends on.\n\n" +
    'When determining dependencies for a new task, follow these principles:\n' +
    '1. Select dependencies based on logical requirements - what must be completed before this task can begin.\n' +
    '2. Prioritize task dependencies that are semantically related to the functionality being built.\n' +
    '3. Consider both direct dependencies (immediately prerequisite) and indirect dependencies.\n' +
    '4. Avoid adding unnecessary dependencies - only include tasks that are genuinely prerequisite.\n' +
    '5. Consider the current status of tasks - prefer completed tasks as dependencies when possible.\n' +
    "6. Pay special attention to foundation tasks (1-5) but don't automatically include them without reason.\n" +
    '7. Recent tasks (higher ID numbers) may be more relevant for newer functionality.\n\n' +
    'The dependencies array should contain task IDs (numbers) of prerequisite tasks.\n';

  // Task Structure Description (for user prompt)
  const taskStructureDesc = `
  {
    "title": "Task title goes here",
    "description": "A concise one or two sentence description of what the task involves",
    "details": "Detailed implementation steps, considerations, code examples, or technical approach",
    "testStrategy": "Specific steps to verify correct implementation and functionality",
    "dependencies": [1, 3] // Example: IDs of tasks that must be completed before this task
  }
  `;

  // Add any manually provided details to the prompt for context
  let contextFromArgs = '';
  if (manualTaskData?.title)
@@ -438,18 +415,21 @@ async function addTask(
  if (manualTaskData?.testStrategy)
    contextFromArgs += `\n- Additional Test Strategy Context: "${manualTaskData.testStrategy}"`;

  // User Prompt
  const userPrompt = `You are generating the details for Task #${newTaskId}. Based on the user's request: "${prompt}", create a comprehensive new task for a software development project.

${gatheredContext}

Based on the information about existing tasks provided above, include appropriate dependencies in the "dependencies" array. Only include task IDs that this new task directly depends on.

Return your answer as a single JSON object matching the schema precisely:
${taskStructureDesc}

Make sure the details and test strategy are comprehensive and specific. DO NOT include the task ID in the title.
`;
  // Load prompts using PromptManager
  const promptManager = getPromptManager();
  const { systemPrompt, userPrompt } = await promptManager.loadPrompt(
    'add-task',
    {
      prompt,
      newTaskId,
      existingTasks: allTasks,
      gatheredContext,
      contextFromArgs,
      useResearch,
      priority: effectivePriority,
      dependencies: numericDependencies
    }
  );

  // Start the loading indicator - only for text mode
  if (outputFormat === 'text') {
@@ -581,16 +561,6 @@ async function addTask(
    writeJSON(tasksPath, rawData, projectRoot, targetTag);
    report('DEBUG: tasks.json written.', 'debug');

    // Generate markdown task files
    report('Generating task files...', 'info');
    report('DEBUG: Calling generateTaskFiles...', 'debug');
    // Pass mcpLog if available to generateTaskFiles
    await generateTaskFiles(tasksPath, path.dirname(tasksPath), {
      projectRoot,
      tag: targetTag
    });
    report('DEBUG: generateTaskFiles finished.', 'debug');

    // Show success message - only for text output (CLI)
    if (outputFormat === 'text') {
      const table = new Table({

@@ -14,6 +14,7 @@ import {
import { generateTextService } from '../ai-services-unified.js';

import { getDebugFlag, getProjectName } from '../config-manager.js';
import { getPromptManager } from '../prompt-manager.js';
import {
  COMPLEXITY_REPORT_FILE,
  LEGACY_TASKS_FILE
@@ -239,7 +240,7 @@ async function analyzeTaskComplexity(options, context = {}) {
        tasks: relevantTaskIds,
        format: 'research'
      });
      gatheredContext = contextResult;
      gatheredContext = contextResult.context || '';
    }
  } catch (contextError) {
    reportLog(
@@ -396,12 +397,20 @@ async function analyzeTaskComplexity(options, context = {}) {
    }

    // Continue with regular analysis path
    const prompt = generateInternalComplexityAnalysisPrompt(
      tasksData,
      gatheredContext
    // Load prompts using PromptManager
    const promptManager = getPromptManager();

    const promptParams = {
      tasks: tasksData.tasks,
      gatheredContext: gatheredContext || '',
      useResearch: useResearch
    };

    const { systemPrompt, userPrompt: prompt } = await promptManager.loadPrompt(
      'analyze-complexity',
      promptParams,
      'default'
    );
    const systemPrompt =
      'You are an expert software architect and project manager analyzing task complexity. Respond only with the requested valid JSON array.';

    let loadingIndicator = null;
    if (outputFormat === 'text') {

@@ -19,6 +19,7 @@ import {
import { generateTextService } from '../ai-services-unified.js';

import { getDefaultSubtasks, getDebugFlag } from '../config-manager.js';
import { getPromptManager } from '../prompt-manager.js';
import generateTaskFiles from './generate-task-files.js';
import { COMPLEXITY_REPORT_FILE } from '../../../src/constants/paths.js';
import { ContextGatherer } from '../utils/contextGatherer.js';
@@ -60,128 +61,6 @@ const subtaskWrapperSchema = z.object({
});
// --- End Zod Schemas ---

/**
 * Generates the system prompt for the main AI role (e.g., Claude).
 * @param {number} subtaskCount - The target number of subtasks.
 * @returns {string} The system prompt.
 */
function generateMainSystemPrompt(subtaskCount) {
  return `You are an AI assistant helping with task breakdown for software development.
You need to break down a high-level task into ${subtaskCount > 0 ? subtaskCount : 'an appropriate number of'} specific subtasks that can be implemented one by one.

Subtasks should:
1. Be specific and actionable implementation steps
2. Follow a logical sequence
3. Each handle a distinct part of the parent task
4. Include clear guidance on implementation approach
5. Have appropriate dependency chains between subtasks (using the new sequential IDs)
6. Collectively cover all aspects of the parent task

For each subtask, provide:
- id: Sequential integer starting from the provided nextSubtaskId
- title: Clear, specific title
- description: Detailed description
- dependencies: Array of prerequisite subtask IDs (use the new sequential IDs)
- details: Implementation details, the output should be in string
- testStrategy: Optional testing approach


Respond ONLY with a valid JSON object containing a single key "subtasks" whose value is an array matching the structure described. Do not include any explanatory text, markdown formatting, or code block markers.`;
}

/**
 * Generates the user prompt for the main AI role (e.g., Claude).
 * @param {Object} task - The parent task object.
 * @param {number} subtaskCount - The target number of subtasks.
 * @param {string} additionalContext - Optional additional context.
 * @param {number} nextSubtaskId - The starting ID for the new subtasks.
 * @returns {string} The user prompt.
 */
function generateMainUserPrompt(
  task,
  subtaskCount,
  additionalContext,
  nextSubtaskId
) {
  const contextPrompt = additionalContext
    ? `\n\nAdditional context: ${additionalContext}`
    : '';
  const schemaDescription = `
{
  "subtasks": [
    {
      "id": ${nextSubtaskId}, // First subtask ID
      "title": "Specific subtask title",
      "description": "Detailed description",
      "dependencies": [], // e.g., [${nextSubtaskId + 1}] if it depends on the next
      "details": "Implementation guidance",
      "testStrategy": "Optional testing approach"
    },
    // ... (repeat for ${subtaskCount ? 'a total of ' + subtaskCount : 'each of the'} subtasks with sequential IDs)
  ]
}`;

  return `Break down this task into ${subtaskCount > 0 ? 'exactly ' + subtaskCount : 'an appropriate number of'} specific subtasks:

Task ID: ${task.id}
Title: ${task.title}
Description: ${task.description}
Current details: ${task.details || 'None'}
${contextPrompt}

Return ONLY the JSON object containing the "subtasks" array, matching this structure:
${schemaDescription}`;
}

/**
 * Generates the user prompt for the research AI role (e.g., Perplexity).
 * @param {Object} task - The parent task object.
 * @param {number} subtaskCount - The target number of subtasks.
 * @param {string} additionalContext - Optional additional context.
 * @param {number} nextSubtaskId - The starting ID for the new subtasks.
 * @returns {string} The user prompt.
 */
function generateResearchUserPrompt(
  task,
  subtaskCount,
  additionalContext,
  nextSubtaskId
) {
  const contextPrompt = additionalContext
    ? `\n\nConsider this context: ${additionalContext}`
    : '';
  const schemaDescription = `
{
  "subtasks": [
    {
      "id": <number>, // Sequential ID starting from ${nextSubtaskId}
      "title": "<string>",
      "description": "<string>",
      "dependencies": [<number>], // e.g., [${nextSubtaskId + 1}]. If no dependencies, use an empty array [].
      "details": "<string>",
      "testStrategy": "<string>" // Optional
    },
    // ... (repeat for ${subtaskCount} subtasks)
  ]
}`;

  return `Analyze the following task and break it down into ${subtaskCount > 0 ? 'exactly ' + subtaskCount : 'an appropriate number of'} specific subtasks using your research capabilities. Assign sequential IDs starting from ${nextSubtaskId}.

Parent Task:
ID: ${task.id}
Title: ${task.title}
Description: ${task.description}
Current details: ${task.details || 'None'}
${contextPrompt}

CRITICAL: Respond ONLY with a valid JSON object containing a single key "subtasks". The value must be an array of the generated subtasks, strictly matching this structure:
${schemaDescription}

Important: For the 'dependencies' field, if a subtask has no dependencies, you MUST use an empty array, for example: "dependencies": []. Do not use null or omit the field.

Do not include ANY explanatory text, markdown, or code block markers. Just the JSON object.`;
}

/**
 * Parse subtasks from AI's text response. Includes basic cleanup.
 * @param {string} text - Response text from AI.
@@ -490,7 +369,7 @@ async function expandTask(
        tasks: finalTaskIds,
        format: 'research'
      });
      gatheredContext = contextResult;
      gatheredContext = contextResult.context || '';
    }
  } catch (contextError) {
    logger.warn(`Could not gather context: ${contextError.message}`);
@@ -499,9 +378,7 @@ async function expandTask(

  // --- Complexity Report Integration ---
  let finalSubtaskCount;
  let promptContent = '';
  let complexityReasoningContext = '';
  let systemPrompt; // Declare systemPrompt here

  // Use tag-aware complexity report path
  const complexityReportPath = getTagAwareFilePath(
@@ -570,52 +447,71 @@ async function expandTask(
  // Determine prompt content AND system prompt
  const nextSubtaskId = (task.subtasks?.length || 0) + 1;

  // Load prompts using PromptManager
  const promptManager = getPromptManager();

  // Combine all context sources into a single additionalContext parameter
  let combinedAdditionalContext = '';
  if (additionalContext || complexityReasoningContext) {
    combinedAdditionalContext =
      `\n\n${additionalContext}${complexityReasoningContext}`.trim();
  }
  if (gatheredContext) {
    combinedAdditionalContext =
      `${combinedAdditionalContext}\n\n# Project Context\n\n${gatheredContext}`.trim();
  }

  // Ensure expansionPrompt is a string (handle both string and object formats)
  let expansionPromptText = undefined;
  if (taskAnalysis?.expansionPrompt) {
    // Use prompt from complexity report
    promptContent = taskAnalysis.expansionPrompt;
    // Append additional context and reasoning
    promptContent += `\n\n${additionalContext}`.trim();
    promptContent += `${complexityReasoningContext}`.trim();
    if (gatheredContext) {
      promptContent += `\n\n# Project Context\n\n${gatheredContext}`;
    if (typeof taskAnalysis.expansionPrompt === 'string') {
      expansionPromptText = taskAnalysis.expansionPrompt;
    } else if (
      typeof taskAnalysis.expansionPrompt === 'object' &&
      taskAnalysis.expansionPrompt.text
    ) {
      expansionPromptText = taskAnalysis.expansionPrompt.text;
    }
  }

    // --- Use Simplified System Prompt for Report Prompts ---
    systemPrompt = `You are an AI assistant helping with task breakdown. Generate ${finalSubtaskCount > 0 ? 'exactly ' + finalSubtaskCount : 'an appropriate number of'} subtasks based on the provided prompt and context. Respond ONLY with a valid JSON object containing a single key "subtasks" whose value is an array of the generated subtask objects. Each subtask object in the array must have keys: "id", "title", "description", "dependencies", "details", "status". Ensure the 'id' starts from ${nextSubtaskId} and is sequential. Ensure 'dependencies' only reference valid prior subtask IDs generated in this response (starting from ${nextSubtaskId}). Ensure 'status' is 'pending'. Do not include any other text or explanation.`;
    logger.info(
      `Using expansion prompt from complexity report and simplified system prompt for task ${task.id}.`
    );
    // --- End Simplified System Prompt ---
  } else {
    // Use standard prompt generation
    let combinedAdditionalContext =
      `${additionalContext}${complexityReasoningContext}`.trim();
    if (gatheredContext) {
      combinedAdditionalContext =
        `${combinedAdditionalContext}\n\n# Project Context\n\n${gatheredContext}`.trim();
    }

    if (useResearch) {
      promptContent = generateResearchUserPrompt(
        task,
        finalSubtaskCount,
        combinedAdditionalContext,
        nextSubtaskId
      );
      // Use the specific research system prompt if needed, or a standard one
      systemPrompt = `You are an AI assistant that responds ONLY with valid JSON objects as requested. The object should contain a 'subtasks' array.`; // Or keep generateResearchSystemPrompt if it exists
  // Ensure gatheredContext is a string (handle both string and object formats)
  let gatheredContextText = gatheredContext;
  if (typeof gatheredContext === 'object' && gatheredContext !== null) {
    if (gatheredContext.data) {
      gatheredContextText = gatheredContext.data;
    } else if (gatheredContext.text) {
      gatheredContextText = gatheredContext.text;
    } else {
      promptContent = generateMainUserPrompt(
        task,
        finalSubtaskCount,
        combinedAdditionalContext,
        nextSubtaskId
      );
      // Use the original detailed system prompt for standard generation
      systemPrompt = generateMainSystemPrompt(finalSubtaskCount);
      gatheredContextText = JSON.stringify(gatheredContext);
    }
  }

  const promptParams = {
    task: task,
    subtaskCount: finalSubtaskCount,
    nextSubtaskId: nextSubtaskId,
    additionalContext: additionalContext,
    complexityReasoningContext: complexityReasoningContext,
    gatheredContext: gatheredContextText || '',
    useResearch: useResearch,
    expansionPrompt: expansionPromptText || undefined
  };

  let variantKey = 'default';
  if (expansionPromptText) {
    variantKey = 'complexity-report';
    logger.info(
      `Using expansion prompt from complexity report for task ${task.id}.`
    );
  } else if (useResearch) {
    variantKey = 'research';
    logger.info(`Using research variant for task ${task.id}.`);
  } else {
    logger.info(`Using standard prompt generation for task ${task.id}.`);
  }

  const { systemPrompt, userPrompt: promptContent } =
    await promptManager.loadPrompt('expand-task', promptParams, variantKey);
  // --- End Complexity Report / Prompt Logic ---

  // --- AI Subtask Generation using generateTextService ---

@@ -864,64 +864,54 @@ function generateMarkdownOutput(data, filteredTasks, stats) {
    return '█'.repeat(filled) + '░'.repeat(empty);
  };

  const taskProgressBar = createMarkdownProgressBar(completionPercentage, 20);
  const subtaskProgressBar = createMarkdownProgressBar(
    subtaskCompletionPercentage,
    20
  );

  // Dashboard section
  markdown += '```\n';
  markdown +=
    '╭─────────────────────────────────────────────────────────╮╭─────────────────────────────────────────────────────────╮\n';
  markdown +=
    '│ ││ │\n';
  markdown +=
    '│ Project Dashboard ││ Dependency Status & Next Task │\n';
  markdown += `│ Tasks Progress: ${createMarkdownProgressBar(completionPercentage, 20)} ${Math.round(completionPercentage)}% ││ Dependency Metrics: │\n`;
  markdown += `│ ${Math.round(completionPercentage)}% ││ • Tasks with no dependencies: ${tasksWithNoDeps} │\n`;
  markdown += `│ Done: ${doneCount} In Progress: ${inProgressCount} Pending: ${pendingCount} Blocked: ${blockedCount} ││ • Tasks ready to work on: ${tasksReadyToWork} │\n`;
  markdown += `│ Deferred: ${deferredCount} Cancelled: ${cancelledCount} ││ • Tasks blocked by dependencies: ${tasksWithUnsatisfiedDeps} │\n`;
  markdown += `│ ││ • Most depended-on task: #${mostDependedOnTaskId} (${maxDependents} dependents) │\n`;
  markdown += `│ Subtasks Progress: ${createMarkdownProgressBar(subtaskCompletionPercentage, 20)} ││ • Avg dependencies per task: ${avgDependenciesPerTask.toFixed(1)} │\n`;
  markdown += `│ ${Math.round(subtaskCompletionPercentage)}% ${Math.round(subtaskCompletionPercentage)}% ││ │\n`;
  markdown += `│ Completed: ${completedSubtasks}/${totalSubtasks} In Progress: ${inProgressSubtasks} Pending: ${pendingSubtasks} ││ Next Task to Work On: │\n`;
  // markdown += '```\n';
  markdown += '| Project Dashboard | |\n';
  markdown += '| :- |:-|\n';
  markdown += `| Task Progress | ${taskProgressBar} ${Math.round(completionPercentage)}% |\n`;
  markdown += `| Done | ${doneCount} |\n`;
  markdown += `| In Progress | ${inProgressCount} |\n`;
  markdown += `| Pending | ${pendingCount} |\n`;
  markdown += `| Deferred | ${deferredCount} |\n`;
  markdown += `| Cancelled | ${cancelledCount} |\n`;
  markdown += `|-|-|\n`;
  markdown += `| Subtask Progress | ${subtaskProgressBar} ${Math.round(subtaskCompletionPercentage)}% |\n`;
  markdown += `| Completed | ${completedSubtasks} |\n`;
  markdown += `| In Progress | ${inProgressSubtasks} |\n`;
  markdown += `| Pending | ${pendingSubtasks} |\n`;

  const nextTaskTitle = nextItem
    ? nextItem.title.length > 40
      ? nextItem.title.substring(0, 37) + '...'
      : nextItem.title
    : 'No task available';

  markdown += `│ Blocked: ${blockedSubtasks} Deferred: ${deferredSubtasks} Cancelled: ${cancelledSubtasks} ││ ID: ${nextItem ? nextItem.id : 'N/A'} - ${nextTaskTitle} │\n`;
  markdown += `│ ││ Priority: ${nextItem ? nextItem.priority || 'medium' : ''} Dependencies: ${nextItem && nextItem.dependencies && nextItem.dependencies.length > 0 ? 'Some' : 'None'} │\n`;
  markdown += `│ Priority Breakdown: ││ Complexity: ${nextItem && nextItem.complexityScore ? '● ' + nextItem.complexityScore : 'N/A'} │\n`;
  markdown += `│ • High priority: ${data.tasks.filter((t) => t.priority === 'high').length} │╰─────────────────────────────────────────────────────────╯\n`;
  markdown += `│ • Medium priority: ${data.tasks.filter((t) => t.priority === 'medium').length} │\n`;
  markdown += `│ • Low priority: ${data.tasks.filter((t) => t.priority === 'low').length} │\n`;
  markdown += '│ │\n';
  markdown += '╰─────────────────────────────────────────────────────────╯\n';
  markdown += '\n\n';

  // Tasks table
  markdown +=
    '┌───────────┬──────────────────────────────────────┬─────────────────┬──────────────┬───────────────────────┬───────────┐\n';
    '| ID | Title | Status | Priority | Dependencies | Complexity |\n';
  markdown +=
    '│ ID │ Title │ Status │ Priority │ Dependencies │ Complexi… │\n';
  markdown +=
    '├───────────┼──────────────────────────────────────┼─────────────────┼──────────────┼───────────────────────┼───────────┤\n';
    '| :- | :- | :- | :- | :- | :- |\n';

  // Helper function to format status with symbols
  const getStatusSymbol = (status) => {
    switch (status) {
      case 'done':
      case 'completed':
        return '✓ done';
        return '✓ done';
      case 'in-progress':
        return '► in-progress';
        return '► in-progress';
      case 'pending':
        return '○ pending';
        return '○ pending';
      case 'blocked':
        return '⭕ blocked';
        return '⭕ blocked';
      case 'deferred':
        return 'x deferred';
        return 'x deferred';
      case 'cancelled':
        return 'x cancelled';
        return 'x cancelled';
      case 'review':
        return '? review';
        return '? review';
      default:
        return status || 'pending';
    }
@@ -948,12 +938,12 @@ function generateMarkdownOutput(data, filteredTasks, stats) {
      ? `● ${task.complexityScore}`
      : 'N/A';

    markdown += `│ ${task.id.toString().padEnd(9)} │ ${taskTitle.substring(0, 36).padEnd(36)} │ ${statusSymbol.padEnd(15)} │ ${priority.padEnd(12)} │ ${deps.substring(0, 21).padEnd(21)} │ ${complexity.padEnd(9)} │\n`;
    markdown += `| ${task.id} | ${taskTitle} | ${statusSymbol} | ${priority} | ${deps} | ${complexity} |\n`;

    // Add subtasks if requested
    if (withSubtasks && task.subtasks && task.subtasks.length > 0) {
      task.subtasks.forEach((subtask) => {
        const subtaskTitle = `└─ ${subtask.title}`; // No truncation
        const subtaskTitle = `${subtask.title}`; // No truncation
        const subtaskStatus = getStatusSymbol(subtask.status);
        const subtaskDeps = formatDependenciesForMarkdown(
          subtask.dependencies,
@@ -963,85 +953,11 @@ function generateMarkdownOutput(data, filteredTasks, stats) {
          ? subtask.complexityScore.toString()
          : 'N/A';

        markdown +=
          '├───────────┼──────────────────────────────────────┼─────────────────┼──────────────┼───────────────────────┼───────────┤\n';
        markdown += `│ ${task.id}.${subtask.id}${' '.padEnd(6)} │ ${subtaskTitle.substring(0, 36).padEnd(36)} │ ${subtaskStatus.padEnd(15)} │ - │ ${subtaskDeps.substring(0, 21).padEnd(21)} │ ${subtaskComplexity.padEnd(9)} │\n`;
        markdown += `| ${task.id}.${subtask.id} | ${subtaskTitle} | ${subtaskStatus} | - | ${subtaskDeps} | ${subtaskComplexity} |\n`;
      });
    }

    markdown +=
      '├───────────┼──────────────────────────────────────┼─────────────────┼──────────────┼───────────────────────┼───────────┤\n';
  });

  // Close the table
  markdown = markdown.slice(
    0,
    -1 *
      '├───────────┼──────────────────────────────────────┼─────────────────┼──────────────┼───────────────────────┼───────────┤\n'
        .length
  );
  markdown +=
    '└───────────┴──────────────────────────────────────┴─────────────────┴──────────────┴───────────────────────┴───────────┘\n';
  markdown += '```\n\n';

  // Next task recommendation
  if (nextItem) {
    markdown +=
      '╭────────────────────────────────────────────── ⚡ RECOMMENDED NEXT TASK ⚡ ──────────────────────────────────────────────╮\n';
    markdown +=
      '│ │\n';
    markdown += `│ 🔥 Next Task to Work On: #${nextItem.id} - ${nextItem.title} │\n`;
    markdown +=
      '│ │\n';
    markdown += `│ Priority: ${nextItem.priority || 'medium'} Status: ${getStatusSymbol(nextItem.status)} │\n`;
    markdown += `│ Dependencies: ${nextItem.dependencies && nextItem.dependencies.length > 0 ? formatDependenciesForMarkdown(nextItem.dependencies, data.tasks) : 'None'} │\n`;
    markdown +=
      '│ │\n';
    markdown += `│ Description: ${getWorkItemDescription(nextItem, data.tasks)} │\n`;
    markdown +=
      '│ │\n';

    // Add subtasks if they exist
    const parentTask = data.tasks.find((t) => t.id === nextItem.id);
    if (parentTask && parentTask.subtasks && parentTask.subtasks.length > 0) {
      markdown +=
        '│ Subtasks: │\n';
      parentTask.subtasks.forEach((subtask) => {
        markdown += `│ ${nextItem.id}.${subtask.id} [${subtask.status || 'pending'}] ${subtask.title} │\n`;
      });
      markdown +=
        '│ │\n';
    }

    markdown += `│ Start working: task-master set-status --id=${nextItem.id} --status=in-progress │\n`;
    markdown += `│ View details: task-master show ${nextItem.id} │\n`;
    markdown +=
      '│ │\n';
    markdown +=
      '╰─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯\n\n';
  }

  // Suggested next steps
  markdown += '\n';
  markdown +=
    '╭──────────────────────────────────────────────────────────────────────────────────────╮\n';
  markdown +=
    '│ │\n';
  markdown +=
    '│ Suggested Next Steps: │\n';
  markdown +=
    '│ │\n';
  markdown +=
    '│ 1. Run task-master next to see what to work on next │\n';
  markdown +=
    '│ 2. Run task-master expand --id=<id> to break down a task into subtasks │\n';
  markdown +=
    '│ 3. Run task-master set-status --id=<id> --status=done to mark a task as complete │\n';
  markdown +=
    '│ │\n';
  markdown +=
    '╰──────────────────────────────────────────────────────────────────────────────────────╯\n';

  return markdown;
}

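// Illustrative rendered output of the new markdown dashboard above
// (counts and percentages are invented for the example):
//
//   | Project Dashboard | |
//   | :- |:-|
//   | Task Progress | ██████████░░░░░░░░░░ 50% |
//   | Done | 5 |
//   | In Progress | 2 |
//   | Pending | 3 |
//   | Deferred | 0 |
//   | Cancelled | 0 |
//   |-|-|
//   | Subtask Progress | ████████░░░░░░░░░░░░ 40% |
//   | Completed | 4 |
//   | In Progress | 1 |
//   | Pending | 5 |
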
@@ -18,6 +18,7 @@ import {

import { generateObjectService } from '../ai-services-unified.js';
import { getDebugFlag } from '../config-manager.js';
import { getPromptManager } from '../prompt-manager.js';
import generateTaskFiles from './generate-task-files.js';
import { displayAiUsageSummary } from '../ui.js';

@@ -147,10 +148,8 @@ async function parsePRD(prdPath, tasksPath, numTasks, options = {}) {
      report(overwriteError.message, 'error');
      if (outputFormat === 'text') {
        console.error(chalk.red(overwriteError.message));
        process.exit(1);
      } else {
        throw overwriteError;
      }
      throw overwriteError;
    } else {
      // Force overwrite is true
      report(
@@ -172,74 +171,24 @@ async function parsePRD(prdPath, tasksPath, numTasks, options = {}) {
      throw new Error(`Input file ${prdPath} is empty or could not be read.`);
    }

    // Research-specific enhancements to the system prompt
    const researchPromptAddition = research
      ? `\nBefore breaking down the PRD into tasks, you will:
1. Research and analyze the latest technologies, libraries, frameworks, and best practices that would be appropriate for this project
2. Identify any potential technical challenges, security concerns, or scalability issues not explicitly mentioned in the PRD without discarding any explicit requirements or going overboard with complexity -- always aim to provide the most direct path to implementation, avoiding over-engineering or roundabout approaches
3. Consider current industry standards and evolving trends relevant to this project (this step aims to solve LLM hallucinations and out of date information due to training data cutoff dates)
4. Evaluate alternative implementation approaches and recommend the most efficient path
5. Include specific library versions, helpful APIs, and concrete implementation guidance based on your research
6. Always aim to provide the most direct path to implementation, avoiding over-engineering or roundabout approaches
    // Load prompts using PromptManager
    const promptManager = getPromptManager();

Your task breakdown should incorporate this research, resulting in more detailed implementation guidance, more accurate dependency mapping, and more precise technology recommendations than would be possible from the PRD text alone, while maintaining all explicit requirements and best practices and all details and nuances of the PRD.`
      : '';
    // Get defaultTaskPriority from config
    const { getDefaultPriority } = await import('../config-manager.js');
    const defaultTaskPriority = getDefaultPriority(projectRoot) || 'medium';

    // Base system prompt for PRD parsing
    const systemPrompt = `You are an AI assistant specialized in analyzing Product Requirements Documents (PRDs) and generating a structured, logically ordered, dependency-aware and sequenced list of development tasks in JSON format.${researchPromptAddition}

Analyze the provided PRD content and generate ${numTasks > 0 ? 'approximately ' + numTasks : 'an appropriate number of'} top-level development tasks. If the complexity or the level of detail of the PRD is high, generate more tasks relative to the complexity of the PRD
Each task should represent a logical unit of work needed to implement the requirements and focus on the most direct and effective way to implement the requirements without unnecessary complexity or overengineering. Include pseudo-code, implementation details, and test strategy for each task. Find the most up to date information to implement each task.
Assign sequential IDs starting from ${nextId}. Infer title, description, details, and test strategy for each task based *only* on the PRD content.
Set status to 'pending', dependencies to an empty array [], and priority to 'medium' initially for all tasks.
Respond ONLY with a valid JSON object containing a single key "tasks", where the value is an array of task objects adhering to the provided Zod schema. Do not include any explanation or markdown formatting.

Each task should follow this JSON structure:
{
  "id": number,
  "title": string,
  "description": string,
  "status": "pending",
  "dependencies": number[] (IDs of tasks this depends on),
  "priority": "high" | "medium" | "low",
  "details": string (implementation details),
  "testStrategy": string (validation approach)
}

Guidelines:
1. ${numTasks > 0 ? 'Unless complexity warrants otherwise' : 'Depending on the complexity'}, create ${numTasks > 0 ? 'exactly ' + numTasks : 'an appropriate number of'} tasks, numbered sequentially starting from ${nextId}
2. Each task should be atomic and focused on a single responsibility following the most up to date best practices and standards
3. Order tasks logically - consider dependencies and implementation sequence
4. Early tasks should focus on setup, core functionality first, then advanced features
5. Include clear validation/testing approach for each task
6. Set appropriate dependency IDs (a task can only depend on tasks with lower IDs, potentially including existing tasks with IDs less than ${nextId} if applicable)
7. Assign priority (high/medium/low) based on criticality and dependency order
8. Include detailed implementation guidance in the "details" field${research ? ', with specific libraries and version recommendations based on your research' : ''}
9. If the PRD contains specific requirements for libraries, database schemas, frameworks, tech stacks, or any other implementation details, STRICTLY ADHERE to these requirements in your task breakdown and do not discard them under any circumstance
10. Focus on filling in any gaps left by the PRD or areas that aren't fully specified, while preserving all explicit requirements
11. Always aim to provide the most direct path to implementation, avoiding over-engineering or roundabout approaches${research ? '\n12. For each task, include specific, actionable guidance based on current industry standards and best practices discovered through research' : ''}`;

    // Build user prompt with PRD content
    const userPrompt = `Here's the Product Requirements Document (PRD) to break down into approximately ${numTasks > 0 ? 'approximately ' + numTasks : 'an appropriate number of'} tasks, starting IDs from ${nextId}:${research ? '\n\nRemember to thoroughly research current best practices and technologies before task breakdown to provide specific, actionable implementation details.' : ''}\n\n${prdContent}\n\n

Return your response in this format:
{
  "tasks": [
    {
      "id": 1,
      "title": "Setup Project Repository",
      "description": "...",
      ...
    },
    ...
  ],
  "metadata": {
    "projectName": "PRD Implementation",
    "totalTasks": {number of tasks},
    "sourceFile": "${prdPath}",
    "generatedAt": "YYYY-MM-DD"
  }
}`;
    const { systemPrompt, userPrompt } = await promptManager.loadPrompt(
      'parse-prd',
      {
        research,
        numTasks,
        nextId,
        prdContent,
        prdPath,
        defaultTaskPriority
      }
    );

    // Call the unified AI service
    report(
@@ -420,11 +369,9 @@ Guidelines:
        // Use projectRoot for debug flag check
        console.error(error);
      }

      process.exit(1);
    } else {
      throw error; // Re-throw for JSON output
    }

    throw error; // Always re-throw for proper error handling
  }
}

@@ -12,6 +12,7 @@ import { highlight } from 'cli-highlight';
|
||||
import { ContextGatherer } from '../utils/contextGatherer.js';
|
||||
import { FuzzyTaskSearch } from '../utils/fuzzyTaskSearch.js';
|
||||
import { generateTextService } from '../ai-services-unified.js';
|
||||
import { getPromptManager } from '../prompt-manager.js';
|
||||
import {
|
||||
log as consoleLog,
|
||||
findProjectRoot,
|
||||
@@ -190,14 +191,24 @@ async function performResearch(
const gatheredContext = contextResult.context;
const tokenBreakdown = contextResult.tokenBreakdown;

// Build system prompt based on detail level
const systemPrompt = buildResearchSystemPrompt(detailLevel, projectRoot);
// Load prompts using PromptManager
const promptManager = getPromptManager();

// Build user prompt with context
const userPrompt = buildResearchUserPrompt(
  query,
  gatheredContext,
  detailLevel
const promptParams = {
  query: query,
  gatheredContext: gatheredContext || '',
  detailLevel: detailLevel,
  projectInfo: {
    root: projectRoot,
    taskCount: finalTaskIds.length,
    fileCount: filePaths.length
  }
};

// Load prompts - the research template handles detail level internally
const { systemPrompt, userPrompt } = await promptManager.loadPrompt(
  'research',
  promptParams
);

// Count tokens for system and user prompts
@@ -349,94 +360,6 @@ async function performResearch(
  }
}

/**
 * Build system prompt for research based on detail level
 * @param {string} detailLevel - Detail level: 'low', 'medium', 'high'
 * @param {string} projectRoot - Project root for context
 * @returns {string} System prompt
 */
function buildResearchSystemPrompt(detailLevel, projectRoot) {
  const basePrompt = `You are an expert AI research assistant helping with a software development project. You have access to project context including tasks, files, and project structure.

Your role is to provide comprehensive, accurate, and actionable research responses based on the user's query and the provided project context.`;

  const detailInstructions = {
    low: `
**Response Style: Concise & Direct**
- Provide brief, focused answers (2-4 paragraphs maximum)
- Focus on the most essential information
- Use bullet points for key takeaways
- Avoid lengthy explanations unless critical
- Skip pleasantries, introductions, and conclusions
- No phrases like "Based on your project context" or "I'll provide guidance"
- No summary outros or alignment statements
- Get straight to the actionable information
- Use simple, direct language - users want info, not explanation`,

    medium: `
**Response Style: Balanced & Comprehensive**
- Provide thorough but well-structured responses (4-8 paragraphs)
- Include relevant examples and explanations
- Balance depth with readability
- Use headings and bullet points for organization`,

    high: `
**Response Style: Detailed & Exhaustive**
- Provide comprehensive, in-depth analysis (8+ paragraphs)
- Include multiple perspectives and approaches
- Provide detailed examples, code snippets, and step-by-step guidance
- Cover edge cases and potential pitfalls
- Use clear structure with headings, subheadings, and lists`
  };

  return `${basePrompt}

${detailInstructions[detailLevel]}

**Guidelines:**
- Always consider the project context when formulating responses
- Reference specific tasks, files, or project elements when relevant
- Provide actionable insights that can be applied to the project
- If the query relates to existing project tasks, suggest how the research applies to those tasks
- Use markdown formatting for better readability
- Be precise and avoid speculation unless clearly marked as such

**For LOW detail level specifically:**
- Start immediately with the core information
- No introductory phrases or context acknowledgments
- No concluding summaries or project alignment statements
- Focus purely on facts, steps, and actionable items`;
}

/**
 * Build user prompt with query and context
 * @param {string} query - User's research query
 * @param {string} gatheredContext - Gathered project context
 * @param {string} detailLevel - Detail level for response guidance
 * @returns {string} Complete user prompt
 */
function buildResearchUserPrompt(query, gatheredContext, detailLevel) {
  let prompt = `# Research Query

${query}`;

  if (gatheredContext && gatheredContext.trim()) {
    prompt += `

# Project Context

${gatheredContext}`;
  }

  prompt += `

# Instructions

Please research and provide a ${detailLevel}-detail response to the query above. Consider the project context provided and make your response as relevant and actionable as possible for this specific project.`;

  return prompt;
}

/**
 * Display detailed token breakdown for context and prompts
 * @param {Object} tokenBreakdown - Token breakdown from context gatherer

@@ -22,6 +22,7 @@ import {
} from '../utils.js';
import { generateTextService } from '../ai-services-unified.js';
import { getDebugFlag } from '../config-manager.js';
import { getPromptManager } from '../prompt-manager.js';
import generateTaskFiles from './generate-task-files.js';
import { ContextGatherer } from '../utils/contextGatherer.js';
import { FuzzyTaskSearch } from '../utils/fuzzyTaskSearch.js';
@@ -160,7 +161,7 @@ async function updateSubtaskById(
  tasks: finalTaskIds,
  format: 'research'
});
gatheredContext = contextResult;
gatheredContext = contextResult.context || '';
}
} catch (contextError) {
report('warn', `Could not gather context: ${contextError.message}`);
@@ -213,7 +214,7 @@ async function updateSubtaskById(
  title: parentTask.subtasks[subtaskIndex - 1].title,
  status: parentTask.subtasks[subtaskIndex - 1].status
}
: null;
: undefined;
const nextSubtask =
subtaskIndex < parentTask.subtasks.length - 1
? {
@@ -221,32 +222,27 @@ async function updateSubtaskById(
  title: parentTask.subtasks[subtaskIndex + 1].title,
  status: parentTask.subtasks[subtaskIndex + 1].status
}
: null;
: undefined;

const contextString = `
Parent Task: ${JSON.stringify(parentContext)}
${prevSubtask ? `Previous Subtask: ${JSON.stringify(prevSubtask)}` : ''}
${nextSubtask ? `Next Subtask: ${JSON.stringify(nextSubtask)}` : ''}
Current Subtask Details (for context only):\n${subtask.details || '(No existing details)'}
`;
// Build prompts using PromptManager
const promptManager = getPromptManager();

const systemPrompt = `You are an AI assistant helping to update a subtask. You will be provided with the subtask's existing details, context about its parent and sibling tasks, and a user request string.
const promptParams = {
  parentTask: parentContext,
  prevSubtask: prevSubtask,
  nextSubtask: nextSubtask,
  currentDetails: subtask.details || '(No existing details)',
  updatePrompt: prompt,
  useResearch: useResearch,
  gatheredContext: gatheredContext || ''
};

Your Goal: Based *only* on the user's request and all the provided context (including existing details if relevant to the request), GENERATE the new text content that should be added to the subtask's details.
Focus *only* on generating the substance of the update.

Output Requirements:
1. Return *only* the newly generated text content as a plain string. Do NOT return a JSON object or any other structured data.
2. Your string response should NOT include any of the subtask's original details, unless the user's request explicitly asks to rephrase, summarize, or directly modify existing text.
3. Do NOT include any timestamps, XML-like tags, markdown, or any other special formatting in your string response.
4. Ensure the generated text is concise yet complete for the update based on the user request. Avoid conversational fillers or explanations about what you are doing (e.g., do not start with "Okay, here's the update...").`;

// Pass the existing subtask.details in the user prompt for the AI's context.
let userPrompt = `Task Context:\n${contextString}\n\nUser Request: "${prompt}"\n\nBased on the User Request and all the Task Context (including current subtask details provided above), what is the new information or text that should be appended to this subtask's details? Return ONLY this new text as a plain string.`;

if (gatheredContext) {
  userPrompt += `\n\n# Additional Project Context\n\n${gatheredContext}`;
}
const variantKey = useResearch ? 'research' : 'default';
const { systemPrompt, userPrompt } = await promptManager.loadPrompt(
  'update-subtask',
  promptParams,
  variantKey
);

const role = useResearch ? 'research' : 'main';
report('info', `Using AI text service with role: ${role}`);

@@ -25,6 +25,7 @@ import {

import { generateTextService } from '../ai-services-unified.js';
import { getDebugFlag, isApiKeySet } from '../config-manager.js';
import { getPromptManager } from '../prompt-manager.js';
import { ContextGatherer } from '../utils/contextGatherer.js';
import { FuzzyTaskSearch } from '../utils/fuzzyTaskSearch.js';

@@ -345,7 +346,7 @@ async function updateTaskById(
  tasks: finalTaskIds,
  format: 'research'
});
gatheredContext = contextResult;
gatheredContext = contextResult.context || '';
}
} catch (contextError) {
report('warn', `Could not gather context: ${contextError.message}`);
@@ -408,69 +409,61 @@ async function updateTaskById(
);
}

// --- Build Prompts (Different for append vs full update) ---
// --- Build Prompts using PromptManager ---
const promptManager = getPromptManager();

const promptParams = {
  task: taskToUpdate,
  taskJson: JSON.stringify(taskToUpdate, null, 2),
  updatePrompt: prompt,
  appendMode: appendMode,
  useResearch: useResearch,
  currentDetails: taskToUpdate.details || '(No existing details)',
  gatheredContext: gatheredContext || ''
};

const variantKey = appendMode
  ? 'append'
  : useResearch
    ? 'research'
    : 'default';

report(
  'info',
  `Loading prompt template with variant: ${variantKey}, appendMode: ${appendMode}, useResearch: ${useResearch}`
);

let systemPrompt;
let userPrompt;
try {
  const promptResult = await promptManager.loadPrompt(
    'update-task',
    promptParams,
    variantKey
  );
  report(
    'info',
    `Prompt result type: ${typeof promptResult}, keys: ${promptResult ? Object.keys(promptResult).join(', ') : 'null'}`
  );

if (appendMode) {
  // Append mode: generate new content to add to task details
  systemPrompt = `You are an AI assistant helping to append additional information to a software development task. You will be provided with the task's existing details, context, and a user request string.
  // Extract prompts - loadPrompt returns { systemPrompt, userPrompt, metadata }
  systemPrompt = promptResult.systemPrompt;
  userPrompt = promptResult.userPrompt;

Your Goal: Based *only* on the user's request and all the provided context (including existing details if relevant to the request), GENERATE the new text content that should be added to the task's details.
Focus *only* on generating the substance of the update.
  report(
    'info',
    `Loaded prompts - systemPrompt length: ${systemPrompt?.length}, userPrompt length: ${userPrompt?.length}`
  );
} catch (error) {
  report('error', `Failed to load prompt template: ${error.message}`);
  throw new Error(`Failed to load prompt template: ${error.message}`);
}

Output Requirements:
1. Return *only* the newly generated text content as a plain string. Do NOT return a JSON object or any other structured data.
2. Your string response should NOT include any of the task's original details, unless the user's request explicitly asks to rephrase, summarize, or directly modify existing text.
3. Do NOT include any timestamps, XML-like tags, markdown, or any other special formatting in your string response.
4. Ensure the generated text is concise yet complete for the update based on the user request. Avoid conversational fillers or explanations about what you are doing (e.g., do not start with "Okay, here's the update...").`;

const taskContext = `
Task: ${JSON.stringify({
  id: taskToUpdate.id,
  title: taskToUpdate.title,
  description: taskToUpdate.description,
  status: taskToUpdate.status
})}
Current Task Details (for context only):\n${taskToUpdate.details || '(No existing details)'}
`;

userPrompt = `Task Context:\n${taskContext}\n\nUser Request: "${prompt}"\n\nBased on the User Request and all the Task Context (including current task details provided above), what is the new information or text that should be appended to this task's details? Return ONLY this new text as a plain string.`;

if (gatheredContext) {
  userPrompt += `\n\n# Additional Project Context\n\n${gatheredContext}`;
}
} else {
  // Full update mode: use original prompts
  systemPrompt = `You are an AI assistant helping to update a software development task based on new context.
You will be given a task and a prompt describing changes or new implementation details.
Your job is to update the task to reflect these changes, while preserving its basic structure.

Guidelines:
1. VERY IMPORTANT: NEVER change the title of the task - keep it exactly as is
2. Maintain the same ID, status, and dependencies unless specifically mentioned in the prompt
3. Update the description, details, and test strategy to reflect the new information
4. Do not change anything unnecessarily - just adapt what needs to change based on the prompt
5. Return a complete valid JSON object representing the updated task
6. VERY IMPORTANT: Preserve all subtasks marked as "done" or "completed" - do not modify their content
7. For tasks with completed subtasks, build upon what has already been done rather than rewriting everything
8. If an existing completed subtask needs to be changed/undone based on the new context, DO NOT modify it directly
9. Instead, add a new subtask that clearly indicates what needs to be changed or replaced
10. Use the existence of completed subtasks as an opportunity to make new subtasks more specific and targeted
11. Ensure any new subtasks have unique IDs that don't conflict with existing ones
12. CRITICAL: For subtask IDs, use ONLY numeric values (1, 2, 3, etc.) NOT strings ("1", "2", "3")
13. CRITICAL: Subtask IDs should start from 1 and increment sequentially (1, 2, 3...) - do NOT use parent task ID as prefix

The changes described in the prompt should be thoughtfully applied to make the task more accurate and actionable.`;

const taskDataString = JSON.stringify(taskToUpdate, null, 2);
userPrompt = `Here is the task to update:\n${taskDataString}\n\nPlease update this task based on the following new context:\n${prompt}\n\nIMPORTANT: In the task JSON above, any subtasks with "status": "done" or "status": "completed" should be preserved exactly as is. Build your changes around these completed items.`;

if (gatheredContext) {
  userPrompt += `\n\n# Project Context\n\n${gatheredContext}`;
}

userPrompt += `\n\nReturn only the updated task as a valid JSON object.`;
// If prompts are still not set, throw an error
if (!systemPrompt || !userPrompt) {
  throw new Error(
    `Failed to load prompts: systemPrompt=${!!systemPrompt}, userPrompt=${!!userPrompt}`
  );
}
// --- End Build Prompts ---

@@ -21,6 +21,7 @@ import {
} from '../ui.js';

import { getDebugFlag } from '../config-manager.js';
import { getPromptManager } from '../prompt-manager.js';
import generateTaskFiles from './generate-task-files.js';
import { generateTextService } from '../ai-services-unified.js';
import { getModelConfiguration } from './models.js';
@@ -299,7 +300,7 @@ async function updateTasks(
  tasks: finalTaskIds,
  format: 'research'
});
gatheredContext = contextResult; // contextResult is a string
gatheredContext = contextResult.context || '';
}
} catch (contextError) {
logFn(
@@ -368,35 +369,18 @@ async function updateTasks(
}
// --- End Display Tasks ---

// --- Build Prompts (Unchanged Core Logic) ---
// Keep the original system prompt logic
const systemPrompt = `You are an AI assistant helping to update software development tasks based on new context.
You will be given a set of tasks and a prompt describing changes or new implementation details.
Your job is to update the tasks to reflect these changes, while preserving their basic structure.

Guidelines:
1. Maintain the same IDs, statuses, and dependencies unless specifically mentioned in the prompt
2. Update titles, descriptions, details, and test strategies to reflect the new information
3. Do not change anything unnecessarily - just adapt what needs to change based on the prompt
4. You should return ALL the tasks in order, not just the modified ones
5. Return a complete valid JSON object with the updated tasks array
6. VERY IMPORTANT: Preserve all subtasks marked as "done" or "completed" - do not modify their content
7. For tasks with completed subtasks, build upon what has already been done rather than rewriting everything
8. If an existing completed subtask needs to be changed/undone based on the new context, DO NOT modify it directly
9. Instead, add a new subtask that clearly indicates what needs to be changed or replaced
10. Use the existence of completed subtasks as an opportunity to make new subtasks more specific and targeted

The changes described in the prompt should be applied to ALL tasks in the list.`;

// Keep the original user prompt logic
const taskDataString = JSON.stringify(tasksToUpdate, null, 2);
let userPrompt = `Here are the tasks to update:\n${taskDataString}\n\nPlease update these tasks based on the following new context:\n${prompt}\n\nIMPORTANT: In the tasks JSON above, any subtasks with "status": "done" or "status": "completed" should be preserved exactly as is. Build your changes around these completed items.`;

if (gatheredContext) {
  userPrompt += `\n\n# Project Context\n\n${gatheredContext}`;
}

userPrompt += `\n\nReturn only the updated tasks as a valid JSON array.`;
// --- Build Prompts (Using PromptManager) ---
// Load prompts using PromptManager
const promptManager = getPromptManager();
const { systemPrompt, userPrompt } = await promptManager.loadPrompt(
  'update-tasks',
  {
    tasks: tasksToUpdate,
    updatePrompt: prompt,
    useResearch,
    projectContext: gatheredContext
  }
);
// --- End Build Prompts ---

// --- AI Call ---

572 src/prompts/README.md Normal file
@@ -0,0 +1,572 @@

# Task Master Prompt Management System

This directory contains the centralized prompt templates for all AI-powered features in Task Master.

## Overview

The prompt management system provides:
- **Centralized Storage**: All prompts in one location (`/src/prompts`)
- **JSON Schema Validation**: Comprehensive validation using AJV with detailed error reporting
- **Version Control**: Track changes to prompts over time
- **Variant Support**: Different prompts for different contexts (research mode, complexity levels, etc.)
- **Template Variables**: Dynamic prompt generation with variable substitution
- **IDE Integration**: VS Code IntelliSense and validation support

## Directory Structure

```
src/prompts/
├── README.md                        # This file
├── schemas/                         # JSON schemas for validation
│   ├── README.md                    # Schema documentation
│   ├── prompt-template.schema.json  # Main template schema
│   ├── parameter.schema.json        # Parameter validation schema
│   └── variant.schema.json          # Prompt variant schema
├── parse-prd.json                   # PRD parsing prompts
├── expand-task.json                 # Task expansion prompts
├── add-task.json                    # Task creation prompts
├── update-tasks.json                # Bulk task update prompts
├── update-task.json                 # Single task update prompts
├── update-subtask.json              # Subtask update prompts
├── analyze-complexity.json          # Complexity analysis prompts
└── research.json                    # Research query prompts
```

## Schema Validation

All prompt templates are validated against JSON schemas located in `/src/prompts/schemas/`. The validation system:

- **Structural Validation**: Ensures required fields and proper nesting
- **Parameter Type Checking**: Validates parameter types, patterns, and ranges
- **Template Syntax**: Validates Handlebars syntax and variable references
- **Semantic Versioning**: Enforces proper version format
- **Cross-Reference Validation**: Ensures parameters match template variables

### Validation Features
- **Required Fields**: `id`, `version`, `description`, `prompts.default`
- **Type Safety**: String, number, boolean, array, object validation
- **Pattern Matching**: Regex validation for string parameters
- **Range Validation**: Min/max values for numeric parameters
- **Enum Constraints**: Restricted value sets for categorical parameters

## Development Workflow

### Setting Up Development Environment
1. **VS Code Integration**: Schemas are automatically configured for IntelliSense
2. **Dependencies**: `ajv` and `ajv-formats` are required for validation
3. **File Watching**: Changes to templates trigger automatic validation

### Creating New Prompts
1. Create a new `.json` file in `/src/prompts/`
2. Follow the schema structure (see Template Structure section)
3. Define parameters with proper types and validation
4. Create system and user prompts with template variables
5. Test with the PromptManager before committing

### Modifying Existing Prompts
1. Update the `version` field following semantic versioning
2. Maintain backward compatibility when possible
3. Test with existing code that uses the prompt
4. Update documentation if parameters change

## Prompt Template Reference

### 1. parse-prd.json
**Purpose**: Parse a Product Requirements Document into structured tasks
**Variants**: `default`, `research` (when research mode is enabled)

**Required Parameters**:
- `numTasks` (number): Target number of tasks to generate
- `nextId` (number): Starting ID for tasks
- `prdContent` (string): Content of the PRD file
- `prdPath` (string): Path to the PRD file
- `defaultTaskPriority` (string): Default priority for generated tasks

**Optional Parameters**:
- `research` (boolean): Enable research mode for latest best practices (default: false)

**Usage**: Used by `task-master parse-prd` command to convert PRD documents into actionable task lists.
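
For orientation, a minimal call mirroring how `parse-prd.js` loads this template in the diff above (the literal values here are placeholders, not defaults):

```javascript
import { getPromptManager } from '../prompt-manager.js';

const promptManager = getPromptManager();
// Placeholder values for illustration; parse-prd.js derives these from CLI flags and file reads.
const { systemPrompt, userPrompt } = await promptManager.loadPrompt('parse-prd', {
  research: false,
  numTasks: 10, // 0 lets the model choose an appropriate count
  nextId: 1, // first generated task ID
  prdContent, // raw text of the PRD file
  prdPath, // path recorded in the generated metadata
  defaultTaskPriority: 'medium'
});
```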

### 2. add-task.json
**Purpose**: Generate a new task based on user description
**Variants**: `default`, `research` (when research mode is enabled)

**Required Parameters**:
- `prompt` (string): User's task description
- `newTaskId` (number): ID for the new task

**Optional Parameters**:
- `existingTasks` (array): List of existing tasks for context
- `gatheredContext` (string): Context gathered from codebase analysis
- `contextFromArgs` (string): Additional context from manual args
- `priority` (string): Task priority (high/medium/low, default: medium)
- `dependencies` (array): Task dependency IDs
- `useResearch` (boolean): Use research mode (default: false)

**Usage**: Used by `task-master add-task` command to create new tasks with AI assistance.

### 3. expand-task.json
**Purpose**: Break down a task into detailed subtasks with three sophisticated strategies
**Variants**: `complexity-report` (when expansionPrompt exists), `research` (when research mode is enabled), `default` (standard case)

**Required Parameters**:
- `subtaskCount` (number): Number of subtasks to generate
- `task` (object): The task to expand
- `nextSubtaskId` (number): Starting ID for new subtasks

**Optional Parameters**:
- `additionalContext` (string): Additional context for expansion (default: "")
- `complexityReasoningContext` (string): Complexity analysis reasoning context (default: "")
- `gatheredContext` (string): Gathered project context (default: "")
- `useResearch` (boolean): Use research mode (default: false)
- `expansionPrompt` (string): Expansion prompt from complexity report

**Variant Selection Strategy**:
1. **complexity-report**: Used when `expansionPrompt` exists (highest priority)
2. **research**: Used when `useResearch === true && !expansionPrompt`
3. **default**: Standard fallback strategy

**Usage**: Used by `task-master expand` command to break complex tasks into manageable subtasks using the most appropriate strategy based on available context and complexity analysis.
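
A sketch of the selection strategy in practice (values illustrative): supplying `expansionPrompt` makes the `complexity-report` variant win even when research mode is on.

```javascript
// expansionPrompt takes priority, so the complexity-report variant is selected
// even though useResearch is true.
const { systemPrompt, userPrompt } = await promptManager.loadPrompt('expand-task', {
  task, // the task object being expanded
  subtaskCount: 4,
  nextSubtaskId: 1,
  useResearch: true,
  expansionPrompt: complexityReportEntry.expansionPrompt // hypothetical report lookup
});
```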

### 4. update-task.json
**Purpose**: Update a single task with new information, supporting full updates and append mode
**Variants**: `default`, `append` (when appendMode is true), `research` (when research mode is enabled)

**Required Parameters**:
- `task` (object): The task to update
- `taskJson` (string): JSON string representation of the task
- `updatePrompt` (string): Description of changes to apply

**Optional Parameters**:
- `appendMode` (boolean): Whether to append to details or do full update (default: false)
- `useResearch` (boolean): Use research mode (default: false)
- `currentDetails` (string): Current task details for context (default: "(No existing details)")
- `gatheredContext` (string): Additional project context

**Usage**: Used by `task-master update-task` command to modify existing tasks.
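
The `update-task.js` diff above selects the variant explicitly and passes it as a third argument to `loadPrompt`; condensed:

```javascript
const variantKey = appendMode ? 'append' : useResearch ? 'research' : 'default';
const { systemPrompt, userPrompt } = await promptManager.loadPrompt(
  'update-task',
  {
    task: taskToUpdate,
    taskJson: JSON.stringify(taskToUpdate, null, 2),
    updatePrompt: prompt,
    appendMode,
    useResearch,
    currentDetails: taskToUpdate.details || '(No existing details)',
    gatheredContext: gatheredContext || ''
  },
  variantKey
);
```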

### 5. update-tasks.json
**Purpose**: Update multiple tasks based on new context or changes
**Variants**: `default`, `research` (when research mode is enabled)

**Required Parameters**:
- `tasks` (array): Array of tasks to update
- `updatePrompt` (string): Description of changes to apply

**Optional Parameters**:
- `useResearch` (boolean): Use research mode (default: false)
- `projectContext` (string): Additional project context

**Usage**: Used by `task-master update` command to bulk update multiple tasks.
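
As used in the `update-tasks.js` diff above:

```javascript
const { systemPrompt, userPrompt } = await promptManager.loadPrompt('update-tasks', {
  tasks: tasksToUpdate, // all tasks to rewrite, not just the changed ones
  updatePrompt: prompt, // the new context to apply
  useResearch,
  projectContext: gatheredContext
});
```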

### 6. update-subtask.json
**Purpose**: Append information to a subtask by generating only new content
**Variants**: `default`, `research` (when research mode is enabled)

**Required Parameters**:
- `parentTask` (object): The parent task context
- `currentDetails` (string): Current subtask details (default: "(No existing details)")
- `updatePrompt` (string): User request for what to add

**Optional Parameters**:
- `prevSubtask` (object): The previous subtask if any
- `nextSubtask` (object): The next subtask if any
- `useResearch` (boolean): Use research mode (default: false)
- `gatheredContext` (string): Additional project context

**Usage**: Used by `task-master update-subtask` command to log progress and findings on subtasks.
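
The `update-subtask.js` diff above builds its parameters like this (condensed):

```javascript
const variantKey = useResearch ? 'research' : 'default';
const { systemPrompt, userPrompt } = await promptManager.loadPrompt(
  'update-subtask',
  {
    parentTask: parentContext,
    prevSubtask, // undefined when the subtask is first
    nextSubtask, // undefined when the subtask is last
    currentDetails: subtask.details || '(No existing details)',
    updatePrompt: prompt,
    useResearch,
    gatheredContext: gatheredContext || ''
  },
  variantKey
);
```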

### 7. analyze-complexity.json
**Purpose**: Analyze task complexity and generate expansion recommendations
**Variants**: `default`, `research` (when research mode is enabled), `batch` (when analyzing >10 tasks)

**Required Parameters**:
- `tasks` (array): Array of tasks to analyze

**Optional Parameters**:
- `gatheredContext` (string): Additional project context
- `threshold` (number): Complexity threshold for expansion recommendation (1-10, default: 5)
- `useResearch` (boolean): Use research mode for deeper analysis (default: false)

**Usage**: Used by `task-master analyze-complexity` command to determine which tasks need breakdown.
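
A minimal sketch (the threshold value is illustrative):

```javascript
const { systemPrompt, userPrompt } = await promptManager.loadPrompt('analyze-complexity', {
  tasks, // the tasks to score
  threshold: 5, // expansion is recommended at or above this score
  useResearch: false,
  gatheredContext: ''
});
```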

### 8. research.json
**Purpose**: Perform AI-powered research with project context
**Variants**: `default`, `low` (concise responses), `medium` (balanced), `high` (detailed)

**Required Parameters**:
- `query` (string): Research query

**Optional Parameters**:
- `gatheredContext` (string): Gathered project context
- `detailLevel` (string): Level of detail (low/medium/high, default: medium)
- `projectInfo` (object): Project information with properties:
  - `root` (string): Project root path
  - `taskCount` (number): Number of related tasks
  - `fileCount` (number): Number of related files

**Usage**: Used by `task-master research` command to get contextual information and guidance.
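
The `research.js` diff above assembles the parameters like this (the query string here is illustrative):

```javascript
const { systemPrompt, userPrompt } = await promptManager.loadPrompt('research', {
  query: 'How should the auth middleware be structured?', // illustrative
  gatheredContext: gatheredContext || '',
  detailLevel: 'medium',
  projectInfo: {
    root: projectRoot,
    taskCount: finalTaskIds.length,
    fileCount: filePaths.length
  }
});
```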

## Template Structure

Each prompt template is a JSON file with the following structure:

```json
{
  "id": "unique-identifier",
  "version": "1.0.0",
  "description": "What this prompt does",
  "metadata": {
    "author": "system",
    "created": "2024-01-01T00:00:00Z",
    "updated": "2024-01-01T00:00:00Z",
    "tags": ["category", "feature"],
    "category": "task"
  },
  "parameters": {
    "paramName": {
      "type": "string|number|boolean|array|object",
      "required": true|false,
      "default": "default value",
      "description": "Parameter description",
      "enum": ["option1", "option2"],
      "pattern": "^[a-z]+$",
      "minimum": 1,
      "maximum": 100
    }
  },
  "prompts": {
    "default": {
      "system": "System prompt template",
      "user": "User prompt template"
    },
    "variant-name": {
      "condition": "JavaScript expression",
      "system": "Variant system prompt",
      "user": "Variant user prompt",
      "metadata": {
        "description": "When to use this variant"
      }
    }
  }
}
```

## Template Features

### Variable Substitution
Use `{{variableName}}` to inject dynamic values:
```
"user": "Analyze these {{tasks.length}} tasks with threshold {{threshold}}"
```

### Conditionals
Use `{{#if variable}}...{{/if}}` for conditional content:
```
"user": "{{#if useResearch}}Research and {{/if}}create a task"
```

### Helper Functions

#### Equality Helper
Use `{{#if (eq variable "value")}}...{{/if}}` for string comparisons:
```
"user": "{{#if (eq detailLevel \"low\")}}Provide a brief summary{{/if}}"
"user": "{{#if (eq priority \"high\")}}URGENT: {{/if}}{{taskTitle}}"
```

The `eq` helper enables clean conditional logic based on parameter values:
- Compare strings: `(eq detailLevel "medium")`
- Compare with enum values: `(eq status "pending")`
- Multiple conditions: `{{#if (eq level "1")}}First{{/if}}{{#if (eq level "2")}}Second{{/if}}`

#### Negation Helper
Use `{{#if (not variable)}}...{{/if}}` for negation conditions:
```
"user": "{{#if (not useResearch)}}Use basic analysis{{/if}}"
"user": "{{#if (not hasSubtasks)}}This task has no subtasks{{/if}}"
```

The `not` helper enables clean negative conditional logic:
- Negate boolean values: `(not useResearch)`
- Negate truthy/falsy values: `(not emptyArray)`
- Cleaner than separate boolean parameters: No need for `notUseResearch` flags

#### Numeric Comparison Helpers
Use `{{#if (gt variable number)}}...{{/if}}` for greater than comparisons:
```
"user": "generate {{#if (gt numTasks 0)}}approximately {{numTasks}}{{else}}an appropriate number of{{/if}} top-level development tasks"
"user": "{{#if (gt complexity 5)}}This is a complex task{{/if}}"
"system": "create {{#if (gt subtaskCount 0)}}exactly {{subtaskCount}}{{else}}an appropriate number of{{/if}} subtasks"
```

Use `{{#if (gte variable number)}}...{{/if}}` for greater than or equal comparisons:
```
"user": "{{#if (gte priority 8)}}HIGH PRIORITY{{/if}}"
"user": "{{#if (gte threshold 1)}}Analysis enabled{{/if}}"
"system": "{{#if (gte complexityScore 8)}}Use detailed breakdown approach{{/if}}"
```

The numeric comparison helpers enable sophisticated conditional logic:
- **Dynamic counting**: `{{#if (gt numTasks 0)}}exactly {{numTasks}}{{else}}an appropriate number of{{/if}}`
- **Threshold-based behavior**: `(gte complexityScore 8)` for high-complexity handling
- **Zero checks**: `(gt subtaskCount 0)` for conditional content generation
- **Decimal support**: `(gt score 7.5)` for fractional comparisons
- **Enhanced prompt sophistication**: Enables parse-prd and expand-task logic matching GitHub specifications

### Loops
Use `{{#each array}}...{{/each}}` to iterate over arrays:
```
"user": "Tasks:\n{{#each tasks}}- {{id}}: {{title}}\n{{/each}}"
```

### Special Loop Variables
Inside `{{#each}}` blocks, you have access to:
- `{{@index}}`: Current array index (0-based)
- `{{@first}}`: Boolean, true for first item
- `{{@last}}`: Boolean, true for last item

```
"user": "{{#each tasks}}{{@index}}. {{title}}{{#unless @last}}\n{{/unless}}{{/each}}"
```

### JSON Serialization
Use `{{{json variable}}}` (triple braces) to serialize objects/arrays to JSON:
```
"user": "Analyze these tasks: {{{json tasks}}}"
```

### Nested Properties
Access nested properties with dot notation:
```
"user": "Project: {{context.projectName}}"
```

## Prompt Variants

Variants allow different prompts based on conditions:

```json
{
  "prompts": {
    "default": {
      "system": "Default system prompt",
      "user": "Default user prompt"
    },
    "research": {
      "condition": "useResearch === true",
      "system": "Research-focused system prompt",
      "user": "Research-focused user prompt"
    },
    "high-complexity": {
      "condition": "complexityScore >= 8",
      "system": "Complex task handling prompt",
      "user": "Detailed breakdown request"
    }
  }
}
```

### Condition Evaluation
Conditions are JavaScript expressions evaluated with parameter values as context:
- Simple comparisons: `useResearch === true`
- Numeric comparisons: `threshold >= 5`
- String matching: `priority === 'high'`
- Complex logic: `useResearch && threshold > 7`
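
A minimal sketch of how such a condition could be evaluated; the actual `prompt-manager.js` implementation may differ, this only illustrates "JavaScript expression with parameter values as context":

```javascript
// Illustrative only: evaluate a variant condition string against the parameters.
function evaluateCondition(condition, params) {
  const names = Object.keys(params);
  const values = Object.values(params);
  // Each parameter becomes a local variable visible to the expression.
  const fn = new Function(...names, `return (${condition});`);
  return Boolean(fn(...values));
}

evaluateCondition('useResearch && threshold > 7', { useResearch: true, threshold: 9 }); // true
```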

## PromptManager Module

The PromptManager is implemented in `scripts/modules/prompt-manager.js` and provides:
- **Template loading and caching**: Templates are loaded once and cached for performance
- **Schema validation**: Comprehensive validation using AJV with detailed error reporting
- **Variable substitution**: Handlebars-like syntax for dynamic content
- **Variant selection**: Automatic selection based on conditions
- **Error handling**: Graceful fallbacks and detailed error messages
- **Singleton pattern**: One instance per project root for efficiency

### Validation Behavior
- **Schema Available**: Full validation with detailed error messages
- **Schema Missing**: Falls back to basic structural validation
- **Invalid Templates**: Throws descriptive errors with field-level details
- **Parameter Validation**: Type checking, pattern matching, range validation

## Usage in Code

### Basic Usage
```javascript
import { getPromptManager } from '../prompt-manager.js';

const promptManager = getPromptManager();
const { systemPrompt, userPrompt, metadata } = await promptManager.loadPrompt('add-task', {
  // Parameters matching the template's parameter definitions
  prompt: 'Create a user authentication system',
  newTaskId: 5,
  priority: 'high',
  useResearch: false
});

// Use with AI service
const result = await generateObjectService({
  systemPrompt,
  prompt: userPrompt,
  // ... other AI parameters
});
```

### With Variants
```javascript
// Research variant will be selected automatically
const { systemPrompt, userPrompt } = await promptManager.loadPrompt('expand-task', {
  useResearch: true, // Triggers research variant
  task: taskObject,
  subtaskCount: 5
});
```

### Error Handling
```javascript
try {
  const result = await promptManager.loadPrompt('invalid-template', {});
} catch (error) {
  if (error.message.includes('Schema validation failed')) {
    console.error('Template validation error:', error.message);
  } else if (error.message.includes('not found')) {
    console.error('Template not found:', error.message);
  }
}
```

## Adding New Prompts

1. **Create the JSON file** following the template structure
2. **Define parameters** with proper types, validation, and descriptions
3. **Create prompts** with clear system and user templates
4. **Use template variables** for dynamic content
5. **Add variants** if needed for different contexts
6. **Test thoroughly** with the PromptManager
7. **Update this documentation** with the new prompt details

### Example New Prompt
```json
{
  "id": "new-feature",
  "version": "1.0.0",
  "description": "Generate code for a new feature",
  "parameters": {
    "featureName": {
      "type": "string",
      "required": true,
      "pattern": "^[a-zA-Z][a-zA-Z0-9-]*$",
      "description": "Name of the feature to implement"
    },
    "complexity": {
      "type": "string",
      "required": false,
      "enum": ["simple", "medium", "complex"],
      "default": "medium",
      "description": "Feature complexity level"
    }
  },
  "prompts": {
    "default": {
      "system": "You are a senior software engineer.",
      "user": "Create a {{complexity}} {{featureName}} feature."
    }
  }
}
```

## Best Practices

### Template Design
1. **Clear IDs**: Use kebab-case, descriptive identifiers
2. **Semantic Versioning**: Follow semver for version management
3. **Comprehensive Parameters**: Define all required and optional parameters
4. **Type Safety**: Use proper parameter types and validation
5. **Clear Descriptions**: Document what each prompt and parameter does

### Variable Usage
1. **Meaningful Names**: Use descriptive variable names
2. **Consistent Patterns**: Follow established naming conventions
3. **Safe Defaults**: Provide sensible default values
4. **Validation**: Use patterns, enums, and ranges for validation

### Variant Strategy
1. **Simple Conditions**: Keep variant conditions easy to understand
2. **Clear Purpose**: Each variant should have a distinct use case
3. **Fallback Logic**: Always provide a default variant
4. **Documentation**: Explain when each variant is used

### Performance
1. **Caching**: Templates are cached automatically
2. **Lazy Loading**: Templates load only when needed
3. **Minimal Variants**: Don't create unnecessary variants
4. **Efficient Conditions**: Keep condition evaluation fast

## Testing Prompts

### Validation Testing
```javascript
// Test schema validation
const promptManager = getPromptManager();
const results = promptManager.validateAllPrompts();
console.log(`Valid: ${results.valid.length}, Errors: ${results.errors.length}`);
```

### Integration Testing
When modifying prompts, be sure to test:
- Variable substitution works with actual data structures
- Variant selection triggers correctly based on conditions
- AI responses remain consistent with expected behavior
- All parameters are properly validated
- Error handling works for invalid inputs

### Quick Testing
```javascript
// Test prompt loading and variable substitution
const promptManager = getPromptManager();
const result = await promptManager.loadPrompt('research', {
  query: 'What are the latest React best practices?',
  detailLevel: 'medium',
  gatheredContext: 'React project with TypeScript'
});
console.log('System:', result.systemPrompt);
console.log('User:', result.userPrompt);
console.log('Metadata:', result.metadata);
```

### Testing Checklist
- [ ] Template validates against schema
- [ ] All required parameters are defined
- [ ] Variable substitution works correctly
- [ ] Variants trigger under correct conditions
- [ ] Error messages are clear and helpful
- [ ] Performance is acceptable for repeated usage

## Troubleshooting

### Common Issues

**Schema Validation Errors**:
- Check required fields are present
- Verify parameter types match schema
- Ensure version follows semantic versioning
- Validate JSON syntax

**Variable Substitution Problems**:
- Check variable names match parameter names
- Verify nested property access syntax
- Ensure array iteration syntax is correct
- Test with actual data structures

**Variant Selection Issues**:
- Verify condition syntax is valid JavaScript
- Check parameter values match condition expectations
- Ensure default variant exists
- Test condition evaluation with debug logging

**Performance Issues**:
- Check for circular references in templates
- Verify caching is working correctly
- Monitor template loading frequency
- Consider simplifying complex conditions

56 src/prompts/add-task.json Normal file
@@ -0,0 +1,56 @@
{
  "id": "add-task",
  "version": "1.0.0",
  "description": "Generate a new task based on description",
  "metadata": {
    "author": "system",
    "created": "2024-01-01T00:00:00Z",
    "updated": "2024-01-01T00:00:00Z",
    "tags": ["task-creation", "generation"]
  },
  "parameters": {
    "prompt": {
      "type": "string",
      "required": true,
      "description": "User's task description"
    },
    "newTaskId": {
      "type": "number",
      "required": true,
      "description": "ID for the new task"
    },
    "existingTasks": {
      "type": "array",
      "description": "List of existing tasks for context"
    },
    "gatheredContext": {
      "type": "string",
      "description": "Context gathered from codebase analysis"
    },
    "contextFromArgs": {
      "type": "string",
      "description": "Additional context from manual args"
    },
    "priority": {
      "type": "string",
      "default": "medium",
      "enum": ["high", "medium", "low"],
      "description": "Task priority"
    },
    "dependencies": {
      "type": "array",
      "description": "Task dependency IDs"
    },
    "useResearch": {
      "type": "boolean",
      "default": false,
      "description": "Use research mode"
    }
  },
  "prompts": {
    "default": {
"system": "You are a helpful assistant that creates well-structured tasks for a software development project. Generate a single new task based on the user's description, adhering strictly to the provided JSON schema. Pay special attention to dependencies between tasks, ensuring the new task correctly references any tasks it depends on.\n\nWhen determining dependencies for a new task, follow these principles:\n1. Select dependencies based on logical requirements - what must be completed before this task can begin.\n2. Prioritize task dependencies that are semantically related to the functionality being built.\n3. Consider both direct dependencies (immediately prerequisite) and indirect dependencies.\n4. Avoid adding unnecessary dependencies - only include tasks that are genuinely prerequisite.\n5. Consider the current status of tasks - prefer completed tasks as dependencies when possible.\n6. Pay special attention to foundation tasks (1-5) but don't automatically include them without reason.\n7. Recent tasks (higher ID numbers) may be more relevant for newer functionality.\n\nThe dependencies array should contain task IDs (numbers) of prerequisite tasks.{{#if useResearch}}\n\nResearch current best practices and technologies relevant to this task.{{/if}}",
"user": "You are generating the details for Task #{{newTaskId}}. Based on the user's request: \"{{prompt}}\", create a comprehensive new task for a software development project.\n \n {{gatheredContext}}\n \n {{#if useResearch}}Research current best practices, technologies, and implementation patterns relevant to this task. {{/if}}Based on the information about existing tasks provided above, include appropriate dependencies in the \"dependencies\" array. Only include task IDs that this new task directly depends on.\n \n Return your answer as a single JSON object matching the schema precisely:\n \n {\n \"title\": \"Task title goes here\",\n \"description\": \"A concise one or two sentence description of what the task involves\",\n \"details\": \"Detailed implementation steps, considerations, code examples, or technical approach\",\n \"testStrategy\": \"Specific steps to verify correct implementation and functionality\",\n \"dependencies\": [1, 3] // Example: IDs of tasks that must be completed before this task\n }\n \n Make sure the details and test strategy are comprehensive and specific{{#if useResearch}}, incorporating current best practices from your research{{/if}}. DO NOT include the task ID in the title.\n {{#if contextFromArgs}}{{contextFromArgs}}{{/if}}"
    }
  }
}

41 src/prompts/analyze-complexity.json Normal file
@@ -0,0 +1,41 @@
{
  "id": "analyze-complexity",
  "version": "1.0.0",
  "description": "Analyze task complexity and generate expansion recommendations",
  "metadata": {
    "author": "system",
    "created": "2024-01-01T00:00:00Z",
    "updated": "2024-01-01T00:00:00Z",
    "tags": ["analysis", "complexity", "expansion", "recommendations"]
  },
  "parameters": {
    "tasks": {
      "type": "array",
      "required": true,
      "description": "Array of tasks to analyze"
    },
    "gatheredContext": {
      "type": "string",
      "default": "",
      "description": "Additional project context"
    },
    "threshold": {
      "type": "number",
      "default": 5,
      "min": 1,
      "max": 10,
      "description": "Complexity threshold for expansion recommendation"
    },
    "useResearch": {
      "type": "boolean",
      "default": false,
      "description": "Use research mode for deeper analysis"
    }
  },
  "prompts": {
    "default": {
      "system": "You are an expert software architect and project manager analyzing task complexity. Respond only with the requested valid JSON array.",
"user": "Analyze the following tasks to determine their complexity (1-10 scale) and recommend the number of subtasks for expansion. Provide a brief reasoning and an initial expansion prompt for each.{{#if useResearch}} Consider current best practices, common implementation patterns, and industry standards in your analysis.{{/if}}\n\nTasks:\n{{{json tasks}}}\n{{#if gatheredContext}}\n\n# Project Context\n\n{{gatheredContext}}\n{{/if}}\n\nRespond ONLY with a valid JSON array matching the schema:\n[\n {\n \"taskId\": <number>,\n \"taskTitle\": \"<string>\",\n \"complexityScore\": <number 1-10>,\n \"recommendedSubtasks\": <number>,\n \"expansionPrompt\": \"<string>\",\n \"reasoning\": \"<string>\"\n },\n ...\n]\n\nDo not include any explanatory text, markdown formatting, or code block markers before or after the JSON array."
    }
  }
}

72 src/prompts/expand-task.json Normal file
@@ -0,0 +1,72 @@
{
  "id": "expand-task",
  "version": "1.0.0",
  "description": "Break down a task into detailed subtasks",
  "metadata": {
    "author": "system",
    "created": "2024-01-01T00:00:00Z",
    "updated": "2024-01-01T00:00:00Z",
    "tags": ["expansion", "subtasks", "breakdown"]
  },
  "parameters": {
    "subtaskCount": {
      "type": "number",
      "required": true,
      "description": "Number of subtasks to generate"
    },
    "task": {
      "type": "object",
      "required": true,
      "description": "The task to expand"
    },
    "nextSubtaskId": {
      "type": "number",
      "required": true,
      "description": "Starting ID for new subtasks"
    },
    "useResearch": {
      "type": "boolean",
      "default": false,
      "description": "Use research mode"
    },
    "expansionPrompt": {
      "type": "string",
      "required": false,
      "description": "Expansion prompt from complexity report"
    },
    "additionalContext": {
      "type": "string",
      "required": false,
      "default": "",
      "description": "Additional context for task expansion"
    },
    "complexityReasoningContext": {
      "type": "string",
      "required": false,
      "default": "",
      "description": "Complexity analysis reasoning context"
    },
    "gatheredContext": {
      "type": "string",
      "required": false,
      "default": "",
      "description": "Gathered project context"
    }
  },
  "prompts": {
    "complexity-report": {
      "condition": "expansionPrompt",
"system": "You are an AI assistant helping with task breakdown. Generate {{#if (gt subtaskCount 0)}}exactly {{subtaskCount}}{{else}}an appropriate number of{{/if}} subtasks based on the provided prompt and context.\nRespond ONLY with a valid JSON object containing a single key \"subtasks\" whose value is an array of the generated subtask objects.\nEach subtask object in the array must have keys: \"id\", \"title\", \"description\", \"dependencies\", \"details\", \"status\".\nEnsure the 'id' starts from {{nextSubtaskId}} and is sequential.\nEnsure 'dependencies' only reference valid prior subtask IDs generated in this response (starting from {{nextSubtaskId}}).\nEnsure 'status' is 'pending'.\nDo not include any other text or explanation.",
"user": "{{expansionPrompt}}{{#if additionalContext}}\n\n{{additionalContext}}{{/if}}{{#if complexityReasoningContext}}\n\n{{complexityReasoningContext}}{{/if}}{{#if gatheredContext}}\n\n# Project Context\n\n{{gatheredContext}}{{/if}}"
    },
    "research": {
      "condition": "useResearch === true && !expansionPrompt",
      "system": "You are an AI assistant that responds ONLY with valid JSON objects as requested. The object should contain a 'subtasks' array.",
"user": "Analyze the following task and break it down into {{#if (gt subtaskCount 0)}}exactly {{subtaskCount}}{{else}}an appropriate number of{{/if}} specific subtasks using your research capabilities. Assign sequential IDs starting from {{nextSubtaskId}}.\n\nParent Task:\nID: {{task.id}}\nTitle: {{task.title}}\nDescription: {{task.description}}\nCurrent details: {{#if task.details}}{{task.details}}{{else}}None{{/if}}{{#if additionalContext}}\nConsider this context: {{additionalContext}}{{/if}}{{#if complexityReasoningContext}}\nComplexity Analysis Reasoning: {{complexityReasoningContext}}{{/if}}{{#if gatheredContext}}\n\n# Project Context\n\n{{gatheredContext}}{{/if}}\n\nCRITICAL: Respond ONLY with a valid JSON object containing a single key \"subtasks\". The value must be an array of the generated subtasks, strictly matching this structure:\n\n{\n \"subtasks\": [\n {\n \"id\": <number>, // Sequential ID starting from {{nextSubtaskId}}\n \"title\": \"<string>\",\n \"description\": \"<string>\",\n \"dependencies\": [<number>], // e.g., [{{nextSubtaskId}} + 1]. If no dependencies, use an empty array [].\n \"details\": \"<string>\",\n \"testStrategy\": \"<string>\" // Optional\n },\n // ... (repeat for {{#if (gt subtaskCount 0)}}{{subtaskCount}}{{else}}appropriate number of{{/if}} subtasks)\n ]\n}\n\nImportant: For the 'dependencies' field, if a subtask has no dependencies, you MUST use an empty array, for example: \"dependencies\": []. Do not use null or omit the field.\n\nDo not include ANY explanatory text, markdown, or code block markers. Just the JSON object."
    },
    "default": {
"system": "You are an AI assistant helping with task breakdown for software development.\nYou need to break down a high-level task into {{#if (gt subtaskCount 0)}}{{subtaskCount}}{{else}}an appropriate number of{{/if}} specific subtasks that can be implemented one by one.\n\nSubtasks should:\n1. Be specific and actionable implementation steps\n2. Follow a logical sequence\n3. Each handle a distinct part of the parent task\n4. Include clear guidance on implementation approach\n5. Have appropriate dependency chains between subtasks (using the new sequential IDs)\n6. Collectively cover all aspects of the parent task\n\nFor each subtask, provide:\n- id: Sequential integer starting from the provided nextSubtaskId\n- title: Clear, specific title\n- description: Detailed description\n- dependencies: Array of prerequisite subtask IDs (use the new sequential IDs)\n- details: Implementation details, the output should be in string\n- testStrategy: Optional testing approach\n\nRespond ONLY with a valid JSON object containing a single key \"subtasks\" whose value is an array matching the structure described. Do not include any explanatory text, markdown formatting, or code block markers.",
"user": "Break down this task into {{#if (gt subtaskCount 0)}}exactly {{subtaskCount}}{{else}}an appropriate number of{{/if}} specific subtasks:\n\nTask ID: {{task.id}}\nTitle: {{task.title}}\nDescription: {{task.description}}\nCurrent details: {{#if task.details}}{{task.details}}{{else}}None{{/if}}{{#if additionalContext}}\nAdditional context: {{additionalContext}}{{/if}}{{#if complexityReasoningContext}}\nComplexity Analysis Reasoning: {{complexityReasoningContext}}{{/if}}{{#if gatheredContext}}\n\n# Project Context\n\n{{gatheredContext}}{{/if}}\n\nReturn ONLY the JSON object containing the \"subtasks\" array, matching this structure:\n\n{\n \"subtasks\": [\n {\n \"id\": {{nextSubtaskId}}, // First subtask ID\n \"title\": \"Specific subtask title\",\n \"description\": \"Detailed description\",\n \"dependencies\": [], // e.g., [{{nextSubtaskId}} + 1] if it depends on the next\n \"details\": \"Implementation guidance\",\n \"testStrategy\": \"Optional testing approach\"\n },\n // ... (repeat for {{#if (gt subtaskCount 0)}}a total of {{subtaskCount}}{{else}}an appropriate number of{{/if}} subtasks with sequential IDs)\n ]\n}"
}
}
}

51
src/prompts/parse-prd.json
Normal file
@@ -0,0 +1,51 @@
{
"id": "parse-prd",
"version": "1.0.0",
"description": "Parse a Product Requirements Document into structured tasks",
"metadata": {
"author": "system",
"created": "2024-01-01T00:00:00Z",
"updated": "2024-01-01T00:00:00Z",
"tags": ["prd", "parsing", "initialization"]
},
"parameters": {
"numTasks": {
"type": "number",
"required": true,
"description": "Target number of tasks to generate"
},
"nextId": {
"type": "number",
"required": true,
"description": "Starting ID for tasks"
},
"research": {
"type": "boolean",
"default": false,
"description": "Enable research mode for latest best practices"
},
"prdContent": {
"type": "string",
"required": true,
"description": "Content of the PRD file"
},
"prdPath": {
"type": "string",
"required": true,
"description": "Path to the PRD file"
},
"defaultTaskPriority": {
"type": "string",
"required": false,
"default": "medium",
"enum": ["high", "medium", "low"],
"description": "Default priority for generated tasks"
}
},
"prompts": {
"default": {
"system": "You are an AI assistant specialized in analyzing Product Requirements Documents (PRDs) and generating a structured, logically ordered, dependency-aware and sequenced list of development tasks in JSON format.{{#if research}}\nBefore breaking down the PRD into tasks, you will:\n1. Research and analyze the latest technologies, libraries, frameworks, and best practices that would be appropriate for this project\n2. Identify any potential technical challenges, security concerns, or scalability issues not explicitly mentioned in the PRD without discarding any explicit requirements or going overboard with complexity -- always aim to provide the most direct path to implementation, avoiding over-engineering or roundabout approaches\n3. Consider current industry standards and evolving trends relevant to this project (this step aims to solve LLM hallucinations and out of date information due to training data cutoff dates)\n4. Evaluate alternative implementation approaches and recommend the most efficient path\n5. Include specific library versions, helpful APIs, and concrete implementation guidance based on your research\n6. Always aim to provide the most direct path to implementation, avoiding over-engineering or roundabout approaches\n\nYour task breakdown should incorporate this research, resulting in more detailed implementation guidance, more accurate dependency mapping, and more precise technology recommendations than would be possible from the PRD text alone, while maintaining all explicit requirements and best practices and all details and nuances of the PRD.{{/if}}\n\nAnalyze the provided PRD content and generate {{#if (gt numTasks 0)}}approximately {{numTasks}}{{else}}an appropriate number of{{/if}} top-level development tasks. If the complexity or the level of detail of the PRD is high, generate more tasks relative to the complexity of the PRD\nEach task should represent a logical unit of work needed to implement the requirements and focus on the most direct and effective way to implement the requirements without unnecessary complexity or overengineering. Include pseudo-code, implementation details, and test strategy for each task. Find the most up to date information to implement each task.\nAssign sequential IDs starting from {{nextId}}. Infer title, description, details, and test strategy for each task based *only* on the PRD content.\nSet status to 'pending', dependencies to an empty array [], and priority to '{{defaultTaskPriority}}' initially for all tasks.\nRespond ONLY with a valid JSON object containing a single key \"tasks\", where the value is an array of task objects adhering to the provided Zod schema. Do not include any explanation or markdown formatting.\n\nEach task should follow this JSON structure:\n{\n\t\"id\": number,\n\t\"title\": string,\n\t\"description\": string,\n\t\"status\": \"pending\",\n\t\"dependencies\": number[] (IDs of tasks this depends on),\n\t\"priority\": \"high\" | \"medium\" | \"low\",\n\t\"details\": string (implementation details),\n\t\"testStrategy\": string (validation approach)\n}\n\nGuidelines:\n1. {{#if (gt numTasks 0)}}Unless complexity warrants otherwise{{else}}Depending on the complexity{{/if}}, create {{#if (gt numTasks 0)}}exactly {{numTasks}}{{else}}an appropriate number of{{/if}} tasks, numbered sequentially starting from {{nextId}}\n2. Each task should be atomic and focused on a single responsibility following the most up to date best practices and standards\n3. 
Order tasks logically - consider dependencies and implementation sequence\n4. Early tasks should focus on setup, core functionality first, then advanced features\n5. Include clear validation/testing approach for each task\n6. Set appropriate dependency IDs (a task can only depend on tasks with lower IDs, potentially including existing tasks with IDs less than {{nextId}} if applicable)\n7. Assign priority (high/medium/low) based on criticality and dependency order\n8. Include detailed implementation guidance in the \"details\" field{{#if research}}, with specific libraries and version recommendations based on your research{{/if}}\n9. If the PRD contains specific requirements for libraries, database schemas, frameworks, tech stacks, or any other implementation details, STRICTLY ADHERE to these requirements in your task breakdown and do not discard them under any circumstance\n10. Focus on filling in any gaps left by the PRD or areas that aren't fully specified, while preserving all explicit requirements\n11. Always aim to provide the most direct path to implementation, avoiding over-engineering or roundabout approaches{{#if research}}\n12. For each task, include specific, actionable guidance based on current industry standards and best practices discovered through research{{/if}}",
"user": "Here's the Product Requirements Document (PRD) to break down into {{#if (gt numTasks 0)}}approximately {{numTasks}}{{else}}an appropriate number of{{/if}} tasks, starting IDs from {{nextId}}:{{#if research}}\n\nRemember to thoroughly research current best practices and technologies before task breakdown to provide specific, actionable implementation details.{{/if}}\n\n{{prdContent}}\n\n\n\t\tReturn your response in this format:\n{\n \"tasks\": [\n {\n \"id\": 1,\n \"title\": \"Setup Project Repository\",\n \"description\": \"...\",\n ...\n },\n ...\n ],\n \"metadata\": {\n \"projectName\": \"PRD Implementation\",\n \"totalTasks\": {{#if (gt numTasks 0)}}{{numTasks}}{{else}}{number of tasks}{{/if}},\n \"sourceFile\": \"{{prdPath}}\",\n \"generatedAt\": \"YYYY-MM-DD\"\n }\n}"
}
}
}

53
src/prompts/research.json
Normal file
@@ -0,0 +1,53 @@
{
"id": "research",
"version": "1.0.0",
"description": "Perform AI-powered research with project context",
"metadata": {
"author": "system",
"created": "2024-01-01T00:00:00Z",
"updated": "2024-01-01T00:00:00Z",
"tags": ["research", "context-aware", "information-gathering"]
},
"parameters": {
"query": {
"type": "string",
"required": true,
"description": "Research query"
},
"gatheredContext": {
"type": "string",
"default": "",
"description": "Gathered project context"
},
"detailLevel": {
"type": "string",
"enum": ["low", "medium", "high"],
"default": "medium",
"description": "Level of detail for the response"
},
"projectInfo": {
"type": "object",
"description": "Project information",
"properties": {
"root": {
"type": "string",
"description": "Project root path"
},
"taskCount": {
"type": "number",
"description": "Number of related tasks"
},
"fileCount": {
"type": "number",
"description": "Number of related files"
}
}
}
},
"prompts": {
"default": {
"system": "You are an expert AI research assistant helping with a software development project. You have access to project context including tasks, files, and project structure.\n\nYour role is to provide comprehensive, accurate, and actionable research responses based on the user's query and the provided project context.\n{{#if (eq detailLevel \"low\")}}\n**Response Style: Concise & Direct**\n- Provide brief, focused answers (2-4 paragraphs maximum)\n- Focus on the most essential information\n- Use bullet points for key takeaways\n- Avoid lengthy explanations unless critical\n- Skip pleasantries, introductions, and conclusions\n- No phrases like \"Based on your project context\" or \"I'll provide guidance\"\n- No summary outros or alignment statements\n- Get straight to the actionable information\n- Use simple, direct language - users want info, not explanation{{/if}}{{#if (eq detailLevel \"medium\")}}\n**Response Style: Balanced & Comprehensive**\n- Provide thorough but well-structured responses (4-8 paragraphs)\n- Include relevant examples and explanations\n- Balance depth with readability\n- Use headings and bullet points for organization{{/if}}{{#if (eq detailLevel \"high\")}}\n**Response Style: Detailed & Exhaustive**\n- Provide comprehensive, in-depth analysis (8+ paragraphs)\n- Include multiple perspectives and approaches\n- Provide detailed examples, code snippets, and step-by-step guidance\n- Cover edge cases and potential pitfalls\n- Use clear structure with headings, subheadings, and lists{{/if}}\n\n**Guidelines:**\n- Always consider the project context when formulating responses\n- Reference specific tasks, files, or project elements when relevant\n- Provide actionable insights that can be applied to the project\n- If the query relates to existing project tasks, suggest how the research applies to those tasks\n- Use markdown formatting for better readability\n- Be precise and avoid speculation unless clearly marked as such\n{{#if (eq detailLevel \"low\")}}\n**For LOW detail level specifically:**\n- Start immediately with the core information\n- No introductory phrases or context acknowledgments\n- No concluding summaries or project alignment statements\n- Focus purely on facts, steps, and actionable items{{/if}}",
"user": "# Research Query\n\n{{query}}\n{{#if gatheredContext}}\n\n# Project Context\n\n{{gatheredContext}}\n{{/if}}\n\n# Instructions\n\nPlease research and provide a {{detailLevel}}-detail response to the query above. Consider the project context provided and make your response as relevant and actionable as possible for this specific project."
}
}
}

402
src/prompts/schemas/README.md
Normal file
@@ -0,0 +1,402 @@
# Task Master JSON Schemas

This directory contains JSON schemas for validating Task Master prompt templates. These schemas provide IDE support, validation, and better developer experience when working with prompt templates.

## Overview

The schema system provides:
- **Structural Validation**: Ensures all required fields and proper JSON structure
- **Type Safety**: Validates parameter types and value constraints
- **IDE Integration**: IntelliSense and auto-completion in VS Code
- **Development Safety**: Catches errors before runtime
- **Documentation**: Self-documenting templates through schema definitions

## Schema Files

### `prompt-template.schema.json` (Main Schema)
**Version**: 1.0.0
**Purpose**: Main schema for Task Master prompt template files

**Validates**:
- Template metadata (id, version, description)
- Parameter definitions with comprehensive type validation
- Prompt variants with conditional logic
- Cross-references between parameters and template variables
- Semantic versioning compliance
- Handlebars template syntax

**Required Fields**:
- `id`: Unique template identifier (kebab-case)
- `version`: Semantic version (e.g., "1.0.0")
- `description`: Human-readable description
- `prompts.default`: Default prompt variant

**Optional Fields**:
- `metadata`: Additional template information
- `parameters`: Parameter definitions for template variables
- `prompts.*`: Additional prompt variants

### `parameter.schema.json` (Parameter Schema)
**Version**: 1.0.0
**Purpose**: Reusable schema for individual prompt parameters

**Supports**:
- **Type Validation**: `string`, `number`, `boolean`, `array`, `object`
- **Constraints**: Required/optional parameters, default values
- **String Validation**: Pattern matching (regex), enum constraints
- **Numeric Validation**: Minimum/maximum values, integer constraints
- **Array Validation**: Item types, minimum/maximum length
- **Object Validation**: Property definitions and required fields

**Parameter Properties**:
```json
{
  "type": "string|number|boolean|array|object",
  "required": true|false,
  "default": "any value matching type",
  "description": "Parameter documentation",
  "enum": ["option1", "option2"],
  "pattern": "^regex$",
  "minimum": 0,
  "maximum": 100,
  "minLength": 1,
  "maxLength": 255,
  "items": { "type": "string" },
  "properties": { "key": { "type": "string" } }
}
```

### `variant.schema.json` (Variant Schema)
**Version**: 1.0.0
**Purpose**: Schema for prompt template variants

**Validates**:
- System and user prompt templates
- Conditional expressions for variant selection
- Variable placeholders using Handlebars syntax
- Variant metadata and descriptions

**Variant Structure**:
```json
{
  "condition": "JavaScript expression",
  "system": "System prompt template",
  "user": "User prompt template",
  "metadata": {
    "description": "When to use this variant"
  }
}
```

## Schema Validation Rules

### Template ID Validation
- **Pattern**: `^[a-z][a-z0-9-]*[a-z0-9]$`
- **Format**: Kebab-case, alphanumeric with hyphens
- **Examples**:
  - ✅ `add-task`, `parse-prd`, `analyze-complexity`
  - ❌ `AddTask`, `add_task`, `-invalid-`, `task-`

### Version Validation
- **Pattern**: Semantic versioning (semver)
- **Format**: `MAJOR.MINOR.PATCH`
- **Examples**:
  - ✅ `1.0.0`, `2.1.3`, `10.0.0`
  - ❌ `1.0`, `v1.0.0`, `1.0.0-beta`

### Parameter Type Validation
- **String**: Text values with optional pattern/enum constraints
- **Number**: Numeric values with optional min/max constraints
- **Boolean**: True/false values
- **Array**: Lists with optional item type validation
- **Object**: Complex structures with property definitions

### Template Variable Validation
- **Handlebars Syntax**: `{{variable}}`, `{{#if condition}}`, `{{#each array}}`
- **Parameter References**: All template variables must have corresponding parameters
- **Nested Access**: Support for `{{object.property}}` notation
- **Special Variables**: `{{@index}}`, `{{@first}}`, `{{@last}}` in loops
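
A quick way to see these rules in action is to render a template string with the same helper style the templates use. A minimal sketch, assuming the `handlebars` npm package (the real prompt manager may wire helpers differently):

```javascript
import Handlebars from 'handlebars';

// Register a "greater than" helper like the `gt` used in the templates.
Handlebars.registerHelper('gt', (a, b) => a > b);

const template = Handlebars.compile(
  'Create {{#if (gt numTasks 0)}}exactly {{numTasks}}{{else}}an appropriate number of{{/if}} tasks.'
);

console.log(template({ numTasks: 5 })); // Create exactly 5 tasks.
console.log(template({ numTasks: 0 })); // Create an appropriate number of tasks.
```
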
## IDE Integration

### VS Code Setup
The VS Code profile automatically configures schema validation:

```json
{
  "json.schemas": [
    {
      "fileMatch": [
        "src/prompts/**/*.json",
        ".taskmaster/prompts/**/*.json",
        "prompts/**/*.json"
      ],
      "url": "./src/prompts/schemas/prompt-template.schema.json"
    }
  ]
}
```

**Features Provided**:
- **Auto-completion**: IntelliSense for all schema properties
- **Real-time Validation**: Immediate error highlighting
- **Hover Documentation**: Parameter descriptions on hover
- **Error Messages**: Detailed validation error explanations

### Other IDEs
For other development environments:

**Schema URLs**:
- **Local Development**: `./src/prompts/schemas/prompt-template.schema.json`
- **GitHub Reference**: `https://github.com/eyaltoledano/claude-task-master/blob/main/src/prompts/schemas/prompt-template.schema.json`

**File Patterns**:
- `src/prompts/**/*.json`
- `.taskmaster/prompts/**/*.json`
- `prompts/**/*.json`

## Validation Examples

### Valid Template Example
```json
{
  "id": "example-prompt",
  "version": "1.0.0",
  "description": "Example prompt template with comprehensive validation",
  "metadata": {
    "author": "Task Master Team",
    "category": "task",
    "tags": ["example", "validation"]
  },
  "parameters": {
    "taskDescription": {
      "type": "string",
      "description": "Description of the task to perform",
      "required": true,
      "minLength": 5,
      "maxLength": 500
    },
    "priority": {
      "type": "string",
      "description": "Task priority level",
      "required": false,
      "enum": ["high", "medium", "low"],
      "default": "medium"
    },
    "maxTokens": {
      "type": "number",
      "description": "Maximum tokens for response",
      "required": false,
      "minimum": 100,
      "maximum": 4000,
      "default": 1000
    },
    "useResearch": {
      "type": "boolean",
      "description": "Whether to include research context",
      "required": false,
      "default": false
    },
    "tags": {
      "type": "array",
      "description": "Task tags for categorization",
      "required": false,
      "items": {
        "type": "string",
        "pattern": "^[a-z][a-z0-9-]*$"
      }
    }
  },
  "prompts": {
    "default": {
      "system": "You are a helpful AI assistant that creates tasks with {{priority}} priority.",
      "user": "Create a task: {{taskDescription}}{{#if tags}}\nTags: {{#each tags}}{{this}}{{#unless @last}}, {{/unless}}{{/each}}{{/if}}"
    },
    "research": {
      "condition": "useResearch === true",
      "system": "You are a research-focused AI assistant with access to current information.",
      "user": "Research and create a task: {{taskDescription}}"
    }
  }
}
```

### Common Validation Errors

**Missing Required Fields**:
```json
// ❌ Error: Missing required 'id' field
{
  "version": "1.0.0",
  "description": "Missing ID"
}
```

**Invalid ID Format**:
```json
// ❌ Error: ID must be kebab-case
{
  "id": "InvalidID_Format",
  "version": "1.0.0"
}
```

**Parameter Type Mismatch**:
```json
// ❌ Error: Parameter type doesn't match usage
{
  "parameters": {
    "count": { "type": "string" }
  },
  "prompts": {
    "default": {
      "user": "Process {{count}} items" // Should be number for counting
    }
  }
}
```

**Invalid Condition Syntax**:
```json
// ❌ Error: Invalid JavaScript in condition
{
  "prompts": {
    "variant": {
      "condition": "useResearch = true", // Should be ===
      "user": "Research prompt"
    }
  }
}
```

## Development Workflow

### Creating New Templates
1. **Start with Schema**: Use VS Code with schema validation enabled
2. **Define Structure**: Begin with required fields (id, version, description)
3. **Add Parameters**: Define all template variables with proper types
4. **Create Prompts**: Write system and user prompts with template variables
5. **Test Validation**: Ensure template validates without errors
6. **Add Variants**: Create additional variants if needed
7. **Document Usage**: Update the main README with template details

### Modifying Existing Templates
1. **Check Current Version**: Note the current version number
2. **Assess Changes**: Determine if changes are breaking or non-breaking
3. **Update Version**: Increment version following semantic versioning
4. **Maintain Compatibility**: Avoid breaking existing parameter contracts
5. **Test Thoroughly**: Verify all existing code still works
6. **Update Documentation**: Reflect changes in README files

### Schema Evolution
When updating schemas themselves:

1. **Backward Compatibility**: Ensure existing templates remain valid
2. **Version Increment**: Update schema version in `$id` and `version` fields
3. **Test Migration**: Validate all existing templates against new schema
4. **Document Changes**: Update this README with schema changes
5. **Coordinate Release**: Ensure schema and template changes are synchronized

## Advanced Validation Features

### Cross-Reference Validation
The schema validates that:
- All template variables have corresponding parameters
- Parameter types match their usage in templates
- Variant conditions reference valid parameters
- Nested property access is properly defined
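
A simplified sketch of what such a cross-reference check can look like; `findUndeclaredVariables` is a hypothetical helper for illustration, and a real validator would also understand helpers, `#if`/`#each` blocks, and nested property access:

```javascript
// Collect {{variable}} names from all prompt variants and report any that
// have no matching entry under "parameters".
function findUndeclaredVariables(template) {
  const declared = new Set(Object.keys(template.parameters ?? {}));
  const builtins = new Set(['if', 'each', 'unless', 'else', 'json', 'gt', 'gte', 'eq', 'not']);
  const text = Object.values(template.prompts)
    .map((variant) => `${variant.system ?? ''} ${variant.user ?? ''}`)
    .join(' ');
  const undeclared = new Set();
  for (const [, name] of text.matchAll(/\{\{\s*([A-Za-z_][A-Za-z0-9_]*)/g)) {
    if (!declared.has(name) && !builtins.has(name)) undeclared.add(name);
  }
  return [...undeclared];
}
```
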
### Conditional Validation
- **Dynamic Schemas**: Different validation rules based on parameter values
- **Variant Conditions**: JavaScript expression validation
- **Template Syntax**: Handlebars syntax validation
- **Parameter Dependencies**: Required parameters based on other parameters

### Custom Validation Rules
The schema includes custom validation for:
- **Semantic Versioning**: Proper version format validation
- **Template Variables**: Handlebars syntax and parameter references
- **Condition Expressions**: JavaScript expression syntax validation
- **File Patterns**: Consistent naming conventions

## Performance Considerations

### Schema Loading
- **Caching**: Schemas are loaded once and cached
- **Lazy Loading**: Validation only occurs when templates are accessed
- **Memory Efficiency**: Shared schema instances across templates
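
A minimal sketch of the load-once pattern (illustrative only, not the actual prompt-manager code):

```javascript
import { readFileSync } from 'fs';

const schemaCache = new Map();

// Parse each schema file at most once per process; later calls hit the cache.
function loadSchema(schemaPath) {
  if (!schemaCache.has(schemaPath)) {
    schemaCache.set(schemaPath, JSON.parse(readFileSync(schemaPath, 'utf8')));
  }
  return schemaCache.get(schemaPath);
}
```
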
### Validation Performance
- **Fast Validation**: AJV provides optimized validation
- **Error Batching**: Multiple errors reported in a single validation pass
- **Minimal Overhead**: Validation adds minimal runtime cost

### Development Impact
- **IDE Responsiveness**: Real-time validation without performance impact
- **Build Time**: Schema validation during development, not production
- **Testing Speed**: Fast validation during test execution

## Troubleshooting

### Common Schema Issues

**Schema Not Loading**:
- Check file paths in VS Code settings
- Verify schema files exist and are valid JSON
- Restart VS Code if changes aren't recognized

**Validation Not Working**:
- Ensure `ajv` and `ajv-formats` dependencies are installed
- Check for JSON syntax errors in templates
- Verify schema file paths are correct

**Performance Issues**:
- Check for circular references in schemas
- Verify schema caching is working
- Monitor validation frequency in development

### Debugging Validation Errors

**Understanding Error Messages**:
```javascript
// Example error output
{
  "instancePath": "/parameters/priority/type",
  "schemaPath": "#/properties/parameters/additionalProperties/properties/type/enum",
  "keyword": "enum",
  "params": { "allowedValues": ["string", "number", "boolean", "array", "object"] },
  "message": "must be equal to one of the allowed values"
}
```
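
Error objects like the one above come from AJV's `validate.errors` array. A minimal sketch of producing them, assuming the `ajv` and `ajv-formats` dependencies mentioned under Troubleshooting:

```javascript
import Ajv from 'ajv';
import addFormats from 'ajv-formats';
import { readFileSync } from 'fs';

const ajv = new Ajv({ allErrors: true }); // batch all errors in one pass
addFormats(ajv); // required for the "date-time" format used in metadata

const schema = JSON.parse(
  readFileSync('src/prompts/schemas/prompt-template.schema.json', 'utf8')
);
const template = JSON.parse(readFileSync('src/prompts/add-task.json', 'utf8'));

const validate = ajv.compile(schema);
if (!validate(template)) {
  // Each entry carries instancePath, schemaPath, keyword, params, message.
  console.error(validate.errors);
}
```
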
**Common Error Patterns**:
- `instancePath`: Shows where in the template the error occurred
- `schemaPath`: Shows which schema rule was violated
- `keyword`: Indicates the type of validation that failed
- `params`: Provides additional context about the validation rule
- `message`: Human-readable description of the error

### Getting Help

**Internal Resources**:
- Main prompt README: `src/prompts/README.md`
- Schema files: `src/prompts/schemas/*.json`
- PromptManager code: `scripts/modules/prompt-manager.js`

**External Resources**:
- JSON Schema documentation: https://json-schema.org/
- AJV validation library: https://ajv.js.org/
- Handlebars template syntax: https://handlebarsjs.com/

## Schema URLs and References

### Current Schema Locations
- **Local Development**: `./src/prompts/schemas/prompt-template.schema.json`
- **GitHub Blob**: `https://github.com/eyaltoledano/claude-task-master/blob/main/src/prompts/schemas/prompt-template.schema.json`
- **Schema ID**: Used for internal references and validation

### URL Usage Guidelines
- **`$id` Field**: Use GitHub blob URLs for stable schema identification
- **Local References**: Use relative paths for development and testing
- **External Tools**: GitHub blob URLs provide stable, version-controlled access
- **Documentation**: Link to GitHub for public schema access

48
src/prompts/schemas/parameter.schema.json
Normal file
@@ -0,0 +1,48 @@
{
  "$schema": "http://json-schema.org/draft-07/schema#",
  "$id": "https://github.com/eyaltoledano/claude-task-master/blob/main/src/prompts/schemas/parameter.schema.json",
  "version": "1.0.0",
  "title": "Task Master Prompt Parameter",
  "description": "Schema for individual prompt template parameters",
  "type": "object",
  "required": ["type", "description"],
  "properties": {
    "type": {
      "type": "string",
      "enum": ["string", "number", "boolean", "array", "object"],
      "description": "The expected data type for this parameter"
    },
    "description": {
      "type": "string",
      "minLength": 1,
      "description": "Human-readable description of the parameter"
    },
    "required": {
      "type": "boolean",
      "default": false,
      "description": "Whether this parameter is required"
    },
    "default": {
      "description": "Default value for optional parameters"
    },
    "enum": {
      "type": "array",
      "description": "Valid values for string parameters",
      "items": {
        "type": "string"
      }
    },
    "pattern": {
      "type": "string",
      "description": "Regular expression pattern for string validation"
    },
    "minimum": {
      "type": "number",
      "description": "Minimum value for number parameters"
    },
    "maximum": {
      "type": "number",
      "description": "Maximum value for number parameters"
    }
  }
}

136
src/prompts/schemas/prompt-template.schema.json
Normal file
@@ -0,0 +1,136 @@
{
  "$schema": "http://json-schema.org/draft-07/schema#",
  "$id": "https://github.com/eyaltoledano/claude-task-master/blob/main/src/prompts/schemas/prompt-template.schema.json",
  "version": "1.0.0",
  "title": "Task Master Prompt Template",
  "description": "Schema for Task Master AI prompt template files",
  "type": "object",
  "required": ["id", "version", "description", "prompts"],
  "properties": {
    "id": {
      "type": "string",
      "pattern": "^[a-z0-9-]+$",
      "description": "Unique identifier for the prompt template"
    },
    "version": {
      "type": "string",
      "pattern": "^\\d+\\.\\d+\\.\\d+$",
      "description": "Semantic version of the prompt template"
    },
    "description": {
      "type": "string",
      "minLength": 1,
      "description": "Brief description of what this prompt does"
    },
    "metadata": {
      "$ref": "#/definitions/metadata"
    },
    "parameters": {
      "type": "object",
      "additionalProperties": {
        "$ref": "#/definitions/parameter"
      }
    },
    "prompts": {
      "type": "object",
      "properties": {
        "default": {
          "$ref": "#/definitions/promptVariant"
        }
      },
      "additionalProperties": {
        "$ref": "#/definitions/conditionalPromptVariant"
      }
    }
  },
  "definitions": {
    "parameter": {
      "type": "object",
      "required": ["type", "description"],
      "properties": {
        "type": {
          "type": "string",
          "enum": ["string", "number", "boolean", "array", "object"]
        },
        "description": {
          "type": "string",
          "minLength": 1
        },
        "required": {
          "type": "boolean",
          "default": false
        },
        "default": {
          "description": "Default value for optional parameters"
        },
        "enum": {
          "type": "array",
          "description": "Valid values for string parameters"
        },
        "pattern": {
          "type": "string",
          "description": "Regular expression pattern for string validation"
        },
        "minimum": {
          "type": "number",
          "description": "Minimum value for number parameters"
        },
        "maximum": {
          "type": "number",
          "description": "Maximum value for number parameters"
        }
      }
    },
    "promptVariant": {
      "type": "object",
      "required": ["system", "user"],
      "properties": {
        "system": {
          "type": "string",
          "minLength": 1
        },
        "user": {
          "type": "string",
          "minLength": 1
        }
      }
    },
    "conditionalPromptVariant": {
      "allOf": [
        { "$ref": "#/definitions/promptVariant" },
        {
          "type": "object",
          "properties": {
            "condition": {
              "type": "string",
              "description": "JavaScript expression for variant selection"
            }
          }
        }
      ]
    },
    "metadata": {
      "type": "object",
      "properties": {
        "author": { "type": "string" },
        "created": { "type": "string", "format": "date-time" },
        "updated": { "type": "string", "format": "date-time" },
        "tags": {
          "type": "array",
          "items": { "type": "string" }
        },
        "category": {
          "type": "string",
          "enum": [
            "task",
            "analysis",
            "research",
            "parsing",
            "update",
            "expansion"
          ]
        }
      }
    }
  }
}

39
src/prompts/schemas/variant.schema.json
Normal file
@@ -0,0 +1,39 @@
{
  "$schema": "http://json-schema.org/draft-07/schema#",
  "$id": "https://github.com/eyaltoledano/claude-task-master/blob/main/src/prompts/schemas/variant.schema.json",
  "version": "1.0.0",
  "title": "Task Master Prompt Variant",
  "description": "Schema for prompt template variants",
  "type": "object",
  "required": ["system", "user"],
  "properties": {
    "system": {
      "type": "string",
      "minLength": 1,
      "description": "System prompt template with variable placeholders"
    },
    "user": {
      "type": "string",
      "minLength": 1,
      "description": "User prompt template with variable placeholders"
    },
    "condition": {
      "type": "string",
      "description": "JavaScript expression for variant selection (optional, only for non-default variants)"
    },
    "metadata": {
      "type": "object",
      "properties": {
        "description": {
          "type": "string",
          "description": "Description of when this variant should be used"
        },
        "tags": {
          "type": "array",
          "items": { "type": "string" },
          "description": "Tags for categorizing this variant"
        }
      }
    }
  }
}
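
The `condition` field above holds a JavaScript expression evaluated against the supplied parameters. One plausible way such selection can work, sketched here for illustration (the actual prompt manager may evaluate conditions differently):

```javascript
// Pick the first non-default variant whose condition evaluates truthy
// against the parameters; otherwise fall back to "default".
function selectVariant(prompts, params) {
  for (const [name, variant] of Object.entries(prompts)) {
    if (name === 'default' || !variant.condition) continue;
    const test = new Function(
      ...Object.keys(params),
      `return (${variant.condition});`
    );
    if (test(...Object.values(params))) return name;
  }
  return 'default';
}

// e.g. selectVariant(template.prompts, { appendMode: true }) -> 'append'
```
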

55
src/prompts/update-subtask.json
Normal file
@@ -0,0 +1,55 @@
{
"id": "update-subtask",
"version": "1.0.0",
"description": "Append information to a subtask by generating only new content",
"metadata": {
"author": "system",
"created": "2024-01-01T00:00:00Z",
"updated": "2024-01-01T00:00:00Z",
"tags": ["update", "subtask", "append", "logging"]
},
"parameters": {
"parentTask": {
"type": "object",
"required": true,
"description": "The parent task context"
},
"prevSubtask": {
"type": "object",
"required": false,
"description": "The previous subtask if any"
},
"nextSubtask": {
"type": "object",
"required": false,
"description": "The next subtask if any"
},
"currentDetails": {
"type": "string",
"required": true,
"default": "(No existing details)",
"description": "Current subtask details"
},
"updatePrompt": {
"type": "string",
"required": true,
"description": "User request for what to add"
},
"useResearch": {
"type": "boolean",
"default": false,
"description": "Use research mode"
},
"gatheredContext": {
"type": "string",
"default": "",
"description": "Additional project context"
}
},
"prompts": {
"default": {
"system": "You are an AI assistant helping to update a subtask. You will be provided with the subtask's existing details, context about its parent and sibling tasks, and a user request string.{{#if useResearch}} You have access to current best practices and latest technical information to provide research-backed updates.{{/if}}\n\nYour Goal: Based *only* on the user's request and all the provided context (including existing details if relevant to the request), GENERATE the new text content that should be added to the subtask's details.\nFocus *only* on generating the substance of the update.\n\nOutput Requirements:\n1. Return *only* the newly generated text content as a plain string. Do NOT return a JSON object or any other structured data.\n2. Your string response should NOT include any of the subtask's original details, unless the user's request explicitly asks to rephrase, summarize, or directly modify existing text.\n3. Do NOT include any timestamps, XML-like tags, markdown, or any other special formatting in your string response.\n4. Ensure the generated text is concise yet complete for the update based on the user request. Avoid conversational fillers or explanations about what you are doing (e.g., do not start with \"Okay, here's the update...\").{{#if useResearch}}\n5. Include specific libraries, versions, and current best practices relevant to the subtask implementation.\n6. Provide research-backed technical recommendations and proven approaches.{{/if}}",
"user": "Task Context:\n\nParent Task: {{{json parentTask}}}\n{{#if prevSubtask}}Previous Subtask: {{{json prevSubtask}}}\n{{/if}}{{#if nextSubtask}}Next Subtask: {{{json nextSubtask}}}\n{{/if}}Current Subtask Details (for context only):\n{{currentDetails}}\n\nUser Request: \"{{updatePrompt}}\"\n\n{{#if useResearch}}Research and incorporate current best practices, latest stable versions, and proven approaches into your update. {{/if}}Based on the User Request and all the Task Context (including current subtask details provided above), what is the new information or text that should be appended to this subtask's details? Return ONLY this new text as a plain string.{{#if useResearch}} Include specific technical recommendations based on current industry standards.{{/if}}\n{{#if gatheredContext}}\n\n# Additional Project Context\n\n{{gatheredContext}}\n{{/if}}"
}
}
}

59
src/prompts/update-task.json
Normal file
@@ -0,0 +1,59 @@
{
"id": "update-task",
"version": "1.0.0",
"description": "Update a single task with new information, supporting full updates and append mode",
"metadata": {
"author": "system",
"created": "2024-01-01T00:00:00Z",
"updated": "2024-01-01T00:00:00Z",
"tags": ["update", "single-task", "modification", "append"]
},
"parameters": {
"task": {
"type": "object",
"required": true,
"description": "The task to update"
},
"taskJson": {
"type": "string",
"required": true,
"description": "JSON string representation of the task"
},
"updatePrompt": {
"type": "string",
"required": true,
"description": "Description of changes to apply"
},
"appendMode": {
"type": "boolean",
"default": false,
"description": "Whether to append to details or do full update"
},
"useResearch": {
"type": "boolean",
"default": false,
"description": "Use research mode"
},
"currentDetails": {
"type": "string",
"default": "(No existing details)",
"description": "Current task details for context"
},
"gatheredContext": {
"type": "string",
"default": "",
"description": "Additional project context"
}
},
"prompts": {
"default": {
"system": "You are an AI assistant helping to update a software development task based on new context.{{#if useResearch}} You have access to current best practices and latest technical information to provide research-backed updates.{{/if}}\nYou will be given a task and a prompt describing changes or new implementation details.\nYour job is to update the task to reflect these changes, while preserving its basic structure.\n\nGuidelines:\n1. VERY IMPORTANT: NEVER change the title of the task - keep it exactly as is\n2. Maintain the same ID, status, and dependencies unless specifically mentioned in the prompt{{#if useResearch}}\n3. Research and update the description, details, and test strategy with current best practices\n4. Include specific versions, libraries, and approaches that are current and well-tested{{/if}}{{#if (not useResearch)}}\n3. Update the description, details, and test strategy to reflect the new information\n4. Do not change anything unnecessarily - just adapt what needs to change based on the prompt{{/if}}\n5. Return a complete valid JSON object representing the updated task\n6. VERY IMPORTANT: Preserve all subtasks marked as \"done\" or \"completed\" - do not modify their content\n7. For tasks with completed subtasks, build upon what has already been done rather than rewriting everything\n8. If an existing completed subtask needs to be changed/undone based on the new context, DO NOT modify it directly\n9. Instead, add a new subtask that clearly indicates what needs to be changed or replaced\n10. Use the existence of completed subtasks as an opportunity to make new subtasks more specific and targeted\n11. Ensure any new subtasks have unique IDs that don't conflict with existing ones\n12. CRITICAL: For subtask IDs, use ONLY numeric values (1, 2, 3, etc.) NOT strings (\"1\", \"2\", \"3\")\n13. CRITICAL: Subtask IDs should start from 1 and increment sequentially (1, 2, 3...) - do NOT use parent task ID as prefix{{#if useResearch}}\n14. Include links to documentation or resources where helpful\n15. Focus on practical, implementable solutions using current technologies{{/if}}\n\nThe changes described in the prompt should be thoughtfully applied to make the task more accurate and actionable.",
"user": "Here is the task to update{{#if useResearch}} with research-backed information{{/if}}:\n{{{taskJson}}}\n\nPlease {{#if useResearch}}research and {{/if}}update this task based on the following {{#if useResearch}}context:\n{{updatePrompt}}\n\nIncorporate current best practices, latest stable versions, and proven approaches.{{/if}}{{#if (not useResearch)}}new context:\n{{updatePrompt}}{{/if}}\n\nIMPORTANT: {{#if useResearch}}Preserve any subtasks marked as \"done\" or \"completed\".{{/if}}{{#if (not useResearch)}}In the task JSON above, any subtasks with \"status\": \"done\" or \"status\": \"completed\" should be preserved exactly as is. Build your changes around these completed items.{{/if}}\n{{#if gatheredContext}}\n\n# Project Context\n\n{{gatheredContext}}\n{{/if}}\n\nReturn only the updated task as a valid JSON object{{#if useResearch}} with research-backed improvements{{/if}}."
},
"append": {
"condition": "appendMode === true",
"system": "You are an AI assistant helping to append additional information to a software development task. You will be provided with the task's existing details, context, and a user request string.\n\nYour Goal: Based *only* on the user's request and all the provided context (including existing details if relevant to the request), GENERATE the new text content that should be added to the task's details.\nFocus *only* on generating the substance of the update.\n\nOutput Requirements:\n1. Return *only* the newly generated text content as a plain string. Do NOT return a JSON object or any other structured data.\n2. Your string response should NOT include any of the task's original details, unless the user's request explicitly asks to rephrase, summarize, or directly modify existing text.\n3. Do NOT include any timestamps, XML-like tags, markdown, or any other special formatting in your string response.\n4. Ensure the generated text is concise yet complete for the update based on the user request. Avoid conversational fillers or explanations about what you are doing (e.g., do not start with \"Okay, here's the update...\").",
"user": "Task Context:\n\nTask: {{{json task}}}\nCurrent Task Details (for context only):\n{{currentDetails}}\n\nUser Request: \"{{updatePrompt}}\"\n\nBased on the User Request and all the Task Context (including current task details provided above), what is the new information or text that should be appended to this task's details? Return ONLY this new text as a plain string.\n{{#if gatheredContext}}\n\n# Additional Project Context\n\n{{gatheredContext}}\n{{/if}}"
}
}
}

38
src/prompts/update-tasks.json
Normal file
@@ -0,0 +1,38 @@
{
"id": "update-tasks",
"version": "1.0.0",
"description": "Update multiple tasks based on new context or changes",
"metadata": {
"author": "system",
"created": "2024-01-01T00:00:00Z",
"updated": "2024-01-01T00:00:00Z",
"tags": ["update", "bulk", "context-change"]
},
"parameters": {
"tasks": {
"type": "array",
"required": true,
"description": "Array of tasks to update"
},
"updatePrompt": {
"type": "string",
"required": true,
"description": "Description of changes to apply"
},
"useResearch": {
"type": "boolean",
"default": false,
"description": "Use research mode"
},
"projectContext": {
"type": "string",
"description": "Additional project context"
}
},
"prompts": {
"default": {
"system": "You are an AI assistant helping to update software development tasks based on new context.\nYou will be given a set of tasks and a prompt describing changes or new implementation details.\nYour job is to update the tasks to reflect these changes, while preserving their basic structure.\n\nGuidelines:\n1. Maintain the same IDs, statuses, and dependencies unless specifically mentioned in the prompt\n2. Update titles, descriptions, details, and test strategies to reflect the new information\n3. Do not change anything unnecessarily - just adapt what needs to change based on the prompt\n4. You should return ALL the tasks in order, not just the modified ones\n5. Return a complete valid JSON object with the updated tasks array\n6. VERY IMPORTANT: Preserve all subtasks marked as \"done\" or \"completed\" - do not modify their content\n7. For tasks with completed subtasks, build upon what has already been done rather than rewriting everything\n8. If an existing completed subtask needs to be changed/undone based on the new context, DO NOT modify it directly\n9. Instead, add a new subtask that clearly indicates what needs to be changed or replaced\n10. Use the existence of completed subtasks as an opportunity to make new subtasks more specific and targeted\n\nThe changes described in the prompt should be applied to ALL tasks in the list.",
"user": "Here are the tasks to update:\n{{{json tasks}}}\n\nPlease update these tasks based on the following new context:\n{{updatePrompt}}\n\nIMPORTANT: In the tasks JSON above, any subtasks with \"status\": \"done\" or \"status\": \"completed\" should be preserved exactly as is. Build your changes around these completed items.{{#if projectContext}}\n\n# Project Context\n\n{{projectContext}}{{/if}}\n\nReturn only the updated tasks as a valid JSON array."
}
}
}
@@ -218,7 +218,16 @@ export function initTaskMaster(overrides = {}) {
	);
}

// Remaining paths - only resolve if key exists in overrides
// Always set default paths first
// These can be overridden below if needed
paths.configPath = path.join(paths.projectRoot, TASKMASTER_CONFIG_FILE);
paths.statePath = path.join(
	paths.taskMasterDir || path.join(paths.projectRoot, TASKMASTER_DIR),
	'state.json'
);
paths.tasksPath = path.join(paths.projectRoot, TASKMASTER_TASKS_FILE);

// Handle overrides - only validate/resolve if explicitly provided
if ('configPath' in overrides) {
	paths.configPath = resolvePath(
		'config file',
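
With this change, default config, state, and tasks paths are always populated from the project root, and explicit overrides are validated only when supplied. A hedged usage sketch (the import path is illustrative):

```javascript
import { initTaskMaster } from './task-master.js'; // illustrative path

// No overrides: configPath, statePath, and tasksPath fall back to defaults
// derived from the detected project root.
const tm = initTaskMaster({});

// Explicit override: only now is the config path validated and resolved.
const tmCustom = initTaskMaster({
	configPath: '/absolute/path/to/.taskmaster/config.json'
});
```
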

255
tests/manual/prompts/README.md
Normal file
@@ -0,0 +1,255 @@
# Task Master Prompt Template Testing

This directory contains comprehensive testing tools for Task Master's centralized prompt template system.

## Interactive Menu System (Recommended)

The test script now includes an interactive menu system for easy testing and exploration:

```bash
node prompt-test.js
```

### Menu Features

**Main Menu Options:**
1. **Test specific prompt template** - Choose individual templates and variants
2. **Run all tests** - Execute the full test suite
3. **Toggle full prompt display** - Switch between preview and full prompt output (default: ON)
4. **Generate HTML report** - Create a professional HTML report and open it in a browser
5. **Exit** - Close the application

**Template Selection:**
- Choose from 8 available prompt templates
- See available variants for each template
- Test individual variants or all variants at once

**Interactive Flow:**
- Select template → Select variant → View results → Choose next action
- Easy navigation back to previous menus
- Color-coded output for better readability

## Batch Mode Options

### Run All Tests (Batch)
```bash
node prompt-test.js --batch
```
Runs all tests non-interactively and exits with an appropriate status code.

### Generate HTML Report
```bash
node prompt-test.js --html
```
Generates a professional HTML report with all test results and full prompt content. The report includes:
- **Test summary dashboard** with pass/fail statistics at the top
- **Compact single-line format** - Each template shows: `template: [variant ✓] [variant ✗] - x/y passed`
- **Individual pass/fail badges** - Visual ✓/✗ indicators for each variant test result
- **Template status summary** - Shows x/y passed count at the end of each line
- **Separate error condition section** - Tests for missing parameters, invalid variants, nonexistent templates
- **Alphabetically sorted** - Templates and variants are sorted for predictable ordering
- **Space-efficient layout** - Optimized for developer review with minimal vertical space
- **Three-section layout**:
  1. **Prompt Templates** - Real template variants testing
  2. **Error Condition Tests** - Error handling validation (empty-prompt, missing-parameters, invalid-variant, etc.)
  3. **Detailed Content** - Full system and user prompts below
- **Full prompt content** displayed without scrolling (no truncation)
- **Professional styling** with clear visual hierarchy and responsive design
- **Automatic browser opening** (cross-platform)

Reports are saved to `tests/manual/prompts/output/` with timestamps.

### Legacy Full Test Mode
```bash
node prompt-test.js --full
```
Runs all tests and shows sample full prompts for verification.

### Help
```bash
node prompt-test.js --help
```
Shows usage information and examples.
## Test Coverage

**Total Test Cases: 23** (18 functional + 5 error condition tests)

### Templates with Research Conditional Content
These templates have `useResearch` or `research` parameters that modify prompt content:
- **add-task** (default, research variants)
- **analyze-complexity** (default, research variants)
- **parse-prd** (default, research variants)
- **update-subtask** (default, research variants)
- **update-task** (default, append, research variants)

### Templates with Legitimate Separate Variants
These templates have genuinely different prompts for different use cases:
- **expand-task** (default, research, complexity-report variants) - Three sophisticated strategies with advanced parameter support
- **research** (low, medium, high detail level variants)

### Single Variant Templates
These templates have only one variant because research mode changes only the AI role, not the prompt content:
- **update-tasks** (default variant only)

### Prompt Templates (8 total)
- **add-task** (default, research variants)
- **expand-task** (default, research, complexity-report variants) - Enhanced with sophisticated parameter support and context handling
- **analyze-complexity** (default variant)
- **research** (low, medium, high detail variants)
- **parse-prd** (default variant) - Enhanced with sophisticated numTasks conditional logic
- **update-subtask** (default variant with `useResearch` conditional content)
- **update-task** (default, append variants; research uses `useResearch` conditional content)
- **update-tasks** (default variant with `useResearch` conditional content)

### Test Scenarios (27 total)
- 16 valid template/variant combinations (including enhanced expand-task with new parameter support)
- 4 conditional logic validation tests (testing new gt/gte helper functions)
- 7 error condition tests (nonexistent variants, templates, missing params, invalid detail levels)

### Validation
- Parameter schema compliance
- Template loading success/failure
- Error handling for invalid inputs
- Realistic test data for each template type
- **Output content validation** for conditional logic (NEW)

#### Conditional Logic Testing (NEW)
The test suite now includes specific validation for the new `gt` (greater than) and `gte` (greater than or equal) helper functions:

**Helper Function Tests:**
- `conditional-zero-tasks`: Validates `numTasks = 0` produces "an appropriate number of" text
- `conditional-positive-tasks`: Validates `numTasks = 5` produces "approximately 5" text
- `conditional-zero-subtasks`: Validates `subtaskCount = 0` produces "an appropriate number of" text
- `conditional-positive-subtasks`: Validates `subtaskCount = 3` produces "exactly 3" text

These tests use the new `validateOutput` function to verify that conditional template logic produces the expected rendered content, ensuring the helper functions work correctly beyond just successful template loading.
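
A minimal sketch of what such an output check can look like; the `validateOutput` signature here is assumed for illustration and may differ from the test script's actual helper:

```javascript
// Assert that the rendered prompt contains the phrase the conditional
// branch should have produced.
function validateOutput(rendered, expectedSnippet) {
  if (!rendered.userPrompt.includes(expectedSnippet)) {
    throw new Error(`Expected prompt to contain: "${expectedSnippet}"`);
  }
}

// e.g. numTasks = 0 should take the {{else}} branch:
// validateOutput(renderTemplate('parse-prd', { numTasks: 0 }), 'an appropriate number of');
```
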
## Output Modes

### Preview Mode (Default)
Shows truncated prompts (200 characters) for a quick overview:
```
System Prompt Preview:
You are an AI assistant helping with task management...

User Prompt Preview:
Create a new task based on the following description...

Tip: Use option 3 in the main menu to toggle full prompt display
```

### Full Mode
Shows complete system and user prompts for detailed verification:
```
System Prompt:
[Complete system prompt content]

User Prompt:
[Complete user prompt content]
```

## Test Data

Each template uses realistic test data:

- **Tasks**: Complete task objects with proper IDs, titles, descriptions
- **Context**: Simulated project context and gathered information
- **Parameters**: Properly formatted parameters matching each template's schema
- **Research**: Sample queries and detail levels for research prompts

## Error Testing

The test suite includes error condition validation:
- Nonexistent template variants
- Invalid template names
- Missing required parameters
- Malformed parameter data

## Exit Codes (Batch Mode)

- **0**: All tests passed
- **1**: One or more tests failed

## Use Cases

### Development Workflow
1. **Template Development**: Test new templates interactively
2. **Variant Testing**: Verify all variants work correctly
3. **Parameter Validation**: Ensure parameter schemas are working
4. **Regression Testing**: Run batch tests after changes

### Manual Verification
1. **Prompt Review**: Human verification of generated prompts
2. **Parameter Exploration**: See how different parameters affect output
3. **Context Testing**: Verify context inclusion and formatting

### CI/CD Integration
```bash
# In CI pipeline
node tests/manual/prompts/prompt-test.js --batch
```

The interactive menu makes it easy to explore and verify prompt templates during development, while batch mode enables automated testing in CI/CD pipelines.

## 🎯 Purpose
|
||||
|
||||
- **Verify all 8 prompt templates** work correctly with the prompt manager
|
||||
- **Test multiple variants** for each prompt (default, research, complexity-report, etc.)
|
||||
- **Show full generated prompts** for human verification and debugging
|
||||
- **Test error conditions** and parameter validation
|
||||
- **Provide realistic sample data** for each prompt type
|
||||
|
||||
## 📁 Files
|
||||
|
||||
- `prompt-test.js` - Main test script
|
||||
- `output/` - Generated HTML reports (when using --html flag or menu option)
|
||||
|
||||
## 🎯 Use Cases
|
||||
|
||||
### For Developers
|
||||
- **Verify prompt changes** don't break existing functionality
|
||||
- **Test new prompt variants** before deployment
|
||||
- **Debug prompt generation** issues with full output
|
||||
- **Validate parameter schemas** work correctly
|
||||
|
||||
### For QA
|
||||
- **Regression testing** after prompt template changes
|
||||
- **Verification of prompt outputs** match expectations
|
||||
- **Parameter validation testing** for robustness
|
||||
- **Cross-variant consistency** checking
|
||||
|
||||
### For Documentation
|
||||
- **Reference for prompt usage** with realistic examples
|
||||
- **Parameter requirements** demonstration
|
||||
- **Variant differences** visualization
|
||||
- **Expected output formats** examples
|
||||
|
||||
## ⚠️ Important Notes
|
||||
|
||||
1. **Real Prompt Manager**: This test uses the actual prompt manager, not mocks
|
||||
2. **Parameter Accuracy**: All parameters match the exact schema requirements of each prompt template
|
||||
3. **Variant Coverage**: Tests all documented variants for each prompt type
|
||||
4. **Sample Data**: Uses realistic project scenarios, not dummy data
|
||||
5. **Exit Codes**: Returns exit code 1 if any tests fail, 0 if all pass
|
||||
|
||||
## 🔄 Maintenance
|
||||
|
||||
When adding new prompt templates or variants:
|
||||
|
||||
1. Add sample data to the `sampleData` object
|
||||
2. Include realistic parameters matching the prompt's schema
|
||||
3. Test all documented variants
|
||||
4. Verify with the `--full` flag that prompts generate correctly
|
||||
5. Update this README with new coverage information
|
||||
|
||||
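For step 1, a new entry might look like the following sketch; the template name, fields, and shape are illustrative and should be adapted to the actual `sampleData` structure in `prompt-test.js`:

```js
// Hypothetical sampleData entry for a new template; match your
// template's real schema, these keys are assumptions.
sampleData['summarize-task'] = {
	variants: ['default', 'research'],
	parameters: {
		task: {
			id: 42,
			title: 'Add OAuth login',
			description: 'Support Google sign-in for the web client'
		},
		detailLevel: 'medium'
	}
};
```
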
This test suite should be run whenever:

- Prompt templates are modified
- New variants are added
- Parameter schemas change
- Prompt manager logic is updated
- A major release is being prepared
tests/manual/prompts/prompt-test.js (new file, 1874 lines)
File diff suppressed because it is too large
@@ -557,7 +557,10 @@ describe('getConfig Tests', () => {
		// Assert
		expect(config).toEqual(DEFAULT_CONFIG);
		expect(mockFindProjectRoot).not.toHaveBeenCalled(); // Explicit root provided
		expect(fsExistsSyncSpy).toHaveBeenCalledWith(MOCK_CONFIG_PATH);
		// The implementation checks for .taskmaster directory first
		expect(fsExistsSyncSpy).toHaveBeenCalledWith(
			path.join(MOCK_PROJECT_ROOT, '.taskmaster')
		);
		expect(fsReadFileSyncSpy).not.toHaveBeenCalled(); // No read if file doesn't exist
		expect(consoleWarnSpy).toHaveBeenCalledWith(
			expect.stringContaining('not found at provided project root')
@@ -2,12 +2,27 @@ import { jest } from '@jest/globals';
import fs from 'fs';
import path from 'path';
import os from 'os';
// Mock the schema integration functions to avoid chalk issues
const mockSetupSchemaIntegration = jest.fn();

import { vscodeProfile } from '../../../src/profiles/vscode.js';

// Mock external modules
jest.mock('child_process', () => ({
	execSync: jest.fn()
}));

// Mock fs/promises
const mockFsPromises = {
	mkdir: jest.fn(),
	access: jest.fn(),
	copyFile: jest.fn(),
	readFile: jest.fn(),
	writeFile: jest.fn()
};

jest.mock('fs/promises', () => mockFsPromises);

// Mock console methods
jest.mock('console', () => ({
	log: jest.fn(),

@@ -288,4 +303,41 @@ Task Master specific VS Code instruction.`;
		expect(content).toContain('alwaysApply:');
		expect(content).toContain('**/*.ts'); // File patterns in quotes
	});

	describe('Schema Integration', () => {
		beforeEach(() => {
			jest.clearAllMocks();
			// Replace the onAddRulesProfile function with our mock
			vscodeProfile.onAddRulesProfile = mockSetupSchemaIntegration;
		});

		test('setupSchemaIntegration is called with project root', async () => {
			// Arrange
			mockSetupSchemaIntegration.mockResolvedValue();

			// Act
			await vscodeProfile.onAddRulesProfile(tempDir);

			// Assert
			expect(mockSetupSchemaIntegration).toHaveBeenCalledWith(tempDir);
		});

		test('schema integration function exists and is callable', () => {
			// Assert that the VS Code profile has the schema integration function
			expect(vscodeProfile.onAddRulesProfile).toBeDefined();
			expect(typeof vscodeProfile.onAddRulesProfile).toBe('function');
		});

		test('schema integration handles errors gracefully', async () => {
			// Arrange
			mockSetupSchemaIntegration.mockRejectedValue(
				new Error('Schema setup failed')
			);

			// Act & Assert - Should propagate the error
			await expect(vscodeProfile.onAddRulesProfile(tempDir)).rejects.toThrow(
				'Schema setup failed'
			);
		});
	});
});
tests/unit/prompt-manager.test.js (new file, 406 lines)
@@ -0,0 +1,406 @@
import {
	jest,
	beforeEach,
	afterEach,
	describe,
	it,
	expect
} from '@jest/globals';
import path from 'path';
import { fileURLToPath } from 'url';

// Create mock functions
const mockReadFileSync = jest.fn();
const mockReaddirSync = jest.fn();
const mockExistsSync = jest.fn();

// Set up default mock for supported-models.json to prevent config-manager from failing
mockReadFileSync.mockImplementation((filePath) => {
	if (filePath.includes('supported-models.json')) {
		return JSON.stringify({
			anthropic: [{ id: 'claude-3-5-sonnet', max_tokens: 8192 }],
			openai: [{ id: 'gpt-4', max_tokens: 8192 }]
		});
	}
	// Default return for other files
	return '{}';
});

// Mock fs before importing modules that use it
jest.unstable_mockModule('fs', () => ({
	default: {
		readFileSync: mockReadFileSync,
		readdirSync: mockReaddirSync,
		existsSync: mockExistsSync
	},
	readFileSync: mockReadFileSync,
	readdirSync: mockReaddirSync,
	existsSync: mockExistsSync
}));

// Mock process.exit to prevent tests from exiting
const mockExit = jest.fn();
jest.unstable_mockModule('process', () => ({
	default: {
		exit: mockExit,
		env: {}
	},
	exit: mockExit
}));

// Import after mocking
const { getPromptManager } = await import(
	'../../scripts/modules/prompt-manager.js'
);

describe('PromptManager', () => {
	let promptManager;
	// Calculate expected templates directory
	const __filename = fileURLToPath(import.meta.url);
	const __dirname = path.dirname(__filename);
	const expectedTemplatesDir = path.join(
		__dirname,
		'..',
		'..',
		'src',
		'prompts'
	);

	beforeEach(() => {
		// Clear all mocks
		jest.clearAllMocks();

		// Re-setup the default mock after clearing
		mockReadFileSync.mockImplementation((filePath) => {
			if (filePath.includes('supported-models.json')) {
				return JSON.stringify({
					anthropic: [{ id: 'claude-3-5-sonnet', max_tokens: 8192 }],
					openai: [{ id: 'gpt-4', max_tokens: 8192 }]
				});
			}
			// Default return for other files
			return '{}';
		});

		// Get the singleton instance
		promptManager = getPromptManager();
	});

	afterEach(() => {
		jest.restoreAllMocks();
	});

	describe('loadPrompt', () => {
		it('should load and render a simple prompt template', () => {
			const mockTemplate = {
				id: 'test-prompt',
				prompts: {
					default: {
						system: 'You are a helpful assistant',
						user: 'Hello {{name}}, please {{action}}'
					}
				}
			};

			mockReadFileSync.mockReturnValue(JSON.stringify(mockTemplate));

			const result = promptManager.loadPrompt('test-prompt', {
				name: 'Alice',
				action: 'help me'
			});

			expect(result.systemPrompt).toBe('You are a helpful assistant');
			expect(result.userPrompt).toBe('Hello Alice, please help me');
			expect(mockReadFileSync).toHaveBeenCalledWith(
				path.join(expectedTemplatesDir, 'test-prompt.json'),
				'utf-8'
			);
		});

		it('should handle conditional content', () => {
			const mockTemplate = {
				id: 'conditional-prompt',
				prompts: {
					default: {
						system: 'System prompt',
						user: '{{#if useResearch}}Research and {{/if}}analyze the task'
					}
				}
			};

			mockReadFileSync.mockReturnValue(JSON.stringify(mockTemplate));

			// Test with useResearch = true
			let result = promptManager.loadPrompt('conditional-prompt', {
				useResearch: true
			});
			expect(result.userPrompt).toBe('Research and analyze the task');

			// Test with useResearch = false
			result = promptManager.loadPrompt('conditional-prompt', {
				useResearch: false
			});
			expect(result.userPrompt).toBe('analyze the task');
		});

		it('should handle array iteration with {{#each}}', () => {
			const mockTemplate = {
				id: 'loop-prompt',
				prompts: {
					default: {
						system: 'System prompt',
						user: 'Tasks:\n{{#each tasks}}- {{id}}: {{title}}\n{{/each}}'
					}
				}
			};

			mockReadFileSync.mockReturnValue(JSON.stringify(mockTemplate));

			const result = promptManager.loadPrompt('loop-prompt', {
				tasks: [
					{ id: 1, title: 'First task' },
					{ id: 2, title: 'Second task' }
				]
			});

			expect(result.userPrompt).toBe(
				'Tasks:\n- 1: First task\n- 2: Second task\n'
			);
		});

		it('should handle JSON serialization with triple braces', () => {
			const mockTemplate = {
				id: 'json-prompt',
				prompts: {
					default: {
						system: 'System prompt',
						user: 'Analyze these tasks: {{{json tasks}}}'
					}
				}
			};

			mockReadFileSync.mockReturnValue(JSON.stringify(mockTemplate));

			const tasks = [
				{ id: 1, title: 'Task 1' },
				{ id: 2, title: 'Task 2' }
			];

			const result = promptManager.loadPrompt('json-prompt', { tasks });

			expect(result.userPrompt).toBe(
				`Analyze these tasks: ${JSON.stringify(tasks, null, 2)}`
			);
		});

		it('should select variants based on conditions', () => {
			const mockTemplate = {
				id: 'variant-prompt',
				prompts: {
					default: {
						system: 'Default system',
						user: 'Default user'
					},
					research: {
						condition: 'useResearch === true',
						system: 'Research system',
						user: 'Research user'
					},
					highComplexity: {
						condition: 'complexity >= 8',
						system: 'Complex system',
						user: 'Complex user'
					}
				}
			};

			mockReadFileSync.mockReturnValue(JSON.stringify(mockTemplate));

			// Test default variant
			let result = promptManager.loadPrompt('variant-prompt', {
				useResearch: false,
				complexity: 5
			});
			expect(result.systemPrompt).toBe('Default system');

			// Test research variant
			result = promptManager.loadPrompt('variant-prompt', {
				useResearch: true,
				complexity: 5
			});
			expect(result.systemPrompt).toBe('Research system');

			// Test high complexity variant
			result = promptManager.loadPrompt('variant-prompt', {
				useResearch: false,
				complexity: 9
			});
			expect(result.systemPrompt).toBe('Complex system');
		});

		it('should use specified variant key over conditions', () => {
			const mockTemplate = {
				id: 'variant-prompt',
				prompts: {
					default: {
						system: 'Default system',
						user: 'Default user'
					},
					research: {
						condition: 'useResearch === true',
						system: 'Research system',
						user: 'Research user'
					}
				}
			};

			mockReadFileSync.mockReturnValue(JSON.stringify(mockTemplate));

			// Force research variant even though useResearch is false
			const result = promptManager.loadPrompt(
				'variant-prompt',
				{ useResearch: false },
				'research'
			);

			expect(result.systemPrompt).toBe('Research system');
		});

		it('should handle nested properties with dot notation', () => {
			const mockTemplate = {
				id: 'nested-prompt',
				prompts: {
					default: {
						system: 'System',
						user: 'Project: {{project.name}}, Version: {{project.version}}'
					}
				}
			};

			mockReadFileSync.mockReturnValue(JSON.stringify(mockTemplate));

			const result = promptManager.loadPrompt('nested-prompt', {
				project: {
					name: 'TaskMaster',
					version: '1.0.0'
				}
			});

			expect(result.userPrompt).toBe('Project: TaskMaster, Version: 1.0.0');
		});

		it('should handle complex nested structures', () => {
			const mockTemplate = {
				id: 'complex-prompt',
				prompts: {
					default: {
						system: 'System',
						user: '{{#if hasSubtasks}}Task has subtasks:\n{{#each subtasks}}- {{title}} ({{status}})\n{{/each}}{{/if}}'
					}
				}
			};

			mockReadFileSync.mockReturnValue(JSON.stringify(mockTemplate));

			const result = promptManager.loadPrompt('complex-prompt', {
				hasSubtasks: true,
				subtasks: [
					{ title: 'Subtask 1', status: 'pending' },
					{ title: 'Subtask 2', status: 'done' }
				]
			});

			expect(result.userPrompt).toBe(
				'Task has subtasks:\n- Subtask 1 (pending)\n- Subtask 2 (done)\n'
			);
		});

		it('should cache loaded templates', () => {
			const mockTemplate = {
				id: 'cached-prompt',
				prompts: {
					default: {
						system: 'System',
						user: 'User {{value}}'
					}
				}
			};

			mockReadFileSync.mockReturnValue(JSON.stringify(mockTemplate));

			// First load
			promptManager.loadPrompt('cached-prompt', { value: 'test1' });
			expect(mockReadFileSync).toHaveBeenCalledTimes(1);

			// Second load with same params should use cache
			promptManager.loadPrompt('cached-prompt', { value: 'test1' });
			expect(mockReadFileSync).toHaveBeenCalledTimes(1);

			// Third load with different params should NOT use cache
			promptManager.loadPrompt('cached-prompt', { value: 'test2' });
			expect(mockReadFileSync).toHaveBeenCalledTimes(2);
		});

		it('should throw error for non-existent template', () => {
			const error = new Error('File not found');
			error.code = 'ENOENT';
			mockReadFileSync.mockImplementation(() => {
				throw error;
			});

			expect(() => {
				promptManager.loadPrompt('non-existent', {});
			}).toThrow();
		});

		it('should throw error for invalid JSON', () => {
			mockReadFileSync.mockReturnValue('{ invalid json');

			expect(() => {
				promptManager.loadPrompt('invalid-json', {});
			}).toThrow();
		});

		it('should handle missing prompts section', () => {
			const mockTemplate = {
				id: 'no-prompts'
			};

			mockReadFileSync.mockReturnValue(JSON.stringify(mockTemplate));

			expect(() => {
				promptManager.loadPrompt('no-prompts', {});
			}).toThrow();
		});

		it('should handle special characters in templates', () => {
			const mockTemplate = {
				id: 'special-chars',
				prompts: {
					default: {
						system: 'System with "quotes" and \'apostrophes\'',
						user: 'User with newlines\nand\ttabs'
					}
				}
			};

			mockReadFileSync.mockReturnValue(JSON.stringify(mockTemplate));

			const result = promptManager.loadPrompt('special-chars', {});

			expect(result.systemPrompt).toBe(
				'System with "quotes" and \'apostrophes\''
			);
			expect(result.userPrompt).toBe('User with newlines\nand\ttabs');
		});
	});

	describe('singleton behavior', () => {
		it('should return the same instance on multiple calls', () => {
			const instance1 = getPromptManager();
			const instance2 = getPromptManager();

			expect(instance1).toBe(instance2);
		});
	});
});
@@ -123,6 +123,18 @@ jest.unstable_mockModule(
	})
);

jest.unstable_mockModule(
	'../../../../../scripts/modules/prompt-manager.js',
	() => ({
		getPromptManager: jest.fn().mockReturnValue({
			loadPrompt: jest.fn().mockResolvedValue({
				systemPrompt: 'Mocked system prompt',
				userPrompt: 'Mocked user prompt'
			})
		})
	})
);

// Mock external UI libraries
jest.unstable_mockModule('chalk', () => ({
	default: {

@@ -171,8 +171,20 @@ jest.unstable_mockModule('fs', () => ({
	writeFileSync: mockWriteFileSync
}));

jest.unstable_mockModule(
	'../../../../../scripts/modules/prompt-manager.js',
	() => ({
		getPromptManager: jest.fn().mockReturnValue({
			loadPrompt: jest.fn().mockResolvedValue({
				systemPrompt: 'Mocked system prompt',
				userPrompt: 'Mocked user prompt'
			})
		})
	})
);

// Import the mocked modules
const { readJSON, writeJSON, log, CONFIG } = await import(
const { readJSON, writeJSON, log, CONFIG, findTaskById } = await import(
	'../../../../../scripts/modules/utils.js'
);

@@ -253,6 +265,13 @@ describe('analyzeTaskComplexity', () => {
			_rawTaggedData: sampleTasks
		};
	});

	// Mock findTaskById to return the expected structure
	findTaskById.mockImplementation((tasks, taskId) => {
		const task = tasks?.find((t) => t.id === parseInt(taskId));
		return { task: task || null, originalSubtaskCount: null };
	});

	generateTextService.mockResolvedValue(sampleApiResponse);
});
@@ -262,11 +281,13 @@ describe('analyzeTaskComplexity', () => {
			file: 'tasks/tasks.json',
			output: 'scripts/task-complexity-report.json',
			threshold: '5',
			research: false
			research: false,
			projectRoot: '/mock/project/root'
		};

		// Act
		await analyzeTaskComplexity(options, {
			projectRoot: '/mock/project/root',
			mcpLog: {
				info: jest.fn(),
				warn: jest.fn(),

@@ -279,7 +300,7 @@ describe('analyzeTaskComplexity', () => {
		// Assert
		expect(readJSON).toHaveBeenCalledWith(
			'tasks/tasks.json',
			undefined,
			'/mock/project/root',
			undefined
		);
		expect(generateTextService).toHaveBeenCalledWith(expect.any(Object));

@@ -296,11 +317,13 @@ describe('analyzeTaskComplexity', () => {
			file: 'tasks/tasks.json',
			output: 'scripts/task-complexity-report.json',
			threshold: '5',
			research: true
			research: true,
			projectRoot: '/mock/project/root'
		};

		// Act
		await analyzeTaskComplexity(researchOptions, {
			projectRoot: '/mock/project/root',
			mcpLog: {
				info: jest.fn(),
				warn: jest.fn(),

@@ -323,10 +346,12 @@ describe('analyzeTaskComplexity', () => {
		let options = {
			file: 'tasks/tasks.json',
			output: 'scripts/task-complexity-report.json',
			threshold: '7'
			threshold: '7',
			projectRoot: '/mock/project/root'
		};

		await analyzeTaskComplexity(options, {
			projectRoot: '/mock/project/root',
			mcpLog: {
				info: jest.fn(),
				warn: jest.fn(),

@@ -349,10 +374,12 @@ describe('analyzeTaskComplexity', () => {
		options = {
			file: 'tasks/tasks.json',
			output: 'scripts/task-complexity-report.json',
			threshold: 8
			threshold: 8,
			projectRoot: '/mock/project/root'
		};

		await analyzeTaskComplexity(options, {
			projectRoot: '/mock/project/root',
			mcpLog: {
				info: jest.fn(),
				warn: jest.fn(),

@@ -374,11 +401,13 @@ describe('analyzeTaskComplexity', () => {
		const options = {
			file: 'tasks/tasks.json',
			output: 'scripts/task-complexity-report.json',
			threshold: '5'
			threshold: '5',
			projectRoot: '/mock/project/root'
		};

		// Act
		await analyzeTaskComplexity(options, {
			projectRoot: '/mock/project/root',
			mcpLog: {
				info: jest.fn(),
				warn: jest.fn(),

@@ -402,7 +431,8 @@ describe('analyzeTaskComplexity', () => {
		const options = {
			file: 'tasks/tasks.json',
			output: 'scripts/task-complexity-report.json',
			threshold: '5'
			threshold: '5',
			projectRoot: '/mock/project/root'
		};

		// Force API error

@@ -419,6 +449,7 @@ describe('analyzeTaskComplexity', () => {
		// Act & Assert
		await expect(
			analyzeTaskComplexity(options, {
				projectRoot: '/mock/project/root',
				mcpLog: mockMcpLog
			})
		).rejects.toThrow('API Error');
@@ -132,14 +132,22 @@ jest.unstable_mockModule(
	() => ({
		ContextGatherer: jest.fn().mockImplementation(() => ({
			gather: jest.fn().mockResolvedValue({
				contextSummary: 'Mock context summary',
				allRelatedTaskIds: [],
				graphVisualization: 'Mock graph'
				context: 'Mock project context from files'
			})
		}))
	})
);

jest.unstable_mockModule(
	'../../../../../scripts/modules/utils/fuzzyTaskSearch.js',
	() => ({
		FuzzyTaskSearch: jest.fn().mockImplementation(() => ({
			findRelevantTasks: jest.fn().mockReturnValue([]),
			getTaskIds: jest.fn().mockReturnValue([])
		}))
	})
);

jest.unstable_mockModule(
	'../../../../../scripts/modules/task-manager/generate-task-files.js',
	() => ({

@@ -147,6 +155,18 @@ jest.unstable_mockModule(
	})
);

jest.unstable_mockModule(
	'../../../../../scripts/modules/prompt-manager.js',
	() => ({
		getPromptManager: jest.fn().mockReturnValue({
			loadPrompt: jest.fn().mockResolvedValue({
				systemPrompt: 'Mocked system prompt',
				userPrompt: 'Mocked user prompt'
			})
		})
	})
);

// Mock external UI libraries
jest.unstable_mockModule('chalk', () => ({
	default: {

@@ -663,6 +683,18 @@ describe('expandTask', () => {
	describe('Complexity Report Integration (Tag-Specific)', () => {
		test('should use tag-specific complexity report when available', async () => {
			// Arrange
			const { getPromptManager } = await import(
				'../../../../../scripts/modules/prompt-manager.js'
			);
			const mockLoadPrompt = jest.fn().mockResolvedValue({
				systemPrompt: 'Generate exactly 5 subtasks for complexity report',
				userPrompt:
					'Please break this task into 5 parts\n\nUser provided context'
			});
			getPromptManager.mockReturnValue({
				loadPrompt: mockLoadPrompt
			});

			const tasksPath = 'tasks/tasks.json';
			const taskId = '1'; // Task in feature-branch
			const context = {

@@ -710,6 +742,16 @@ describe('expandTask', () => {
			const callArg = generateTextService.mock.calls[0][0];
			expect(callArg.systemPrompt).toContain('Generate exactly 5 subtasks');

			// Assert - Should use complexity-report variant with expansion prompt
			expect(mockLoadPrompt).toHaveBeenCalledWith(
				'expand-task',
				expect.objectContaining({
					subtaskCount: 5,
					expansionPrompt: 'Please break this task into 5 parts'
				}),
				'complexity-report'
			);

			// Clean up stub
			existsSpy.mockRestore();
		});
@@ -903,6 +945,17 @@ describe('expandTask', () => {

	test('should handle additional context correctly', async () => {
		// Arrange
		const { getPromptManager } = await import(
			'../../../../../scripts/modules/prompt-manager.js'
		);
		const mockLoadPrompt = jest.fn().mockResolvedValue({
			systemPrompt: 'Mocked system prompt',
			userPrompt: 'Mocked user prompt with context'
		});
		getPromptManager.mockReturnValue({
			loadPrompt: mockLoadPrompt
		});

		const tasksPath = 'tasks/tasks.json';
		const taskId = '2';
		const additionalContext = 'Use React hooks and TypeScript';

@@ -922,11 +975,28 @@ describe('expandTask', () => {
			false
		);

		// Assert - Should include additional context in prompt
		expect(generateTextService).toHaveBeenCalledWith(
		// Assert - Should pass separate context parameters to prompt manager
		expect(mockLoadPrompt).toHaveBeenCalledWith(
			'expand-task',
			expect.objectContaining({
				prompt: expect.stringContaining('Use React hooks and TypeScript')
			})
				additionalContext: expect.stringContaining(
					'Use React hooks and TypeScript'
				),
				gatheredContext: expect.stringContaining(
					'Mock project context from files'
				)
			}),
			expect.any(String)
		);

		// Additional assertion to verify the context parameters are passed separately
		const call = mockLoadPrompt.mock.calls[0];
		const parameters = call[1];
		expect(parameters.additionalContext).toContain(
			'Use React hooks and TypeScript'
		);
		expect(parameters.gatheredContext).toContain(
			'Mock project context from files'
		);
	});

@@ -1003,6 +1073,20 @@ describe('expandTask', () => {
	});

	test('should use dynamic prompting when numSubtasks is 0', async () => {
		// Mock getPromptManager to return realistic prompt with dynamic content
		const { getPromptManager } = await import(
			'../../../../../scripts/modules/prompt-manager.js'
		);
		const mockLoadPrompt = jest.fn().mockResolvedValue({
			systemPrompt:
				'You are an AI assistant helping with task breakdown for software development. You need to break down a high-level task into an appropriate number of specific subtasks that can be implemented one by one.',
			userPrompt:
				'Break down this task into an appropriate number of specific subtasks'
		});
		getPromptManager.mockReturnValue({
			loadPrompt: mockLoadPrompt
		});

		// Act
		await expandTask(tasksPath, taskId, 0, false, '', context, false);

@@ -1017,6 +1101,19 @@ describe('expandTask', () => {
	});

	test('should use specific count prompting when numSubtasks is positive', async () => {
		// Mock getPromptManager to return realistic prompt with specific count
		const { getPromptManager } = await import(
			'../../../../../scripts/modules/prompt-manager.js'
		);
		const mockLoadPrompt = jest.fn().mockResolvedValue({
			systemPrompt:
				'You are an AI assistant helping with task breakdown for software development. You need to break down a high-level task into 5 specific subtasks that can be implemented one by one.',
			userPrompt: 'Break down this task into exactly 5 specific subtasks'
		});
		getPromptManager.mockReturnValue({
			loadPrompt: mockLoadPrompt
		});

		// Act
		await expandTask(tasksPath, taskId, 5, false, '', context, false);

@@ -1032,6 +1129,19 @@ describe('expandTask', () => {
		// Mock getDefaultSubtasks to return a specific value
		getDefaultSubtasks.mockReturnValue(4);

		// Mock getPromptManager to return realistic prompt with default count
		const { getPromptManager } = await import(
			'../../../../../scripts/modules/prompt-manager.js'
		);
		const mockLoadPrompt = jest.fn().mockResolvedValue({
			systemPrompt:
				'You are an AI assistant helping with task breakdown for software development. You need to break down a high-level task into 4 specific subtasks that can be implemented one by one.',
			userPrompt: 'Break down this task into exactly 4 specific subtasks'
		});
		getPromptManager.mockReturnValue({
			loadPrompt: mockLoadPrompt
		});

		// Act
		await expandTask(tasksPath, taskId, -3, false, '', context, false);

@@ -1045,6 +1155,19 @@ describe('expandTask', () => {
		// Mock getDefaultSubtasks to return a specific value
		getDefaultSubtasks.mockReturnValue(6);

		// Mock getPromptManager to return realistic prompt with default count
		const { getPromptManager } = await import(
			'../../../../../scripts/modules/prompt-manager.js'
		);
		const mockLoadPrompt = jest.fn().mockResolvedValue({
			systemPrompt:
				'You are an AI assistant helping with task breakdown for software development. You need to break down a high-level task into 6 specific subtasks that can be implemented one by one.',
			userPrompt: 'Break down this task into exactly 6 specific subtasks'
		});
		getPromptManager.mockReturnValue({
			loadPrompt: mockLoadPrompt
		});

		// Act - Call without specifying numSubtasks (undefined)
		await expandTask(tasksPath, taskId, undefined, false, '', context, false);

@@ -1058,6 +1181,19 @@ describe('expandTask', () => {
		// Mock getDefaultSubtasks to return a specific value
		getDefaultSubtasks.mockReturnValue(7);

		// Mock getPromptManager to return realistic prompt with default count
		const { getPromptManager } = await import(
			'../../../../../scripts/modules/prompt-manager.js'
		);
		const mockLoadPrompt = jest.fn().mockResolvedValue({
			systemPrompt:
				'You are an AI assistant helping with task breakdown for software development. You need to break down a high-level task into 7 specific subtasks that can be implemented one by one.',
			userPrompt: 'Break down this task into exactly 7 specific subtasks'
		});
		getPromptManager.mockReturnValue({
			loadPrompt: mockLoadPrompt
		});

		// Act - Call with null numSubtasks
		await expandTask(tasksPath, taskId, null, false, '', context, false);
@@ -48,7 +48,8 @@ jest.unstable_mockModule(
	'../../../../../scripts/modules/config-manager.js',
	() => ({
		getDebugFlag: jest.fn(() => false),
		getDefaultNumTasks: jest.fn(() => 10)
		getDefaultNumTasks: jest.fn(() => 10),
		getDefaultPriority: jest.fn(() => 'medium')
	})
);

@@ -70,6 +71,30 @@ jest.unstable_mockModule(
	})
);

jest.unstable_mockModule(
	'../../../../../scripts/modules/prompt-manager.js',
	() => ({
		getPromptManager: jest.fn().mockReturnValue({
			loadPrompt: jest.fn().mockImplementation((templateName, params) => {
				// Create dynamic mock prompts based on the parameters
				const { numTasks } = params || {};
				let numTasksText = '';

				if (numTasks > 0) {
					numTasksText = `approximately ${numTasks}`;
				} else {
					numTasksText = 'an appropriate number of';
				}

				return Promise.resolve({
					systemPrompt: 'Mocked system prompt for parse-prd',
					userPrompt: `Generate ${numTasksText} top-level development tasks from the PRD content.`
				});
			})
		})
	})
);

// Mock fs module
jest.unstable_mockModule('fs', () => ({
	default: {

@@ -348,33 +373,23 @@ describe('parsePRD', () => {
		expect(fs.default.writeFileSync).not.toHaveBeenCalled();
	});

	test('should call process.exit when tasks in tag exist without force flag in CLI mode', async () => {
	test('should throw error when tasks in tag exist without force flag in CLI mode', async () => {
		// Setup mocks to simulate tasks.json already exists with tasks in the target tag
		fs.default.existsSync.mockReturnValue(true);
		fs.default.readFileSync.mockReturnValueOnce(
			JSON.stringify(existingTasksData)
		);

		// Mock process.exit for this specific test
		const mockProcessExit = jest
			.spyOn(process, 'exit')
			.mockImplementation((code) => {
				throw new Error(`process.exit: ${code}`);
			});

		// Call the function without mcpLog (CLI mode) and expect it to throw due to mocked process.exit
		// Call the function without mcpLog (CLI mode) and expect it to throw an error
		// In test environment, process.exit is prevented and error is thrown instead
		await expect(
			parsePRD('path/to/prd.txt', 'tasks/tasks.json', 3)
		).rejects.toThrow('process.exit: 1');

		// Verify process.exit was called with code 1
		expect(mockProcessExit).toHaveBeenCalledWith(1);
		).rejects.toThrow(
			"Tag 'master' already contains 2 tasks. Use --force to overwrite or --append to add to existing tasks."
		);

		// Verify the file was NOT written
		expect(fs.default.writeFileSync).not.toHaveBeenCalled();

		// Restore the mock
		mockProcessExit.mockRestore();
	});

	test('should append new tasks when append option is true', async () => {
@@ -55,6 +55,18 @@ jest.unstable_mockModule(
	})
);

jest.unstable_mockModule(
	'../../../../../scripts/modules/prompt-manager.js',
	() => ({
		getPromptManager: jest.fn().mockReturnValue({
			loadPrompt: jest.fn().mockResolvedValue({
				systemPrompt: 'Mocked system prompt',
				userPrompt: 'Mocked user prompt'
			})
		})
	})
);

jest.unstable_mockModule(
	'../../../../../scripts/modules/task-manager/models.js',
	() => ({

@@ -248,7 +248,7 @@ describe('initTaskMaster', () => {
		expect(taskMaster.getTasksPath()).toBeNull();
	});

	test('should return null when optional files not specified in overrides', () => {
	test('should return default paths when optional files not specified in overrides', () => {
		// Arrange - Remove all optional files
		fs.unlinkSync(tasksPath);
		fs.unlinkSync(configPath);

@@ -257,10 +257,16 @@ describe('initTaskMaster', () => {
		// Act - Don't specify any optional paths
		const taskMaster = initTaskMaster({});

		// Assert
		expect(taskMaster.getTasksPath()).toBeUndefined();
		expect(taskMaster.getConfigPath()).toBeUndefined();
		expect(taskMaster.getStatePath()).toBeUndefined();
		// Assert - Should return absolute paths with default locations
		expect(taskMaster.getTasksPath()).toBe(
			path.join(tempDir, TASKMASTER_TASKS_FILE)
		);
		expect(taskMaster.getConfigPath()).toBe(
			path.join(tempDir, TASKMASTER_CONFIG_FILE)
		);
		expect(taskMaster.getStatePath()).toBe(
			path.join(tempDir, TASKMASTER_DIR, 'state.json')
		);
	});
});

@@ -415,11 +421,19 @@ describe('initTaskMaster', () => {
		// Assert
		expect(taskMaster.getProjectRoot()).toBe(tempDir);
		expect(taskMaster.getTaskMasterDir()).toBe(taskMasterDir);
		expect(taskMaster.getTasksPath()).toBeUndefined();
		// Default paths are always set for tasks, config, and state
		expect(taskMaster.getTasksPath()).toBe(
			path.join(tempDir, TASKMASTER_TASKS_FILE)
		);
		expect(taskMaster.getConfigPath()).toBe(
			path.join(tempDir, TASKMASTER_CONFIG_FILE)
		);
		expect(taskMaster.getStatePath()).toBe(
			path.join(taskMasterDir, 'state.json')
		);
		// PRD and complexity report paths are undefined when not provided
		expect(taskMaster.getPrdPath()).toBeUndefined();
		expect(taskMaster.getComplexityReportPath()).toBeUndefined();
		expect(taskMaster.getConfigPath()).toBeUndefined();
		expect(taskMaster.getStatePath()).toBeUndefined();
	});
});
});