Compare commits
43 commits: esm-module...readme-fix
| Author | SHA1 | Date |
|---|---|---|
|  | 9ad517231a |  |
|  | 7db3b47a47 |  |
|  | 3de785a99c |  |
|  | 8188fdd832 |  |
|  | 3fadc2f1ef |  |
|  | dd36111367 |  |
|  | c58ab8963c |  |
|  | 3eeb4721aa |  |
|  | 7ea905f2c5 |  |
|  | 51dd4f625b |  |
|  | 2e55757b26 |  |
|  | 54bfc72baa |  |
|  | faae0b419d |  |
|  | 27edbd8f3f |  |
|  | b1390e4ddf |  |
|  | cc04d53720 |  |
|  | bfd86eb9cc |  |
|  | 9eb3842f04 |  |
|  | bf2053e140 |  |
|  | ee0be04302 |  |
|  | c0707fc399 |  |
|  | 1ece6f1904 |  |
|  | f4a9ad1095 |  |
|  | cba86510d3 |  |
|  | 86ea6d1dbc |  |
|  | a22d2a45b5 |  |
|  | d73c8e17ec |  |
|  | 4f23751d25 |  |
|  | 7d5c028ca0 |  |
|  | f18df6da19 |  |
|  | 1754a31372 |  |
|  | 3096ccdfb3 |  |
|  | 6464bb11e5 |  |
|  | edaa5fe0d5 |  |
|  | 41d9dbbe6d |  |
|  | 6e0d866756 |  |
|  | 926aa61a4e |  |
|  | 9b4168bb4e |  |
|  | ad612763ff |  |
|  | 293b59bac6 |  |
|  | 1809c4ed7b |  |
|  | 6e406958c1 |  |
|  | 074b7ec0bc |  |
.changeset/curly-dragons-design.md (new file)

@@ -0,0 +1,5 @@
---
"task-master-ai": patch
---

improve findTasks algorithm for resolving tasks path
.changeset/eleven-news-check.md (new file)

@@ -0,0 +1,5 @@
---
"task-master-ai": patch
---

Fix update tool on MCP giving `No valid tasks found`
.changeset/four-cups-enter.md (new file)

@@ -0,0 +1,39 @@
---
"task-master-ai": patch
---

Enhanced add-task fuzzy search intelligence and improved user experience

**Smarter Task Discovery:**

- Remove hardcoded category system that always matched "Task management"
- Eliminate arbitrary limits on fuzzy search results (5→25 high relevance, 3→10 medium relevance, 8→20 detailed tasks)
- Improve semantic weighting in Fuse.js search (details=3, description=2, title=1.5) for better relevance
- Generate context-driven task recommendations based on true semantic similarity

**Enhanced Terminal Experience:**

- Fix duplicate banner display issue that was "eating" terminal history (closes #553)
- Remove console.clear() and redundant displayBanner() calls from UI functions
- Preserve command history for better development workflow
- Streamline banner display across all commands (list, next, show, set-status, clear-subtasks, dependency commands)

**Visual Improvements:**

- Replace emoji complexity indicators with clean filled circle characters (●) for professional appearance
- Improve consistency and readability of task complexity display

**AI Provider Compatibility:**

- Change generateObject mode from 'tool' to 'auto' for better cross-provider compatibility
- Add qwen3-235n-a22b:free model support (closes #687)
- Add smart warnings for free OpenRouter models with limitations (rate limits, restricted context, no tool_use)

**Technical Improvements:**

- Enhanced context generation in add-task to rely on semantic similarity rather than rigid pattern matching
- Improved dependency analysis and common pattern detection
- Better handling of task relationships and relevance scoring
- More intelligent task suggestion algorithms

The add-task system now provides truly relevant task context based on semantic understanding rather than arbitrary categories and limits, while maintaining a cleaner and more professional terminal experience.
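The Fuse.js weighting called out in this changeset (details=3, description=2, title=1.5) maps directly onto Fuse's per-key weights. A minimal sketch of what such a configuration could look like; the task fields, threshold, and query here are illustrative assumptions, not the project's actual code:

```js
import Fuse from 'fuse.js';

// Hypothetical task objects; the real ones would come from tasks.json.
const tasks = [
  { id: 1, title: 'Add export command', description: 'CLI export', details: 'Generate PDF and task files on demand' },
  { id: 2, title: 'Fix banner display', description: 'Terminal UX', details: 'Remove console.clear() calls' }
];

// Weighting mirroring the changeset: details (3) > description (2) > title (1.5).
const fuse = new Fuse(tasks, {
  includeScore: true,
  threshold: 0.4, // assumed value; controls how fuzzy a match may be
  keys: [
    { name: 'details', weight: 3 },
    { name: 'description', weight: 2 },
    { name: 'title', weight: 1.5 }
  ]
});

// Rank existing tasks by relevance to the new task's prompt.
const relevant = fuse.search('export tasks to PDF').map((result) => result.item);
console.log(relevant.map((t) => t.id));
```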
(deleted changeset)

@@ -1,5 +0,0 @@
---
'task-master-ai': minor
---

Add AWS bedrock support
(deleted changeset)

@@ -1,13 +0,0 @@
---
'task-master-ai': minor
---

# Add Google Vertex AI Provider Integration

- Implemented `VertexAIProvider` class extending BaseAIProvider
- Added authentication and configuration handling for Vertex AI
- Updated configuration manager with Vertex-specific getters
- Modified AI services unified system to integrate the provider
- Added documentation for Vertex AI setup and configuration
- Updated environment variable examples for Vertex AI support
- Implemented specialized error handling for Vertex-specific issues
(deleted changeset)

@@ -1,5 +0,0 @@
---
'task-master-ai': minor
---

Add support for Azure
(deleted changeset)

@@ -1,5 +0,0 @@
---
"task-master-ai": minor
---

Increased minimum required node version to > 18 (was > 14)
.changeset/pink-houses-lay.md (new file)

@@ -0,0 +1,7 @@
---
"task-master-ai": patch
---

Fix double .taskmaster directory paths in file resolution utilities

- Closes #636
(deleted changeset)

@@ -1,5 +0,0 @@
---
'task-master-ai': minor
---

Renamed baseUrl to baseURL
.changeset/polite-areas-shave.md (new file)

@@ -0,0 +1,5 @@
---
"task-master-ai": patch
---

Add one-click MCP server installation for Cursor
@@ -2,17 +2,10 @@
"mode": "exit",
"tag": "rc",
"initialVersions": {
-"task-master-ai": "0.15.0"
+"task-master-ai": "0.16.1"
},
"changesets": [
-"hungry-geese-work",
-"itchy-taxes-sip",
-"lemon-apes-sort",
-"new-colts-flow",
-"plain-bottles-stand",
-"shaggy-rice-exist",
-"sharp-flies-call",
-"spotty-buttons-walk",
-"tidy-seals-rule"
+"pink-houses-lay",
+"polite-areas-shave"
]
}
(deleted changeset)

@@ -1,5 +0,0 @@
---
'task-master-ai': patch
---

Fix max_tokens error when trying to use claude-sonnet-4 and claude-opus-4
(deleted changeset)

@@ -1,7 +0,0 @@
---
'task-master-ai': minor
---

Add TASK_MASTER_PROJECT_ROOT env variable supported in mcp.json and .env for project root resolution

- Some users were having issues where the MCP wasn't able to detect the location of their project root, you can now set the `TASK_MASTER_PROJECT_ROOT` environment variable to the root of your project.
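For reference, a rough sketch of the precedence this variable introduces (environment variable first, then a config-level `projectRoot`, then the existing fallbacks); the helper name and config shape are assumptions, not the project's actual code:

```js
import fs from 'fs';
import path from 'path';

// Hypothetical helper illustrating the documented precedence order.
function resolveProjectRoot(config = {}) {
  const candidates = [
    process.env.TASK_MASTER_PROJECT_ROOT, // 1. env variable (shell, mcp.json env block, or .env)
    config.projectRoot,                   // 2. projectRoot key in configuration files
    process.cwd()                         // 3. existing resolution fallback
  ];
  for (const dir of candidates) {
    if (dir && fs.existsSync(dir)) {
      return path.resolve(dir);
    }
  }
  throw new Error('TASK_MASTER_PROJECT_ROOT is not set and no valid project root was found');
}
```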
(deleted changeset)

@@ -1,31 +0,0 @@
---
"task-master-ai": minor
---

Consolidate Task Master files into unified .taskmaster directory structure

This release introduces a new consolidated directory structure that organizes all Task Master files under a single `.taskmaster/` directory for better project organization and cleaner workspace management.

**New Directory Structure:**

- `.taskmaster/tasks/` - Task files (previously `tasks/`)
- `.taskmaster/docs/` - Documentation including PRD files (previously `scripts/`)
- `.taskmaster/reports/` - Complexity analysis reports (previously `scripts/`)
- `.taskmaster/templates/` - Template files like example PRD
- `.taskmaster/config.json` - Configuration (previously `.taskmasterconfig`)

**Migration & Backward Compatibility:**

- Existing projects continue to work with legacy file locations
- New projects use the consolidated structure automatically
- Run `task-master migrate` to move existing projects to the new structure
- All CLI commands and MCP tools automatically detect and use appropriate file locations

**Benefits:**

- Cleaner project root with Task Master files organized in one location
- Reduced file scatter across multiple directories
- Improved project navigation and maintenance
- Consistent file organization across all Task Master projects

This change maintains full backward compatibility while providing a migration path to the improved structure.
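The backward-compatibility promise above rests on path fallback logic. A hedged sketch of the idea; the helper name is assumed and the real resolution lives in the project's path utilities:

```js
import fs from 'fs';
import path from 'path';

// Hypothetical lookup: prefer the consolidated .taskmaster/ location,
// then fall back to the legacy layout so existing projects keep working.
function findTasksJson(projectRoot) {
  const candidates = [
    path.join(projectRoot, '.taskmaster', 'tasks', 'tasks.json'), // new structure
    path.join(projectRoot, 'tasks', 'tasks.json')                 // legacy structure
  ];
  return candidates.find((p) => fs.existsSync(p)) ?? null;
}
```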
(deleted changeset)

@@ -1,5 +0,0 @@
---
'task-master-ai': patch
---

Fix add-task MCP command causing an error
.changeset/vast-shrimps-happen.md (new file)

@@ -0,0 +1,22 @@
---
"task-master-ai": minor
---

Add sync-readme command for a task export to GitHub README

Introduces a new `sync-readme` command that exports your task list to your project's README.md file.

**Features:**

- **Flexible filtering**: Supports `--status` filtering (e.g., pending, done) and `--with-subtasks` flag
- **Smart content management**: Automatically replaces existing exports or appends to new READMEs
- **Metadata display**: Shows export timestamp, subtask inclusion status, and filter settings

**Usage:**

- `task-master sync-readme` - Export tasks without subtasks
- `task-master sync-readme --with-subtasks` - Include subtasks in export
- `task-master sync-readme --status=pending` - Only export pending tasks
- `task-master sync-readme --status=done --with-subtasks` - Export completed tasks with subtasks

Perfect for showcasing project progress on GitHub. Experimental. Open to feedback.
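The "smart content management" bullet amounts to replacing a delimited block inside README.md. A speculative sketch of that idea; the marker comments and function name are hypothetical, not the command's actual implementation:

```js
import fs from 'fs';

// Hypothetical markers delimiting the exported block in README.md.
const START = '<!-- TASKMASTER_EXPORT_START -->';
const END = '<!-- TASKMASTER_EXPORT_END -->';

function syncReadme(readmePath, exportMarkdown) {
  const block = `${START}\n${exportMarkdown}\n${END}`;
  let readme = fs.existsSync(readmePath) ? fs.readFileSync(readmePath, 'utf8') : '';
  if (readme.includes(START) && readme.includes(END)) {
    // Replace the previous export in place.
    readme = readme.replace(new RegExp(`${START}[\\s\\S]*?${END}`), block);
  } else {
    // Append a fresh export block to the end of the README.
    readme = `${readme.trimEnd()}\n\n${block}\n`;
  }
  fs.writeFileSync(readmePath, readme);
}
```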
@@ -45,7 +45,7 @@ This document provides a detailed reference for interacting with Taskmaster, cov
* **Description:** `Parse a Product Requirements Document, PRD, or text file with Taskmaster to automatically generate an initial set of tasks in tasks.json.`
* **Key Parameters/Options:**
* `input`: `Path to your PRD or requirements text file that Taskmaster should parse for tasks.` (CLI: `[file]` positional or `-i, --input <file>`)
-* `output`: `Specify where Taskmaster should save the generated 'tasks.json' file. Defaults to 'tasks/tasks.json'.` (CLI: `-o, --output <file>`)
+* `output`: `Specify where Taskmaster should save the generated 'tasks.json' file. Defaults to '.taskmaster/tasks/tasks.json'.` (CLI: `-o, --output <file>`)
* `numTasks`: `Approximate number of top-level tasks Taskmaster should aim to generate from the document.` (CLI: `-n, --num-tasks <number>`)
* `force`: `Use this to allow Taskmaster to overwrite an existing 'tasks.json' without asking for confirmation.` (CLI: `-f, --force`)
* **Usage:** Useful for bootstrapping a project from an existing requirements document.
@@ -395,7 +395,7 @@ Environment variables are used **only** for sensitive API keys related to AI pro
* `AZURE_OPENAI_API_KEY` (Requires `AZURE_OPENAI_ENDPOINT` too)
* `OPENROUTER_API_KEY`
* `XAI_API_KEY`
-* `OLLANA_API_KEY` (Requires `OLLAMA_BASE_URL` too)
+* `OLLAMA_API_KEY` (Requires `OLLAMA_BASE_URL` too)
* **Endpoints (Optional/Provider Specific inside .taskmaster/config.json):**
* `AZURE_OPENAI_ENDPOINT`
* `OLLAMA_BASE_URL` (Default: `http://localhost:11434/api`)
@@ -20,13 +20,14 @@
}
},
"global": {
+"userId": "1234567890",
"logLevel": "info",
"debug": false,
"defaultSubtasks": 5,
"defaultPriority": "medium",
"projectName": "Taskmaster",
"ollamaBaseURL": "http://localhost:11434/api",
-"userId": "1234567890",
+"bedrockBaseURL": "https://bedrock.us-east-1.amazonaws.com",
"azureBaseURL": "https://your-endpoint.azure.com/"
}
}
@@ -1,6 +1,6 @@
# Task ID: 92
# Title: Implement Project Root Environment Variable Support in MCP Configuration
-# Status: in-progress
+# Status: review
# Dependencies: 1, 3, 17
# Priority: medium
# Description: Add support for a 'TASK_MASTER_PROJECT_ROOT' environment variable in MCP configuration, allowing it to be set in both mcp.json and .env, with precedence over other methods. This will define the root directory for the MCP server and take precedence over all other project root resolution methods. The implementation should be backward compatible with existing workflows that don't use this variable.
@@ -44,49 +44,49 @@ Implementation steps:
- Test with invalid or non-existent directories to verify error handling

# Subtasks:
-## 92.1. Update configuration loader to check for TASK_MASTER_PROJECT_ROOT environment variable [pending]
+## 1. Update configuration loader to check for TASK_MASTER_PROJECT_ROOT environment variable [pending]
### Dependencies: None
### Description: Modify the configuration loading system to check for the TASK_MASTER_PROJECT_ROOT environment variable as the primary source for project root directory. Ensure proper error handling if the variable is set but points to a non-existent or inaccessible directory.
### Details:


-## 92.2. Add support for 'projectRoot' in configuration files [pending]
+## 2. Add support for 'projectRoot' in configuration files [pending]
### Dependencies: None
### Description: Implement support for a 'projectRoot' key in mcp_config.toml and mcp.json configuration files as a fallback when the environment variable is not set. Update the configuration parser to recognize and validate this field.
### Details:


-## 92.3. Refactor project root resolution logic with clear precedence rules [pending]
+## 3. Refactor project root resolution logic with clear precedence rules [pending]
### Dependencies: None
### Description: Create a unified project root resolution function that follows the precedence order: 1) TASK_MASTER_PROJECT_ROOT environment variable, 2) 'projectRoot' in config files, 3) existing resolution methods. Ensure this function is used consistently throughout the codebase.
### Details:


-## 92.4. Update all MCP tools to use the new project root resolution [pending]
+## 4. Update all MCP tools to use the new project root resolution [pending]
### Dependencies: None
### Description: Identify all MCP tools and components that need to access the project root and update them to use the new resolution logic. Ensure consistent behavior across all parts of the system.
### Details:


-## 92.5. Add comprehensive tests for the new project root resolution [pending]
+## 5. Add comprehensive tests for the new project root resolution [pending]
### Dependencies: None
### Description: Create unit and integration tests to verify the correct behavior of the project root resolution logic under various configurations and edge cases.
### Details:


-## 92.6. Update documentation with new configuration options [pending]
+## 6. Update documentation with new configuration options [pending]
### Dependencies: None
### Description: Update the project documentation to clearly explain the new TASK_MASTER_PROJECT_ROOT environment variable, the 'projectRoot' configuration option, and the precedence rules. Include examples of different configuration scenarios.
### Details:


-## 92.7. Implement validation for project root directory [pending]
+## 7. Implement validation for project root directory [pending]
### Dependencies: None
### Description: Add validation to ensure the specified project root directory exists and has the necessary permissions. Provide clear error messages when validation fails.
### Details:


-## 92.8. Implement support for loading environment variables from .env files [pending]
+## 8. Implement support for loading environment variables from .env files [pending]
### Dependencies: None
### Description: Add functionality to load the TASK_MASTER_PROJECT_ROOT variable from .env files in the workspace, following best practices for environment variable management in MCP servers.
### Details:
@@ -1,6 +1,6 @@
# Task ID: 95
# Title: Implement .taskmaster Directory Structure
-# Status: in-progress
+# Status: done
# Dependencies: 1, 3, 4, 17
# Priority: high
# Description: Consolidate all Task Master-managed files in user projects into a clean, centralized .taskmaster/ directory structure to improve organization and keep user project directories clean, based on GitHub issue #275.
@@ -105,7 +105,7 @@ Update Task Master's file handling code to use the new paths: tasks in .taskmast
### Details:
Update the task file generation system to create and read task files from .taskmaster/tasks/ instead of tasks/. Ensure all template paths are updated. Modify any path resolution logic specific to task file handling.

-## 4. Implement backward compatibility logic [in-progress]
+## 4. Implement backward compatibility logic [done]
### Dependencies: 95.2, 95.3
### Description: Add fallback mechanisms to support both old and new file locations during transition
### Details:
@@ -141,7 +141,7 @@ Update README.md and other documentation to reflect the new .taskmaster structur
### Details:
Create functionality to support user-defined templates in .taskmaster/templates/. Allow users to store custom task templates, PRD templates, or other reusable files. Update Task Master commands to recognize and use templates from this directory when available.

-## 10. Verify clean user project directories [in-progress]
+## 10. Verify clean user project directories [done]
### Dependencies: 95.8, 95.9
### Description: Ensure the new structure keeps user project root directories clean and organized
### Details:
.taskmaster/tasks/task_096.txt (new file)

@@ -0,0 +1,37 @@
# Task ID: 96
# Title: Create Export Command for On-Demand Task File and PDF Generation
# Status: pending
# Dependencies: 2, 4, 95
# Priority: medium
# Description: Develop an 'export' CLI command that generates task files and comprehensive PDF exports on-demand, replacing automatic file generation and providing users with flexible export options.
# Details:
Implement a new 'export' command in the CLI that supports two primary modes: (1) generating individual task files on-demand (superseding the current automatic generation system), and (2) producing a comprehensive PDF export. The PDF should include: a first page with the output of 'tm list --with-subtasks', followed by individual pages for each task (using 'tm show <task_id>') and each subtask (using 'tm show <subtask_id>'). Integrate PDF generation using a robust library (e.g., pdfkit, Puppeteer, or jsPDF) to ensure high-quality output and proper pagination. Refactor or disable any existing automatic file generation logic to avoid performance overhead. Ensure the command supports flexible output paths and options for exporting only files, only PDF, or both. Update documentation and help output to reflect the new export capabilities. Consider concurrency and error handling for large projects. Ensure the export process is efficient and does not block the main CLI thread unnecessarily.

# Test Strategy:
1. Run the 'export' command with various options and verify that task files are generated only on-demand, not automatically. 2. Generate a PDF export and confirm that the first page contains the correct 'tm list --with-subtasks' output, and that each subsequent page accurately reflects the output of 'tm show <task_id>' and 'tm show <subtask_id>' for all tasks and subtasks. 3. Test exporting in projects with large numbers of tasks and subtasks to ensure performance and correctness. 4. Attempt exports with invalid paths or missing data to verify robust error handling. 5. Confirm that no automatic file generation occurs during normal task operations. 6. Review CLI help output and documentation for accuracy regarding the new export functionality.

# Subtasks:
## 1. Remove Automatic Task File Generation from Task Operations [pending]
### Dependencies: None
### Description: Eliminate all calls to generateTaskFiles() from task operations such as add-task, remove-task, set-status, and similar commands to prevent unnecessary performance overhead.
### Details:
Audit the codebase for any automatic invocations of generateTaskFiles() and remove or refactor them to ensure task files are not generated automatically during task operations.

## 2. Implement Export Command Infrastructure with On-Demand Task File Generation [pending]
### Dependencies: 96.1
### Description: Develop the CLI 'export' command infrastructure, enabling users to generate task files on-demand by invoking the preserved generateTaskFiles function only when requested.
### Details:
Create the export command with options for output paths and modes (files, PDF, or both). Ensure generateTaskFiles is only called within this command and not elsewhere.

## 3. Implement Comprehensive PDF Export Functionality [pending]
### Dependencies: 96.2
### Description: Add PDF export capability to the export command, generating a structured PDF with a first page listing all tasks and subtasks, followed by individual pages for each task and subtask, using a robust PDF library.
### Details:
Integrate a PDF generation library (e.g., pdfkit, Puppeteer, or jsPDF). Ensure the PDF includes the output of 'tm list --with-subtasks' on the first page, and uses 'tm show <task_id>' and 'tm show <subtask_id>' for subsequent pages. Handle pagination, concurrency, and error handling for large projects.

## 4. Update Documentation, Tests, and CLI Help for Export Workflow [pending]
### Dependencies: 96.2, 96.3
### Description: Revise all relevant documentation, automated tests, and CLI help output to reflect the new export-based workflow and available options.
### Details:
Update user guides, README files, and CLI help text. Add or modify tests to cover the new export command and its options. Ensure all documentation accurately describes the new workflow and usage.
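Task 96 leaves the PDF library open (pdfkit, Puppeteer, or jsPDF). As one possibility, a minimal pdfkit sketch of the page layout the task describes; the function name, arguments, and output path are assumptions, not the project's code:

```js
import fs from 'fs';
import PDFDocument from 'pdfkit';

// Hypothetical export: first page is the overview list, then one page per task.
function exportTasksToPdf(listText, tasks, outputPath = 'taskmaster-export.pdf') {
  const doc = new PDFDocument();
  doc.pipe(fs.createWriteStream(outputPath));

  // Page 1: equivalent of `tm list --with-subtasks`.
  doc.fontSize(10).text(listText);

  // One page per task, mirroring `tm show <task_id>`.
  for (const task of tasks) {
    doc.addPage();
    doc.fontSize(14).text(`Task ${task.id}: ${task.title}`);
    doc.moveDown().fontSize(10).text(task.details ?? '');
  }

  doc.end();
}
```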
@@ -5467,6 +5467,70 @@
}
]
},
+{
+"id": 92,
+"title": "Implement Project Root Environment Variable Support in MCP Configuration",
+"description": "Add support for a 'TASK_MASTER_PROJECT_ROOT' environment variable in MCP configuration, allowing it to be set in both mcp.json and .env, with precedence over other methods. This will define the root directory for the MCP server and take precedence over all other project root resolution methods. The implementation should be backward compatible with existing workflows that don't use this variable.",
+"status": "review",
+"dependencies": [
+1,
+3,
+17
+],
+"priority": "medium",
+"details": "Update the MCP server configuration system to support the TASK_MASTER_PROJECT_ROOT environment variable as the standard way to specify the project root directory. This provides better namespacing and avoids conflicts with other tools that might use a generic PROJECT_ROOT variable. Implement a clear precedence order for project root resolution:\n\n1. TASK_MASTER_PROJECT_ROOT environment variable (from shell or .env file)\n2. 'projectRoot' key in mcp_config.toml or mcp.json configuration files\n3. Existing resolution logic (CLI args, current working directory, etc.)\n\nModify the configuration loading logic to check for these sources in the specified order, ensuring backward compatibility. All MCP tools and components should use this standardized project root resolution logic. The TASK_MASTER_PROJECT_ROOT environment variable will be required because path resolution is delegated to the MCP client implementation, ensuring consistent behavior across different environments.\n\nImplementation steps:\n1. Identify all code locations where project root is determined (initialization, utility functions)\n2. Update configuration loaders to check for TASK_MASTER_PROJECT_ROOT in environment variables\n3. Add support for 'projectRoot' in configuration files as a fallback\n4. Refactor project root resolution logic to follow the new precedence rules\n5. Ensure all MCP tools and functions use the updated resolution logic\n6. Add comprehensive error handling for cases where TASK_MASTER_PROJECT_ROOT is not set or invalid\n7. Implement validation to ensure the specified directory exists and is accessible",
+"testStrategy": "1. Write unit tests to verify that the config loader correctly reads project root from environment variables and configuration files with the expected precedence:\n - Test TASK_MASTER_PROJECT_ROOT environment variable takes precedence when set\n - Test 'projectRoot' in configuration files is used when environment variable is absent\n - Test fallback to existing resolution logic when neither is specified\n\n2. Add integration tests to ensure that the MCP server and all tools use the correct project root:\n - Test server startup with TASK_MASTER_PROJECT_ROOT set to various valid and invalid paths\n - Test configuration file loading from the specified project root\n - Test path resolution for resources relative to the project root\n\n3. Test backward compatibility:\n - Verify existing workflows function correctly without the new variables\n - Ensure no regression in projects not using the new configuration options\n\n4. Manual testing:\n - Set TASK_MASTER_PROJECT_ROOT in shell environment and verify correct behavior\n - Set TASK_MASTER_PROJECT_ROOT in .env file and verify it's properly loaded\n - Configure 'projectRoot' in configuration files and test precedence\n - Test with invalid or non-existent directories to verify error handling",
+"subtasks": [
+{
+"id": 1,
+"title": "Update configuration loader to check for TASK_MASTER_PROJECT_ROOT environment variable",
+"description": "Modify the configuration loading system to check for the TASK_MASTER_PROJECT_ROOT environment variable as the primary source for project root directory. Ensure proper error handling if the variable is set but points to a non-existent or inaccessible directory.",
+"status": "pending"
+},
+{
+"id": 2,
+"title": "Add support for 'projectRoot' in configuration files",
+"description": "Implement support for a 'projectRoot' key in mcp_config.toml and mcp.json configuration files as a fallback when the environment variable is not set. Update the configuration parser to recognize and validate this field.",
+"status": "pending"
+},
+{
+"id": 3,
+"title": "Refactor project root resolution logic with clear precedence rules",
+"description": "Create a unified project root resolution function that follows the precedence order: 1) TASK_MASTER_PROJECT_ROOT environment variable, 2) 'projectRoot' in config files, 3) existing resolution methods. Ensure this function is used consistently throughout the codebase.",
+"status": "pending"
+},
+{
+"id": 4,
+"title": "Update all MCP tools to use the new project root resolution",
+"description": "Identify all MCP tools and components that need to access the project root and update them to use the new resolution logic. Ensure consistent behavior across all parts of the system.",
+"status": "pending"
+},
+{
+"id": 5,
+"title": "Add comprehensive tests for the new project root resolution",
+"description": "Create unit and integration tests to verify the correct behavior of the project root resolution logic under various configurations and edge cases.",
+"status": "pending"
+},
+{
+"id": 6,
+"title": "Update documentation with new configuration options",
+"description": "Update the project documentation to clearly explain the new TASK_MASTER_PROJECT_ROOT environment variable, the 'projectRoot' configuration option, and the precedence rules. Include examples of different configuration scenarios.",
+"status": "pending"
+},
+{
+"id": 7,
+"title": "Implement validation for project root directory",
+"description": "Add validation to ensure the specified project root directory exists and has the necessary permissions. Provide clear error messages when validation fails.",
+"status": "pending"
+},
+{
+"id": 8,
+"title": "Implement support for loading environment variables from .env files",
+"description": "Add functionality to load the TASK_MASTER_PROJECT_ROOT variable from .env files in the workspace, following best practices for environment variable management in MCP servers.",
+"status": "pending"
+}
+]
+},
{
"id": 93,
"title": "Implement Google Vertex AI Provider Integration",
@@ -5613,75 +5677,11 @@
}
]
},
-{
-"id": 92,
-"title": "Implement Project Root Environment Variable Support in MCP Configuration",
-"description": "Add support for a 'TASK_MASTER_PROJECT_ROOT' environment variable in MCP configuration, allowing it to be set in both mcp.json and .env, with precedence over other methods. This will define the root directory for the MCP server and take precedence over all other project root resolution methods. The implementation should be backward compatible with existing workflows that don't use this variable.",
-"status": "in-progress",
-"dependencies": [
-1,
-3,
-17
-],
-"priority": "medium",
-"details": "Update the MCP server configuration system to support the TASK_MASTER_PROJECT_ROOT environment variable as the standard way to specify the project root directory. This provides better namespacing and avoids conflicts with other tools that might use a generic PROJECT_ROOT variable. Implement a clear precedence order for project root resolution:\n\n1. TASK_MASTER_PROJECT_ROOT environment variable (from shell or .env file)\n2. 'projectRoot' key in mcp_config.toml or mcp.json configuration files\n3. Existing resolution logic (CLI args, current working directory, etc.)\n\nModify the configuration loading logic to check for these sources in the specified order, ensuring backward compatibility. All MCP tools and components should use this standardized project root resolution logic. The TASK_MASTER_PROJECT_ROOT environment variable will be required because path resolution is delegated to the MCP client implementation, ensuring consistent behavior across different environments.\n\nImplementation steps:\n1. Identify all code locations where project root is determined (initialization, utility functions)\n2. Update configuration loaders to check for TASK_MASTER_PROJECT_ROOT in environment variables\n3. Add support for 'projectRoot' in configuration files as a fallback\n4. Refactor project root resolution logic to follow the new precedence rules\n5. Ensure all MCP tools and functions use the updated resolution logic\n6. Add comprehensive error handling for cases where TASK_MASTER_PROJECT_ROOT is not set or invalid\n7. Implement validation to ensure the specified directory exists and is accessible",
-"testStrategy": "1. Write unit tests to verify that the config loader correctly reads project root from environment variables and configuration files with the expected precedence:\n - Test TASK_MASTER_PROJECT_ROOT environment variable takes precedence when set\n - Test 'projectRoot' in configuration files is used when environment variable is absent\n - Test fallback to existing resolution logic when neither is specified\n\n2. Add integration tests to ensure that the MCP server and all tools use the correct project root:\n - Test server startup with TASK_MASTER_PROJECT_ROOT set to various valid and invalid paths\n - Test configuration file loading from the specified project root\n - Test path resolution for resources relative to the project root\n\n3. Test backward compatibility:\n - Verify existing workflows function correctly without the new variables\n - Ensure no regression in projects not using the new configuration options\n\n4. Manual testing:\n - Set TASK_MASTER_PROJECT_ROOT in shell environment and verify correct behavior\n - Set TASK_MASTER_PROJECT_ROOT in .env file and verify it's properly loaded\n - Configure 'projectRoot' in configuration files and test precedence\n - Test with invalid or non-existent directories to verify error handling",
-"subtasks": [
-{
-"id": 92.1,
-"title": "Update configuration loader to check for TASK_MASTER_PROJECT_ROOT environment variable",
-"description": "Modify the configuration loading system to check for the TASK_MASTER_PROJECT_ROOT environment variable as the primary source for project root directory. Ensure proper error handling if the variable is set but points to a non-existent or inaccessible directory.",
-"status": "pending"
-},
-{
-"id": 92.2,
-"title": "Add support for 'projectRoot' in configuration files",
-"description": "Implement support for a 'projectRoot' key in mcp_config.toml and mcp.json configuration files as a fallback when the environment variable is not set. Update the configuration parser to recognize and validate this field.",
-"status": "pending"
-},
-{
-"id": 92.3,
-"title": "Refactor project root resolution logic with clear precedence rules",
-"description": "Create a unified project root resolution function that follows the precedence order: 1) TASK_MASTER_PROJECT_ROOT environment variable, 2) 'projectRoot' in config files, 3) existing resolution methods. Ensure this function is used consistently throughout the codebase.",
-"status": "pending"
-},
-{
-"id": 92.4,
-"title": "Update all MCP tools to use the new project root resolution",
-"description": "Identify all MCP tools and components that need to access the project root and update them to use the new resolution logic. Ensure consistent behavior across all parts of the system.",
-"status": "pending"
-},
-{
-"id": 92.5,
-"title": "Add comprehensive tests for the new project root resolution",
-"description": "Create unit and integration tests to verify the correct behavior of the project root resolution logic under various configurations and edge cases.",
-"status": "pending"
-},
-{
-"id": 92.6,
-"title": "Update documentation with new configuration options",
-"description": "Update the project documentation to clearly explain the new TASK_MASTER_PROJECT_ROOT environment variable, the 'projectRoot' configuration option, and the precedence rules. Include examples of different configuration scenarios.",
-"status": "pending"
-},
-{
-"id": 92.7,
-"title": "Implement validation for project root directory",
-"description": "Add validation to ensure the specified project root directory exists and has the necessary permissions. Provide clear error messages when validation fails.",
-"status": "pending"
-},
-{
-"id": 92.8,
-"title": "Implement support for loading environment variables from .env files",
-"description": "Add functionality to load the TASK_MASTER_PROJECT_ROOT variable from .env files in the workspace, following best practices for environment variable management in MCP servers.",
-"status": "pending"
-}
-]
-},
{
"id": 95,
"title": "Implement .taskmaster Directory Structure",
"description": "Consolidate all Task Master-managed files in user projects into a clean, centralized .taskmaster/ directory structure to improve organization and keep user project directories clean, based on GitHub issue #275.",
-"status": "in-progress",
+"status": "done",
"dependencies": [
1,
3,
@@ -5732,7 +5732,7 @@
3
],
"details": "Implement path fallback logic that checks both old and new locations when files aren't found. Add deprecation warnings when old paths are used, informing users about the new structure. Ensure error messages are clear about the transition.",
-"status": "in-progress",
+"status": "done",
"testStrategy": "Test with both old and new directory structures to verify fallback works correctly. Verify deprecation warnings appear when using old paths."
},
{
@@ -5804,10 +5804,73 @@
9
],
"details": "Validate that after implementing the new structure, user project root directories only contain their actual project files plus the single .taskmaster/ directory. Verify that no Task Master files are created outside of .taskmaster/. Test that users can easily add .taskmaster/ to .gitignore if they choose to exclude Task Master files from version control.",
-"status": "in-progress",
+"status": "done",
"testStrategy": "Test complete workflows and verify only .taskmaster/ directory is created in project root. Check that all Task Master operations respect the new file organization. Verify .gitignore compatibility."
}
]
+},
+{
+"id": 96,
+"title": "Create Export Command for On-Demand Task File and PDF Generation",
+"description": "Develop an 'export' CLI command that generates task files and comprehensive PDF exports on-demand, replacing automatic file generation and providing users with flexible export options.",
+"details": "Implement a new 'export' command in the CLI that supports two primary modes: (1) generating individual task files on-demand (superseding the current automatic generation system), and (2) producing a comprehensive PDF export. The PDF should include: a first page with the output of 'tm list --with-subtasks', followed by individual pages for each task (using 'tm show <task_id>') and each subtask (using 'tm show <subtask_id>'). Integrate PDF generation using a robust library (e.g., pdfkit, Puppeteer, or jsPDF) to ensure high-quality output and proper pagination. Refactor or disable any existing automatic file generation logic to avoid performance overhead. Ensure the command supports flexible output paths and options for exporting only files, only PDF, or both. Update documentation and help output to reflect the new export capabilities. Consider concurrency and error handling for large projects. Ensure the export process is efficient and does not block the main CLI thread unnecessarily.",
+"testStrategy": "1. Run the 'export' command with various options and verify that task files are generated only on-demand, not automatically. 2. Generate a PDF export and confirm that the first page contains the correct 'tm list --with-subtasks' output, and that each subsequent page accurately reflects the output of 'tm show <task_id>' and 'tm show <subtask_id>' for all tasks and subtasks. 3. Test exporting in projects with large numbers of tasks and subtasks to ensure performance and correctness. 4. Attempt exports with invalid paths or missing data to verify robust error handling. 5. Confirm that no automatic file generation occurs during normal task operations. 6. Review CLI help output and documentation for accuracy regarding the new export functionality.",
+"status": "pending",
+"dependencies": [
+2,
+4,
+95
+],
+"priority": "medium",
+"subtasks": [
+{
+"id": 1,
+"title": "Remove Automatic Task File Generation from Task Operations",
+"description": "Eliminate all calls to generateTaskFiles() from task operations such as add-task, remove-task, set-status, and similar commands to prevent unnecessary performance overhead.",
+"dependencies": [],
+"details": "Audit the codebase for any automatic invocations of generateTaskFiles() and remove or refactor them to ensure task files are not generated automatically during task operations.",
+"status": "pending",
+"testStrategy": "Verify that no task file generation occurs during any task operation by running the CLI and monitoring file system changes.",
+"parentTaskId": 96
+},
+{
+"id": 2,
+"title": "Implement Export Command Infrastructure with On-Demand Task File Generation",
+"description": "Develop the CLI 'export' command infrastructure, enabling users to generate task files on-demand by invoking the preserved generateTaskFiles function only when requested.",
+"dependencies": [
+1
+],
+"details": "Create the export command with options for output paths and modes (files, PDF, or both). Ensure generateTaskFiles is only called within this command and not elsewhere.",
+"status": "pending",
+"testStrategy": "Test the export command to confirm task files are generated only when explicitly requested and that output paths and options function as intended.",
+"parentTaskId": 96
+},
+{
+"id": 3,
+"title": "Implement Comprehensive PDF Export Functionality",
+"description": "Add PDF export capability to the export command, generating a structured PDF with a first page listing all tasks and subtasks, followed by individual pages for each task and subtask, using a robust PDF library.",
+"dependencies": [
+2
+],
+"details": "Integrate a PDF generation library (e.g., pdfkit, Puppeteer, or jsPDF). Ensure the PDF includes the output of 'tm list --with-subtasks' on the first page, and uses 'tm show <task_id>' and 'tm show <subtask_id>' for subsequent pages. Handle pagination, concurrency, and error handling for large projects.",
+"status": "pending",
+"testStrategy": "Generate PDFs for projects of varying sizes and verify layout, content accuracy, and performance. Test error handling and concurrency under load.",
+"parentTaskId": 96
+},
+{
+"id": 4,
+"title": "Update Documentation, Tests, and CLI Help for Export Workflow",
+"description": "Revise all relevant documentation, automated tests, and CLI help output to reflect the new export-based workflow and available options.",
+"dependencies": [
+2,
+3
+],
+"details": "Update user guides, README files, and CLI help text. Add or modify tests to cover the new export command and its options. Ensure all documentation accurately describes the new workflow and usage.",
+"status": "pending",
+"testStrategy": "Review documentation for completeness and accuracy. Run all tests to ensure coverage of the new export command and verify CLI help output.",
+"parentTaskId": 96
+}
+]
}
]
}
(deleted file)

@@ -1,32 +0,0 @@
{
"models": {
"main": {
"provider": "anthropic",
"modelId": "claude-sonnet-4-20250514",
"maxTokens": 50000,
"temperature": 0.2
},
"research": {
"provider": "perplexity",
"modelId": "sonar-pro",
"maxTokens": 8700,
"temperature": 0.1
},
"fallback": {
"provider": "anthropic",
"modelId": "claude-3-7-sonnet-20250219",
"maxTokens": 128000,
"temperature": 0.2
}
},
"global": {
"logLevel": "info",
"debug": false,
"defaultSubtasks": 5,
"defaultPriority": "medium",
"projectName": "Taskmaster",
"ollamaBaseURL": "http://localhost:11434/api",
"userId": "1234567890",
"azureBaseURL": "https://your-endpoint.azure.com/"
}
}
84
CHANGELOG.md
84
CHANGELOG.md
@@ -1,5 +1,89 @@
|
|||||||
# task-master-ai
|
# task-master-ai
|
||||||
|
|
||||||
|
## 0.16.2-rc.0
|
||||||
|
|
||||||
|
### Patch Changes
|
||||||
|
|
||||||
|
- [#655](https://github.com/eyaltoledano/claude-task-master/pull/655) [`edaa5fe`](https://github.com/eyaltoledano/claude-task-master/commit/edaa5fe0d56e0e4e7c4370670a7a388eebd922ac) Thanks [@Crunchyman-ralph](https://github.com/Crunchyman-ralph)! - Fix double .taskmaster directory paths in file resolution utilities
|
||||||
|
|
||||||
|
- Closes #636
|
||||||
|
|
||||||
|
- [#671](https://github.com/eyaltoledano/claude-task-master/pull/671) [`86ea6d1`](https://github.com/eyaltoledano/claude-task-master/commit/86ea6d1dbc03eeb39f524f565b50b7017b1d2c9c) Thanks [@joedanz](https://github.com/joedanz)! - Add one-click MCP server installation for Cursor

## 0.16.1

### Patch Changes

- [#641](https://github.com/eyaltoledano/claude-task-master/pull/641) [`ad61276`](https://github.com/eyaltoledano/claude-task-master/commit/ad612763ffbdd35aa1b593c9613edc1dc27a8856) Thanks [@Crunchyman-ralph](https://github.com/Crunchyman-ralph)! - Fix Bedrock issues

- [#648](https://github.com/eyaltoledano/claude-task-master/pull/648) [`9b4168b`](https://github.com/eyaltoledano/claude-task-master/commit/9b4168bb4e4dfc2f4fb0cf6bd5f81a8565879176) Thanks [@Crunchyman-ralph](https://github.com/Crunchyman-ralph)! - Fix MCP tool calls logging errors

- [#641](https://github.com/eyaltoledano/claude-task-master/pull/641) [`ad61276`](https://github.com/eyaltoledano/claude-task-master/commit/ad612763ffbdd35aa1b593c9613edc1dc27a8856) Thanks [@Crunchyman-ralph](https://github.com/Crunchyman-ralph)! - Update rules for the new directory structure

- [#648](https://github.com/eyaltoledano/claude-task-master/pull/648) [`9b4168b`](https://github.com/eyaltoledano/claude-task-master/commit/9b4168bb4e4dfc2f4fb0cf6bd5f81a8565879176) Thanks [@Crunchyman-ralph](https://github.com/Crunchyman-ralph)! - Fix a bug in the expand_all MCP tool

- [#641](https://github.com/eyaltoledano/claude-task-master/pull/641) [`ad61276`](https://github.com/eyaltoledano/claude-task-master/commit/ad612763ffbdd35aa1b593c9613edc1dc27a8856) Thanks [@Crunchyman-ralph](https://github.com/Crunchyman-ralph)! - Fix MCP crashing after certain commands due to console logs

## 0.16.0

### Minor Changes

- [#607](https://github.com/eyaltoledano/claude-task-master/pull/607) [`6a8a68e`](https://github.com/eyaltoledano/claude-task-master/commit/6a8a68e1a3f34dcdf40b355b4602a08d291f8e38) Thanks [@Crunchyman-ralph](https://github.com/Crunchyman-ralph)! - Add AWS Bedrock support

- [#607](https://github.com/eyaltoledano/claude-task-master/pull/607) [`6a8a68e`](https://github.com/eyaltoledano/claude-task-master/commit/6a8a68e1a3f34dcdf40b355b4602a08d291f8e38) Thanks [@Crunchyman-ralph](https://github.com/Crunchyman-ralph)! - Add Google Vertex AI Provider Integration

  - Implemented `VertexAIProvider` class extending `BaseAIProvider`
  - Added authentication and configuration handling for Vertex AI
  - Updated the configuration manager with Vertex-specific getters
  - Modified the unified AI services system to integrate the provider
  - Added documentation for Vertex AI setup and configuration
  - Updated environment variable examples for Vertex AI support
  - Implemented specialized error handling for Vertex-specific issues

- [#607](https://github.com/eyaltoledano/claude-task-master/pull/607) [`6a8a68e`](https://github.com/eyaltoledano/claude-task-master/commit/6a8a68e1a3f34dcdf40b355b4602a08d291f8e38) Thanks [@Crunchyman-ralph](https://github.com/Crunchyman-ralph)! - Add support for Azure

- [#612](https://github.com/eyaltoledano/claude-task-master/pull/612) [`669b744`](https://github.com/eyaltoledano/claude-task-master/commit/669b744ced454116a7b29de6c58b4b8da977186a) Thanks [@Crunchyman-ralph](https://github.com/Crunchyman-ralph)! - Increased the minimum required Node version to >18 (was >14)

- [#607](https://github.com/eyaltoledano/claude-task-master/pull/607) [`6a8a68e`](https://github.com/eyaltoledano/claude-task-master/commit/6a8a68e1a3f34dcdf40b355b4602a08d291f8e38) Thanks [@Crunchyman-ralph](https://github.com/Crunchyman-ralph)! - Renamed `baseUrl` to `baseURL`

- [#604](https://github.com/eyaltoledano/claude-task-master/pull/604) [`80735f9`](https://github.com/eyaltoledano/claude-task-master/commit/80735f9e60c7dda7207e169697f8ac07b6733634) Thanks [@Crunchyman-ralph](https://github.com/Crunchyman-ralph)! - Add a `TASK_MASTER_PROJECT_ROOT` environment variable, supported in `mcp.json` and `.env`, for project root resolution

  Some users had issues where the MCP server was unable to detect the location of their project root; you can now set the `TASK_MASTER_PROJECT_ROOT` environment variable to the root of your project.
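  A minimal sketch of how this can look in practice. The server entry below mirrors the placeholder configuration used by the README's one-click Cursor install; the project path (and which API keys you keep) is something you supply yourself:

  ```jsonc
  // .cursor/mcp.json (or your editor's equivalent mcpServers file) — illustrative only
  {
    "mcpServers": {
      "taskmaster-ai": {
        "command": "npx",
        "args": ["-y", "--package=task-master-ai", "task-master-ai"],
        "env": {
          "ANTHROPIC_API_KEY": "YOUR_ANTHROPIC_API_KEY_HERE",
          // Set this when Task Master cannot detect the project root on its own.
          "TASK_MASTER_PROJECT_ROOT": "/absolute/path/to/your/project"
        }
      }
    }
  }
  ```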
- [#619](https://github.com/eyaltoledano/claude-task-master/pull/619) [`3f64202`](https://github.com/eyaltoledano/claude-task-master/commit/3f64202c9feef83f2bf383c79e4367d337c37e20) Thanks [@Crunchyman-ralph](https://github.com/Crunchyman-ralph)! - Consolidate Task Master files into unified `.taskmaster` directory structure

  This release introduces a new consolidated directory structure that organizes all Task Master files under a single `.taskmaster/` directory for better project organization and cleaner workspace management.

  **New Directory Structure:**

  - `.taskmaster/tasks/` - Task files (previously `tasks/`)
  - `.taskmaster/docs/` - Documentation including PRD files (previously `scripts/`)
  - `.taskmaster/reports/` - Complexity analysis reports (previously `scripts/`)
  - `.taskmaster/templates/` - Template files like the example PRD
  - `.taskmaster/config.json` - Configuration (previously `.taskmasterconfig`)

  **Migration & Backward Compatibility:**

  - Existing projects continue to work with legacy file locations
  - New projects use the consolidated structure automatically
  - Run `task-master migrate` to move existing projects to the new structure
  - All CLI commands and MCP tools automatically detect and use the appropriate file locations

  **Benefits:**

  - Cleaner project root with Task Master files organized in one location
  - Reduced file scatter across multiple directories
  - Improved project navigation and maintenance
  - Consistent file organization across all Task Master projects

  This change maintains full backward compatibility while providing a migration path to the improved structure.
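  For orientation, a minimal sketch of the consolidated config file described above. The `models`/`global` grouping and the role entries are illustrative assumptions, not the authoritative schema; the file is normally managed with `task-master models --setup` rather than edited by hand:

  ```jsonc
  // .taskmaster/config.json — illustrative sketch only
  {
    "models": {
      "main": { /* model used for ordinary generation */ },
      "research": { /* model used when --research is requested */ },
      "fallback": { /* model used if the main call fails */ }
    },
    "global": {
      "projectName": "Taskmaster",
      "defaultPriority": "medium",
      "ollamaBaseURL": "http://localhost:11434/api",
      "azureOpenaiBaseURL": "https://your-endpoint.openai.azure.com/",
      "bedrockBaseURL": "https://bedrock.us-east-1.amazonaws.com"
    }
  }
  ```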
### Patch Changes

- [#607](https://github.com/eyaltoledano/claude-task-master/pull/607) [`6a8a68e`](https://github.com/eyaltoledano/claude-task-master/commit/6a8a68e1a3f34dcdf40b355b4602a08d291f8e38) Thanks [@Crunchyman-ralph](https://github.com/Crunchyman-ralph)! - Fix max_tokens error when trying to use claude-sonnet-4 and claude-opus-4

- [#625](https://github.com/eyaltoledano/claude-task-master/pull/625) [`2d520de`](https://github.com/eyaltoledano/claude-task-master/commit/2d520de2694da3efe537b475ca52baf3c869edda) Thanks [@Crunchyman-ralph](https://github.com/Crunchyman-ralph)! - Fix add-task MCP command causing an error

## 0.16.0-rc.0

### Minor Changes
19
README.md

@@ -2,10 +2,13 @@
 [](https://github.com/eyaltoledano/claude-task-master/actions/workflows/ci.yml) [](https://badge.fury.io/js/task-master-ai) [](https://discord.gg/taskmasterai) [](LICENSE)
-### By [@eyaltoledano](https://x.com/eyaltoledano) & [@RalphEcom](https://x.com/RalphEcom)
+[](https://www.npmjs.com/package/task-master-ai) [](https://www.npmjs.com/package/task-master-ai) [](https://www.npmjs.com/package/task-master-ai)
+
+### By [@eyaltoledano](https://x.com/eyaltoledano), [@RalphEcom](https://x.com/RalphEcom) & [@jasonzhou1993](https://x.com/jasonzhou1993)
 [](https://x.com/eyaltoledano)
 [](https://x.com/RalphEcom)
+[](https://x.com/jasonzhou1993)
 A task management system for AI-driven development with Claude, designed to work seamlessly with Cursor AI.

@@ -39,9 +42,17 @@ MCP (Model Control Protocol) lets you run Task Master directly from your editor.
 | **Cursor** | Global | `~/.cursor/mcp.json` | `%USERPROFILE%\.cursor\mcp.json` | `mcpServers` |
 | | Project | `<project_folder>/.cursor/mcp.json` | `<project_folder>\.cursor\mcp.json` | `mcpServers` |
 | **Windsurf** | Global | `~/.codeium/windsurf/mcp_config.json` | `%USERPROFILE%\.codeium\windsurf\mcp_config.json` | `mcpServers` |
 | **VS Code** | Project | `<project_folder>/.vscode/mcp.json` | `<project_folder>\.vscode\mcp.json` | `servers` |
-##### Cursor & Windsurf (`mcpServers`)
+##### Quick Install for Cursor (One-Click)
+
+[<img src="https://cursor.com/deeplink/mcp-install-dark.png" alt="Add Task Master MCP server to Cursor" style="max-height: 32px;">](cursor://anysphere.cursor-deeplink/mcp/install?name=taskmaster-ai&config=eyJjb21tYW5kIjoibnB4IiwiYXJncyI6WyIteSIsIi0tcGFja2FnZT10YXNrLW1hc3Rlci1haSIsInRhc2stbWFzdGVyLWFpIl0sImVudiI6eyJBTlRIUk9QSUNfQVBJX0tFWSI6IllPVVJfQU5USFJPUElDX0FQSV9LRVlfSEVSRSIsIlBFUlBMRVhJVFlfQVBJX0tFWSI6IllPVVJfUEVSUExFWElUWV9BUElfS0VZX0hFUkUiLCJPUEVOQUlfQVBJX0tFWSI6IllPVVJfT1BFTkFJX0tFWV9IRVJFIiwiR09PR0xFX0FQSV9LRVkiOiJZT1VSX0dPT0dMRV9LRVlfSEVSRSIsIk1JU1RSQUxfQVBJX0tFWSI6IllPVVJfTUlTVFJBTF9LRVlfSEVSRSIsIk9QRU5ST1VURVJfQVBJX0tFWSI6IllPVVJfT1BFTlJPVVRFUl9LRVlfSEVSRSIsIlhBSV9BUElfS0VZIjoiWU9VUl9YQUlfS0VZX0hFUkUiLCJBWlVSRV9PUEVOQUJFX0FQSV9LRVkiOiJZT1VSX0FaVVJFX0tFWV9IRVJFIiwiT0xMQU1BX0FQSV9LRVkiOiJZT1VSX09MTEFNQV9BUElfS0VZX0hFUkUifX0%3D)
+
+> **Note:** After clicking the install button, you'll still need to add your API keys to the configuration. The button installs the MCP server with placeholder keys that you'll need to replace with your actual API keys.
+
+##### Manual Configuration
+
+###### Cursor & Windsurf (`mcpServers`)
 ```jsonc
 {

@@ -67,7 +78,7 @@ MCP (Model Control Protocol) lets you run Task Master directly from your editor.
 > 🔑 Replace `YOUR_…_KEY_HERE` with your real API keys. You can remove keys you don't use.
-##### VS Code (`servers` + `type`)
+###### VS Code (`servers` + `type`)
 ```jsonc
 {
@@ -56,17 +56,21 @@ task-master generate # Update task markd
 ```
 project/
-├── tasks/
-│   ├── tasks.json                  # Main task database
-│   ├── task-1.md                   # Individual task files
-│   └── task-2.md
-├── scripts/
-│   ├── prd.txt                     # Product requirements
-│   └── task-complexity-report.json
+├── .taskmaster/
+│   ├── tasks/                      # Task files directory
+│   │   ├── tasks.json              # Main task database
+│   │   ├── task-1.md               # Individual task files
+│   │   └── task-2.md
+│   ├── docs/                       # Documentation directory
+│   │   ├── prd.txt                 # Product requirements
+│   ├── reports/                    # Analysis reports directory
+│   │   └── task-complexity-report.json
+│   ├── templates/                  # Template files
+│   │   └── example_prd.txt         # Example PRD template
+│   └── config.json                 # AI models & settings
 ├── .claude/
 │   ├── settings.json               # Claude Code configuration
 │   └── commands/                   # Custom slash commands
-├── .taskmasterconfig               # AI models & settings
 ├── .env                            # API keys
 ├── .mcp.json                       # MCP configuration
 └── CLAUDE.md                       # This file - auto-loaded by Claude Code

@@ -384,7 +388,7 @@ These commands make AI calls and may take up to a minute:
 ### File Management
 - Never manually edit `tasks.json` - use commands instead
-- Never manually edit `.taskmasterconfig` - use `task-master models`
+- Never manually edit `.taskmaster/config.json` - use `task-master models`
 - Task markdown files in `tasks/` are auto-generated
 - Run `task-master generate` after manual changes to tasks.json
@@ -26,6 +26,7 @@
   "defaultPriority": "medium",
   "projectName": "Taskmaster",
   "ollamaBaseURL": "http://localhost:11434/api",
-  "azureOpenaiBaseURL": "https://your-endpoint.openai.azure.com/"
+  "azureOpenaiBaseURL": "https://your-endpoint.openai.azure.com/",
+  "bedrockBaseURL": "https://bedrock.us-east-1.amazonaws.com"
  }
 }
@@ -5,5 +5,5 @@ OPENAI_API_KEY="your_openai_api_key_here" # Optional, for OpenAI/Ope
 GOOGLE_API_KEY="your_google_api_key_here" # Optional, for Google Gemini models.
 MISTRAL_API_KEY="your_mistral_key_here" # Optional, for Mistral AI models.
 XAI_API_KEY="YOUR_XAI_KEY_HERE" # Optional, for xAI AI models.
-AZURE_OPENAI_API_KEY="your_azure_key_here" # Optional, for Azure OpenAI models (requires endpoint in .taskmasterconfig).
+AZURE_OPENAI_API_KEY="your_azure_key_here" # Optional, for Azure OpenAI models (requires endpoint in .taskmaster/config.json).
 OLLAMA_API_KEY="your_ollama_api_key_here" # Optional: For remote Ollama servers that require authentication.
@@ -20,7 +20,7 @@ In an AI-driven development process—particularly with tools like [Cursor](http
 Task Master configuration is now managed through two primary methods:
-1. **`.taskmasterconfig` File (Project Root - Primary)**
+1. **`.taskmaster/config.json` File (Project Root - Primary)**
    - Stores AI model selections (`main`, `research`, `fallback`), model parameters (`maxTokens`, `temperature`), `logLevel`, `defaultSubtasks`, `defaultPriority`, `projectName`, etc.
    - Managed using the `task-master models --setup` command or the `models` MCP tool.

@@ -192,7 +192,7 @@ Notes:
 ## AI Integration (Updated)
 - The script now uses a unified AI service layer (`ai-services-unified.js`).
-- Model selection (e.g., Claude vs. Perplexity for `--research`) is determined by the configuration in `.taskmasterconfig` based on the requested `role` (`main` or `research`).
+- Model selection (e.g., Claude vs. Perplexity for `--research`) is determined by the configuration in `.taskmaster/config.json` based on the requested `role` (`main` or `research`).
 - API keys are automatically resolved from your `.env` file (for CLI) or MCP session environment.
 - To use the research capabilities (e.g., `expand --research`), ensure you have:
   1. Configured a model for the `research` role using `task-master models --setup` (Perplexity models are recommended).
@@ -1,4 +1,4 @@
-# Available Models as of May 27, 2025
+# Available Models as of June 8, 2025
 ## Main Models

@@ -24,6 +24,7 @@
 | google | gemini-2.5-flash-preview-04-17 | — | — | — |
 | google | gemini-2.0-flash | 0.754 | 0.15 | 0.6 |
 | google | gemini-2.0-flash-lite | — | — | — |
+| perplexity | sonar-pro | — | 3 | 15 |
 | perplexity | sonar-reasoning-pro | 0.211 | 2 | 8 |
 | perplexity | sonar-reasoning | 0.211 | 1 | 5 |
 | xai | grok-3 | — | 3 | 15 |

@@ -70,6 +71,8 @@
 | perplexity | sonar-pro | — | 3 | 15 |
 | perplexity | sonar | — | 1 | 1 |
 | perplexity | deep-research | 0.211 | 2 | 8 |
+| perplexity | sonar-reasoning-pro | 0.211 | 2 | 8 |
+| perplexity | sonar-reasoning | 0.211 | 1 | 5 |
 | xai | grok-3 | — | 3 | 15 |
 | xai | grok-3-fast | — | 5 | 25 |
@@ -1,4 +1,4 @@
-``# Taskmaster AI Installation Guide
+# Taskmaster AI Installation Guide
 This guide helps AI assistants install and configure Taskmaster for users in their development projects.
@@ -28,8 +28,7 @@ export async function complexityReportDirect(args, log) {
   log.error('complexityReportDirect called without reportPath');
   return {
    success: false,
-   error: { code: 'MISSING_ARGUMENT', message: 'reportPath is required' },
-   fromCache: false
+   error: { code: 'MISSING_ARGUMENT', message: 'reportPath is required' }
   };
  }

@@ -111,8 +110,7 @@ export async function complexityReportDirect(args, log) {
    error: {
     code: 'UNEXPECTED_ERROR',
     message: error.message
-   },
-   fromCache: false
+   }
   };
  }
 }
@@ -60,7 +60,8 @@ export async function expandAllTasksDirect(args, log, context = {}) {
   useResearch,
   additionalContext,
   forceFlag,
-  { session, mcpLog, projectRoot }
+  { session, mcpLog, projectRoot },
+  'json'
  );

  // Core function now returns a summary object including the *aggregated* telemetryData
@@ -29,7 +29,7 @@ import { createLogWrapper } from '../../tools/utils.js';
  * @param {Object} log - Logger object
  * @param {Object} context - Context object containing session
  * @param {Object} [context.session] - MCP Session object
- * @returns {Promise<Object>} - Task expansion result { success: boolean, data?: any, error?: { code: string, message: string }, fromCache: boolean }
+ * @returns {Promise<Object>} - Task expansion result { success: boolean, data?: any, error?: { code: string, message: string } }
  */
 export async function expandTaskDirect(args, log, context = {}) {
  const { session } = context; // Extract session

@@ -54,8 +54,7 @@ export async function expandTaskDirect(args, log, context = {}) {
    error: {
     code: 'MISSING_ARGUMENT',
     message: 'tasksJsonPath is required'
-   },
-   fromCache: false
+   }
   };
  }

@@ -73,8 +72,7 @@ export async function expandTaskDirect(args, log, context = {}) {
    error: {
     code: 'INPUT_VALIDATION_ERROR',
     message: 'Task ID is required'
-   },
-   fromCache: false
+   }
   };
  }

@@ -105,8 +103,7 @@ export async function expandTaskDirect(args, log, context = {}) {
    error: {
     code: 'INVALID_TASKS_FILE',
     message: `No valid tasks found in ${tasksPath}. readJSON returned: ${JSON.stringify(data)}`
-   },
-   fromCache: false
+   }
   };
  }

@@ -121,8 +118,7 @@ export async function expandTaskDirect(args, log, context = {}) {
    error: {
     code: 'TASK_NOT_FOUND',
     message: `Task with ID ${taskId} not found`
-   },
-   fromCache: false
+   }
   };
  }

@@ -133,8 +129,7 @@ export async function expandTaskDirect(args, log, context = {}) {
    error: {
     code: 'TASK_COMPLETED',
     message: `Task ${taskId} is already marked as ${task.status} and cannot be expanded`
-   },
-   fromCache: false
+   }
   };
  }

@@ -151,8 +146,7 @@ export async function expandTaskDirect(args, log, context = {}) {
    task,
    subtasksAdded: 0,
    hasExistingSubtasks
-   },
-   fromCache: false
+   }
   };
  }

@@ -232,8 +226,7 @@ export async function expandTaskDirect(args, log, context = {}) {
    subtasksAdded,
    hasExistingSubtasks,
    telemetryData: coreResult.telemetryData
-   },
-   fromCache: false
+   }
   };
  } catch (error) {
   // Make sure to restore normal logging even if there's an error

@@ -245,8 +238,7 @@ export async function expandTaskDirect(args, log, context = {}) {
    error: {
     code: 'CORE_FUNCTION_ERROR',
     message: error.message || 'Failed to expand task'
-   },
-   fromCache: false
+   }
   };
  }
 } catch (error) {

@@ -256,8 +248,7 @@ export async function expandTaskDirect(args, log, context = {}) {
    error: {
     code: 'CORE_FUNCTION_ERROR',
     message: error.message || 'Failed to expand task'
-   },
-   fromCache: false
+   }
   };
  }
 }
@@ -28,8 +28,7 @@ export async function generateTaskFilesDirect(args, log) {
   log.error(errorMessage);
   return {
    success: false,
-   error: { code: 'MISSING_ARGUMENT', message: errorMessage },
-   fromCache: false
+   error: { code: 'MISSING_ARGUMENT', message: errorMessage }
   };
  }
  if (!outputDir) {

@@ -37,8 +36,7 @@ export async function generateTaskFilesDirect(args, log) {
   log.error(errorMessage);
   return {
    success: false,
-   error: { code: 'MISSING_ARGUMENT', message: errorMessage },
-   fromCache: false
+   error: { code: 'MISSING_ARGUMENT', message: errorMessage }
   };
  }

@@ -65,8 +63,7 @@ export async function generateTaskFilesDirect(args, log) {
   log.error(`Error in generateTaskFiles: ${genError.message}`);
   return {
    success: false,
-   error: { code: 'GENERATE_FILES_ERROR', message: genError.message },
-   fromCache: false
+   error: { code: 'GENERATE_FILES_ERROR', message: genError.message }
   };
  }

@@ -79,8 +76,7 @@ export async function generateTaskFilesDirect(args, log) {
    outputDir: resolvedOutputDir,
    taskFiles:
     'Individual task files have been generated in the output directory'
-   },
-   fromCache: false // This operation always modifies state and should never be cached
+   }
   };
  } catch (error) {
   // Make sure to restore normal logging if an outer error occurs

@@ -92,8 +88,7 @@ export async function generateTaskFilesDirect(args, log) {
    error: {
     code: 'GENERATE_TASKS_ERROR',
     message: error.message || 'Unknown error generating task files'
-   },
-   fromCache: false
+   }
   };
  }
 }
@@ -41,8 +41,7 @@ export async function initializeProjectDirect(args, log, context = {}) {
     code: 'INVALID_TARGET_DIRECTORY',
     message: `Cannot initialize project: Invalid target directory '${targetDirectory}' received. Please ensure a valid workspace/folder is open or specified.`,
     details: `Received args.projectRoot: ${args.projectRoot}` // Show what was received
-   },
-   fromCache: false
+   }
   };
  }

@@ -97,8 +96,8 @@ export async function initializeProjectDirect(args, log, context = {}) {
  }

  if (success) {
-  return { success: true, data: resultData, fromCache: false };
+  return { success: true, data: resultData };
  } else {
-  return { success: false, error: errorResult, fromCache: false };
+  return { success: false, error: errorResult };
  }
 }
@@ -14,7 +14,7 @@ import {
  *
  * @param {Object} args - Command arguments (now expecting tasksJsonPath explicitly).
  * @param {Object} log - Logger object.
- * @returns {Promise<Object>} - Task list result { success: boolean, data?: any, error?: { code: string, message: string }, fromCache: boolean }.
+ * @returns {Promise<Object>} - Task list result { success: boolean, data?: any, error?: { code: string, message: string } }.
  */
 export async function listTasksDirect(args, log) {
  // Destructure the explicit tasksJsonPath from args

@@ -27,8 +27,7 @@ export async function listTasksDirect(args, log) {
    error: {
     code: 'MISSING_ARGUMENT',
     message: 'tasksJsonPath is required'
-   },
-   fromCache: false
+   }
   };
  }
@@ -19,7 +19,7 @@ import {
  * @param {Object} args - Command arguments
  * @param {string} args.tasksJsonPath - Explicit path to the tasks.json file.
  * @param {Object} log - Logger object
- * @returns {Promise<Object>} - Next task result { success: boolean, data?: any, error?: { code: string, message: string }, fromCache: boolean }
+ * @returns {Promise<Object>} - Next task result { success: boolean, data?: any, error?: { code: string, message: string } }
  */
 export async function nextTaskDirect(args, log) {
  // Destructure expected args

@@ -32,8 +32,7 @@ export async function nextTaskDirect(args, log) {
    error: {
     code: 'MISSING_ARGUMENT',
     message: 'tasksJsonPath is required'
-   },
-   fromCache: false
+   }
   };
  }

@@ -121,7 +120,7 @@ export async function nextTaskDirect(args, log) {
  // Use the caching utility
  try {
   const result = await coreNextTaskAction();
-  log.info(`nextTaskDirect completed.`);
+  log.info('nextTaskDirect completed.');
   return result;
  } catch (error) {
   log.error(`Unexpected error during nextTask: ${error.message}`);

@@ -130,8 +129,7 @@ export async function nextTaskDirect(args, log) {
    error: {
     code: 'UNEXPECTED_ERROR',
     message: error.message
-   },
-   fromCache: false
+   }
   };
  }
 }
@@ -77,7 +77,7 @@ export async function parsePRDDirect(args, log, context = {}) {
   ? path.isAbsolute(outputArg)
    ? outputArg
    : path.resolve(projectRoot, outputArg)
-  : resolveProjectPath(TASKMASTER_TASKS_FILE, session) ||
+  : resolveProjectPath(TASKMASTER_TASKS_FILE, args) ||
    path.resolve(projectRoot, TASKMASTER_TASKS_FILE);

  // Check if input file exists
@@ -21,7 +21,7 @@ import {
  * @param {string} args.tasksJsonPath - Explicit path to the tasks.json file.
  * @param {string} args.id - The ID(s) of the task(s) or subtask(s) to remove (comma-separated for multiple).
  * @param {Object} log - Logger object
- * @returns {Promise<Object>} - Remove task result { success: boolean, data?: any, error?: { code: string, message: string }, fromCache: false }
+ * @returns {Promise<Object>} - Remove task result { success: boolean, data?: any, error?: { code: string, message: string } }
  */
 export async function removeTaskDirect(args, log) {
  // Destructure expected args

@@ -35,8 +35,7 @@ export async function removeTaskDirect(args, log) {
    error: {
     code: 'MISSING_ARGUMENT',
     message: 'tasksJsonPath is required'
-   },
-   fromCache: false
+   }
   };
  }

@@ -48,8 +47,7 @@ export async function removeTaskDirect(args, log) {
    error: {
     code: 'INPUT_VALIDATION_ERROR',
     message: 'Task ID is required'
-   },
-   fromCache: false
+   }
   };
  }

@@ -68,8 +66,7 @@ export async function removeTaskDirect(args, log) {
    error: {
     code: 'INVALID_TASKS_FILE',
     message: `No valid tasks found in ${tasksJsonPath}`
-   },
-   fromCache: false
+   }
   };
  }

@@ -83,8 +80,7 @@ export async function removeTaskDirect(args, log) {
    error: {
     code: 'INVALID_TASK_ID',
     message: `The following tasks were not found: ${invalidTasks.join(', ')}`
-   },
-   fromCache: false
+   }
   };
  }

@@ -133,8 +129,7 @@ export async function removeTaskDirect(args, log) {
    details: failedRemovals
     .map((r) => `${r.taskId}: ${r.error}`)
     .join('; ')
-   },
-   fromCache: false
+   }
   };
  }

@@ -147,8 +142,7 @@ export async function removeTaskDirect(args, log) {
    failed: failedRemovals.length,
    results: results,
    tasksPath: tasksJsonPath
-   },
-   fromCache: false
+   }
   };
  } catch (error) {
   // Ensure silent mode is disabled even if an outer error occurs

@@ -161,8 +155,7 @@ export async function removeTaskDirect(args, log) {
    error: {
     code: 'UNEXPECTED_ERROR',
     message: error.message
-   },
-   fromCache: false
+   }
   };
  }
 }
@@ -29,8 +29,7 @@ export async function setTaskStatusDirect(args, log) {
   log.error(errorMessage);
   return {
    success: false,
-   error: { code: 'MISSING_ARGUMENT', message: errorMessage },
-   fromCache: false
+   error: { code: 'MISSING_ARGUMENT', message: errorMessage }
   };
  }

@@ -41,8 +40,7 @@ export async function setTaskStatusDirect(args, log) {
   log.error(errorMessage);
   return {
    success: false,
-   error: { code: 'MISSING_TASK_ID', message: errorMessage },
-   fromCache: false
+   error: { code: 'MISSING_TASK_ID', message: errorMessage }
   };
  }

@@ -52,8 +50,7 @@ export async function setTaskStatusDirect(args, log) {
   log.error(errorMessage);
   return {
    success: false,
-   error: { code: 'MISSING_STATUS', message: errorMessage },
-   fromCache: false
+   error: { code: 'MISSING_STATUS', message: errorMessage }
   };
  }

@@ -82,8 +79,7 @@ export async function setTaskStatusDirect(args, log) {
    taskId,
    status: newStatus,
    tasksPath: tasksPath // Return the path used
-   },
-   fromCache: false // This operation always modifies state and should never be cached
+   }
   };

   // If the task was completed, attempt to fetch the next task

@@ -126,8 +122,7 @@ export async function setTaskStatusDirect(args, log) {
    error: {
     code: 'SET_STATUS_ERROR',
     message: error.message || 'Unknown error setting task status'
-   },
-   fromCache: false
+   }
   };
  } finally {
   // ALWAYS restore normal logging in finally block

@@ -145,8 +140,7 @@ export async function setTaskStatusDirect(args, log) {
    error: {
     code: 'SET_STATUS_ERROR',
     message: error.message || 'Unknown error setting task status'
-   },
-   fromCache: false
+   }
   };
  }
 }
@@ -42,8 +42,7 @@ export async function updateSubtaskByIdDirect(args, log, context = {}) {
   logWrapper.error(errorMessage);
   return {
    success: false,
-   error: { code: 'MISSING_ARGUMENT', message: errorMessage },
-   fromCache: false
+   error: { code: 'MISSING_ARGUMENT', message: errorMessage }
   };
  }

@@ -54,8 +53,7 @@ export async function updateSubtaskByIdDirect(args, log, context = {}) {
   logWrapper.error(errorMessage);
   return {
    success: false,
-   error: { code: 'INVALID_SUBTASK_ID', message: errorMessage },
-   fromCache: false
+   error: { code: 'INVALID_SUBTASK_ID', message: errorMessage }
   };
  }

@@ -65,8 +63,7 @@ export async function updateSubtaskByIdDirect(args, log, context = {}) {
   logWrapper.error(errorMessage);
   return {
    success: false,
-   error: { code: 'MISSING_PROMPT', message: errorMessage },
-   fromCache: false
+   error: { code: 'MISSING_PROMPT', message: errorMessage }
   };
  }

@@ -77,8 +74,7 @@ export async function updateSubtaskByIdDirect(args, log, context = {}) {
   log.error(errorMessage);
   return {
    success: false,
-   error: { code: 'INVALID_SUBTASK_ID_TYPE', message: errorMessage },
-   fromCache: false
+   error: { code: 'INVALID_SUBTASK_ID_TYPE', message: errorMessage }
   };
  }

@@ -88,8 +84,7 @@ export async function updateSubtaskByIdDirect(args, log, context = {}) {
   log.error(errorMessage);
   return {
    success: false,
-   error: { code: 'INVALID_SUBTASK_ID_FORMAT', message: errorMessage },
-   fromCache: false
+   error: { code: 'INVALID_SUBTASK_ID_FORMAT', message: errorMessage }
   };
  }

@@ -128,8 +123,7 @@ export async function updateSubtaskByIdDirect(args, log, context = {}) {
   logWrapper.error(message);
   return {
    success: false,
-   error: { code: 'SUBTASK_NOT_FOUND', message: message },
-   fromCache: false
+   error: { code: 'SUBTASK_NOT_FOUND', message: message }
   };
  }

@@ -146,8 +140,7 @@ export async function updateSubtaskByIdDirect(args, log, context = {}) {
    tasksPath,
    useResearch,
    telemetryData: coreResult.telemetryData
-   },
-   fromCache: false
+   }
   };
  } catch (error) {
   logWrapper.error(`Error updating subtask by ID: ${error.message}`);

@@ -156,8 +149,7 @@ export async function updateSubtaskByIdDirect(args, log, context = {}) {
    error: {
     code: 'UPDATE_SUBTASK_CORE_ERROR',
     message: error.message || 'Unknown error updating subtask'
-   },
-   fromCache: false
+   }
   };
  } finally {
   if (!wasSilent && isSilentMode()) {

@@ -174,8 +166,7 @@ export async function updateSubtaskByIdDirect(args, log, context = {}) {
    error: {
     code: 'DIRECT_FUNCTION_SETUP_ERROR',
     message: error.message || 'Unknown setup error'
-   },
-   fromCache: false
+   }
   };
  }
 }
@@ -42,8 +42,7 @@ export async function updateTaskByIdDirect(args, log, context = {}) {
   logWrapper.error(errorMessage);
   return {
    success: false,
-   error: { code: 'MISSING_ARGUMENT', message: errorMessage },
-   fromCache: false
+   error: { code: 'MISSING_ARGUMENT', message: errorMessage }
   };
  }

@@ -54,8 +53,7 @@ export async function updateTaskByIdDirect(args, log, context = {}) {
   logWrapper.error(errorMessage);
   return {
    success: false,
-   error: { code: 'MISSING_TASK_ID', message: errorMessage },
-   fromCache: false
+   error: { code: 'MISSING_TASK_ID', message: errorMessage }
   };
  }

@@ -65,8 +63,7 @@ export async function updateTaskByIdDirect(args, log, context = {}) {
   logWrapper.error(errorMessage);
   return {
    success: false,
-   error: { code: 'MISSING_PROMPT', message: errorMessage },
-   fromCache: false
+   error: { code: 'MISSING_PROMPT', message: errorMessage }
   };
  }

@@ -84,8 +81,7 @@ export async function updateTaskByIdDirect(args, log, context = {}) {
   logWrapper.error(errorMessage);
   return {
    success: false,
-   error: { code: 'INVALID_TASK_ID', message: errorMessage },
-   fromCache: false
+   error: { code: 'INVALID_TASK_ID', message: errorMessage }
   };
  }
 }

@@ -137,8 +133,7 @@ export async function updateTaskByIdDirect(args, log, context = {}) {
    taskId: taskId,
    updated: false,
    telemetryData: coreResult?.telemetryData
-   },
-   fromCache: false
+   }
   };
  }

@@ -155,8 +150,7 @@ export async function updateTaskByIdDirect(args, log, context = {}) {
    updated: true,
    updatedTask: coreResult.updatedTask,
    telemetryData: coreResult.telemetryData
-   },
-   fromCache: false
+   }
   };
  } catch (error) {
   logWrapper.error(`Error updating task by ID: ${error.message}`);

@@ -165,8 +159,7 @@ export async function updateTaskByIdDirect(args, log, context = {}) {
    error: {
     code: 'UPDATE_TASK_CORE_ERROR',
     message: error.message || 'Unknown error updating task'
-   },
-   fromCache: false
+   }
   };
  } finally {
   if (!wasSilent && isSilentMode()) {

@@ -181,8 +174,7 @@ export async function updateTaskByIdDirect(args, log, context = {}) {
    error: {
     code: 'DIRECT_FUNCTION_SETUP_ERROR',
     message: error.message || 'Unknown setup error'
-   },
-   fromCache: false
+   }
   };
  }
 }
@@ -21,7 +21,7 @@ import {
  */
 export async function updateTasksDirect(args, log, context = {}) {
  const { session } = context;
- const { from, prompt, research, file: fileArg, projectRoot } = args;
+ const { from, prompt, research, tasksJsonPath, projectRoot } = args;

  // Create the standard logger wrapper
  const logWrapper = createLogWrapper(log);

@@ -60,20 +60,15 @@ export async function updateTasksDirect(args, log, context = {}) {
   };
  }

- // Resolve tasks file path
- const tasksFile = fileArg
-  ? path.resolve(projectRoot, fileArg)
-  : path.resolve(projectRoot, 'tasks', 'tasks.json');
-
  logWrapper.info(
-  `Updating tasks via direct function. From: ${from}, Research: ${research}, File: ${tasksFile}, ProjectRoot: ${projectRoot}`
+  `Updating tasks via direct function. From: ${from}, Research: ${research}, File: ${tasksJsonPath}, ProjectRoot: ${projectRoot}`
  );

  enableSilentMode(); // Enable silent mode
  try {
   // Call the core updateTasks function
   const result = await updateTasks(
-   tasksFile,
+   tasksJsonPath,
    from,
    prompt,
    research,

@@ -93,7 +88,7 @@ export async function updateTasksDirect(args, log, context = {}) {
    success: true,
    data: {
     message: `Successfully updated ${result.updatedTasks.length} tasks.`,
-    tasksFile,
+    tasksPath: tasksJsonPath,
     updatedCount: result.updatedTasks.length,
     telemetryData: result.telemetryData
    }
@@ -1,10 +1,10 @@
|
|||||||
import path from 'path';
|
import path from 'path';
|
||||||
import fs from 'fs';
|
|
||||||
import {
|
import {
|
||||||
findTasksPath as coreFindTasksPath,
|
findTasksPath as coreFindTasksPath,
|
||||||
findPRDPath as coreFindPrdPath,
|
findPRDPath as coreFindPrdPath,
|
||||||
findComplexityReportPath as coreFindComplexityReportPath,
|
findComplexityReportPath as coreFindComplexityReportPath,
|
||||||
findProjectRoot as coreFindProjectRoot
|
findProjectRoot as coreFindProjectRoot,
|
||||||
|
normalizeProjectRoot
|
||||||
} from '../../../../src/utils/path-utils.js';
|
} from '../../../../src/utils/path-utils.js';
|
||||||
import { PROJECT_MARKERS } from '../../../../src/constants/paths.js';
|
import { PROJECT_MARKERS } from '../../../../src/constants/paths.js';
|
||||||
|
|
||||||
@@ -13,22 +13,22 @@ import { PROJECT_MARKERS } from '../../../../src/constants/paths.js';
|
|||||||
* This module handles session-specific path resolution for the MCP server
|
* This module handles session-specific path resolution for the MCP server
|
||||||
*/
|
*/
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Silent logger for MCP context to prevent console output
|
||||||
|
*/
|
||||||
|
const silentLogger = {
|
||||||
|
info: () => {},
|
||||||
|
warn: () => {},
|
||||||
|
error: () => {},
|
||||||
|
debug: () => {},
|
||||||
|
success: () => {}
|
||||||
|
};
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Cache for last found project root to improve performance
|
* Cache for last found project root to improve performance
|
||||||
*/
|
*/
|
||||||
export const lastFoundProjectRoot = null;
|
export const lastFoundProjectRoot = null;
|
||||||
|
|
||||||
/**
|
|
||||||
* Find tasks.json file with MCP support
|
|
||||||
* @param {string} [explicitPath] - Explicit path to tasks.json (highest priority)
|
|
||||||
* @param {Object} [args] - Arguments object for context
|
|
||||||
* @param {Object} [log] - Logger object to prevent console logging
|
|
||||||
* @returns {string|null} - Resolved path to tasks.json or null if not found
|
|
||||||
*/
|
|
||||||
export function findTasksPathCore(explicitPath, args = null, log = null) {
|
|
||||||
return coreFindTasksPath(explicitPath, args, log);
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Find PRD file with MCP support
|
* Find PRD file with MCP support
|
||||||
* @param {string} [explicitPath] - Explicit path to PRD file (highest priority)
|
* @param {string} [explicitPath] - Explicit path to PRD file (highest priority)
|
||||||
@@ -36,25 +36,10 @@ export function findTasksPathCore(explicitPath, args = null, log = null) {
|
|||||||
* @param {Object} [log] - Logger object to prevent console logging
|
* @param {Object} [log] - Logger object to prevent console logging
|
||||||
* @returns {string|null} - Resolved path to PRD file or null if not found
|
* @returns {string|null} - Resolved path to PRD file or null if not found
|
||||||
*/
|
*/
|
||||||
export function findPrdPath(explicitPath, args = null, log = null) {
|
export function findPrdPath(explicitPath, args = null, log = silentLogger) {
|
||||||
return coreFindPrdPath(explicitPath, args, log);
|
return coreFindPrdPath(explicitPath, args, log);
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
|
||||||
* Find complexity report file with MCP support
|
|
||||||
* @param {string} [explicitPath] - Explicit path to complexity report (highest priority)
|
|
||||||
* @param {Object} [args] - Arguments object for context
|
|
||||||
* @param {Object} [log] - Logger object to prevent console logging
|
|
||||||
* @returns {string|null} - Resolved path to complexity report or null if not found
|
|
||||||
*/
|
|
||||||
export function findComplexityReportPathCore(
|
|
||||||
explicitPath,
|
|
||||||
args = null,
|
|
||||||
log = null
|
|
||||||
) {
|
|
||||||
return coreFindComplexityReportPath(explicitPath, args, log);
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Resolve tasks.json path from arguments
|
* Resolve tasks.json path from arguments
|
||||||
* Prioritizes explicit path parameter, then uses fallback logic
|
* Prioritizes explicit path parameter, then uses fallback logic
|
||||||
@@ -62,22 +47,27 @@ export function findComplexityReportPathCore(
|
|||||||
* @param {Object} [log] - Logger object to prevent console logging
|
* @param {Object} [log] - Logger object to prevent console logging
|
||||||
* @returns {string|null} - Resolved path to tasks.json or null if not found
|
* @returns {string|null} - Resolved path to tasks.json or null if not found
|
||||||
*/
|
*/
|
||||||
export function resolveTasksPath(args, log = null) {
|
export function resolveTasksPath(args, log = silentLogger) {
|
||||||
// Get explicit path from args.file if provided
|
// Get explicit path from args.file if provided
|
||||||
const explicitPath = args?.file;
|
const explicitPath = args?.file;
|
||||||
const projectRoot = args?.projectRoot;
|
const rawProjectRoot = args?.projectRoot;
|
||||||
|
|
||||||
// If explicit path is provided and absolute, use it directly
|
// If explicit path is provided and absolute, use it directly
|
||||||
if (explicitPath && path.isAbsolute(explicitPath)) {
|
if (explicitPath && path.isAbsolute(explicitPath)) {
|
||||||
return explicitPath;
|
return explicitPath;
|
||||||
}
|
}
|
||||||
|
|
||||||
// If explicit path is relative, resolve it relative to projectRoot
|
// Normalize project root if provided
|
||||||
|
const projectRoot = rawProjectRoot
|
||||||
|
? normalizeProjectRoot(rawProjectRoot)
|
||||||
|
: null;
|
||||||
|
|
||||||
|
// If explicit path is relative, resolve it relative to normalized projectRoot
|
||||||
if (explicitPath && projectRoot) {
|
if (explicitPath && projectRoot) {
|
||||||
return path.resolve(projectRoot, explicitPath);
|
return path.resolve(projectRoot, explicitPath);
|
||||||
}
|
}
|
||||||
|
|
||||||
// Use core findTasksPath with explicit path and projectRoot context
|
// Use core findTasksPath with explicit path and normalized projectRoot context
|
||||||
if (projectRoot) {
|
if (projectRoot) {
|
||||||
return coreFindTasksPath(explicitPath, { projectRoot }, log);
|
return coreFindTasksPath(explicitPath, { projectRoot }, log);
|
||||||
}
|
}
|
||||||
@@ -92,22 +82,27 @@ export function resolveTasksPath(args, log = null) {
|
|||||||
* @param {Object} [log] - Logger object to prevent console logging
|
* @param {Object} [log] - Logger object to prevent console logging
|
||||||
* @returns {string|null} - Resolved path to PRD file or null if not found
|
* @returns {string|null} - Resolved path to PRD file or null if not found
|
||||||
*/
|
*/
|
||||||
export function resolvePrdPath(args, log = null) {
|
export function resolvePrdPath(args, log = silentLogger) {
|
||||||
// Get explicit path from args.input if provided
|
// Get explicit path from args.input if provided
|
||||||
const explicitPath = args?.input;
|
const explicitPath = args?.input;
|
||||||
const projectRoot = args?.projectRoot;
|
const rawProjectRoot = args?.projectRoot;
|
||||||
|
|
||||||
// If explicit path is provided and absolute, use it directly
|
// If explicit path is provided and absolute, use it directly
|
||||||
if (explicitPath && path.isAbsolute(explicitPath)) {
|
if (explicitPath && path.isAbsolute(explicitPath)) {
|
||||||
return explicitPath;
|
return explicitPath;
|
||||||
}
|
}
|
||||||
|
|
||||||
// If explicit path is relative, resolve it relative to projectRoot
|
// Normalize project root if provided
|
||||||
|
const projectRoot = rawProjectRoot
|
||||||
|
? normalizeProjectRoot(rawProjectRoot)
|
||||||
|
: null;
|
||||||
|
|
||||||
|
+// If explicit path is relative, resolve it relative to normalized projectRoot
if (explicitPath && projectRoot) {
return path.resolve(projectRoot, explicitPath);
}

-// Use core findPRDPath with explicit path and projectRoot context
+// Use core findPRDPath with explicit path and normalized projectRoot context
if (projectRoot) {
return coreFindPrdPath(explicitPath, { projectRoot }, log);
}
@@ -122,22 +117,27 @@ export function resolvePrdPath(args, log = null) {
* @param {Object} [log] - Logger object to prevent console logging
* @returns {string|null} - Resolved path to complexity report or null if not found
*/
-export function resolveComplexityReportPath(args, log = null) {
+export function resolveComplexityReportPath(args, log = silentLogger) {
// Get explicit path from args.complexityReport if provided
const explicitPath = args?.complexityReport;
-const projectRoot = args?.projectRoot;
+const rawProjectRoot = args?.projectRoot;

// If explicit path is provided and absolute, use it directly
if (explicitPath && path.isAbsolute(explicitPath)) {
return explicitPath;
}

-// If explicit path is relative, resolve it relative to projectRoot
+// Normalize project root if provided
+const projectRoot = rawProjectRoot
+? normalizeProjectRoot(rawProjectRoot)
+: null;

+// If explicit path is relative, resolve it relative to normalized projectRoot
if (explicitPath && projectRoot) {
return path.resolve(projectRoot, explicitPath);
}

-// Use core findComplexityReportPath with explicit path and projectRoot context
+// Use core findComplexityReportPath with explicit path and normalized projectRoot context
if (projectRoot) {
return coreFindComplexityReportPath(explicitPath, { projectRoot }, log);
}
@@ -158,13 +158,16 @@ export function resolveProjectPath(relativePath, args) {
throw new Error('projectRoot is required in args to resolve project paths');
}

+// Normalize the project root to prevent double .taskmaster paths
+const projectRoot = normalizeProjectRoot(args.projectRoot);

// If already absolute, return as-is
if (path.isAbsolute(relativePath)) {
return relativePath;
}

-// Resolve relative to projectRoot
+// Resolve relative to normalized projectRoot
-return path.resolve(args.projectRoot, relativePath);
+return path.resolve(projectRoot, relativePath);
}

/**
@@ -184,7 +187,7 @@ export function findProjectRoot(startDir) {
* @param {Object} [log] - Log function to prevent console logging
* @returns {string|null} - Resolved path to tasks.json or null if not found
*/
-export function findTasksPath(args, log = null) {
+export function findTasksPath(args, log = silentLogger) {
return resolveTasksPath(args, log);
}

@@ -194,7 +197,7 @@ export function findTasksPath(args, log = null) {
* @param {Object} [log] - Log function to prevent console logging
* @returns {string|null} - Resolved path to complexity report or null if not found
*/
-export function findComplexityReportPath(args, log = null) {
+export function findComplexityReportPath(args, log = silentLogger) {
return resolveComplexityReportPath(args, log);
}

@@ -205,7 +208,7 @@ export function findComplexityReportPath(args, log = null) {
* @param {Object} [log] - Logger object to prevent console logging
* @returns {string|null} - Resolved path to PRD file or null if not found
*/
-export function findPRDPath(explicitPath, args = null, log = null) {
+export function findPRDPath(explicitPath, args = null, log = silentLogger) {
return findPrdPath(explicitPath, args, log);
}
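The hunks above route every raw projectRoot through normalizeProjectRoot before resolving relative paths against it. The implementation of normalizeProjectRoot is not part of this diff; the sketch below only illustrates the behaviour implied by the "prevent double .taskmaster paths" comment, and the helper body is an assumption.

```js
import path from 'path';

// Hypothetical stand-in for normalizeProjectRoot: strip a trailing ".taskmaster"
// segment so that resolving ".taskmaster/..." paths cannot double the directory.
function normalizeProjectRootSketch(rawRoot) {
	return rawRoot.replace(/[\\/]\.taskmaster[\\/]?$/, '');
}

// With that assumed behaviour, both call sites resolve to the same location:
// path.resolve(normalizeProjectRootSketch('/repo'), '.taskmaster/tasks/tasks.json')
// path.resolve(normalizeProjectRootSketch('/repo/.taskmaster'), '.taskmaster/tasks/tasks.json')
```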
@@ -11,7 +11,7 @@ import {
} from './utils.js';
import { complexityReportDirect } from '../core/task-master-core.js';
import { COMPLEXITY_REPORT_FILE } from '../../../src/constants/paths.js';
-import path from 'path';
+import { findComplexityReportPath } from '../core/utils/path-utils.js';

/**
* Register the complexityReport tool with the MCP server
@@ -38,10 +38,18 @@ export function registerComplexityReportTool(server) {
`Getting complexity report with args: ${JSON.stringify(args)}`
);

-// Use args.projectRoot directly (guaranteed by withNormalizedProjectRoot)
-const reportPath = args.file
-? path.resolve(args.projectRoot, args.file)
-: path.resolve(args.projectRoot, COMPLEXITY_REPORT_FILE);
+const pathArgs = {
+projectRoot: args.projectRoot,
+complexityReport: args.file
+};

+const reportPath = findComplexityReportPath(pathArgs, log);

+if (!reportPath) {
+return createErrorResponse(
+'No complexity report found. Run task-master analyze-complexity first.'
+);
+}

const result = await complexityReportDirect(
{
@@ -51,9 +59,7 @@ export function registerComplexityReportTool(server) {
);

if (result.success) {
-log.info(
-`Successfully retrieved complexity report${result.fromCache ? ' (from cache)' : ''}`
-);
+log.info('Successfully retrieved complexity report');
} else {
log.error(
`Failed to retrieve complexity report: ${result.error.message}`
@@ -116,9 +116,7 @@ export function registerShowTaskTool(server) {
);

if (result.success) {
-log.info(
-`Successfully retrieved task details for ID: ${args.id}${result.fromCache ? ' (from cache)' : ''}`
-);
+log.info(`Successfully retrieved task details for ID: ${args.id}`);
} else {
log.error(`Failed to get task: ${result.error.message}`);
}
@@ -58,7 +58,7 @@ export function registerListTasksTool(server) {
// Resolve the path to tasks.json using new path utilities
let tasksJsonPath;
try {
-tasksJsonPath = resolveTasksPath(args, session);
+tasksJsonPath = resolveTasksPath(args, log);
} catch (error) {
log.error(`Error finding tasks.json: ${error.message}`);
return createErrorResponse(
@@ -87,7 +87,7 @@ export function registerListTasksTool(server) {
);

log.info(
-`Retrieved ${result.success ? result.data?.tasks?.length || 0 : 0} tasks${result.fromCache ? ' (from cache)' : ''}`
+`Retrieved ${result.success ? result.data?.tasks?.length || 0 : 0} tasks`
);
return handleApiResult(result, log, 'Error getting tasks');
} catch (error) {
@@ -47,7 +47,6 @@ export function registerModelsTool(server) {
),
projectRoot: z
.string()
-.optional()
.describe('The directory of the project. Must be an absolute path.'),
openrouter: z
.boolean()
@@ -34,7 +34,6 @@ export function registerMoveTaskTool(server) {
file: z.string().optional().describe('Custom path to tasks.json file'),
projectRoot: z
.string()
-.optional()
.describe(
'Root directory of the project (typically derived from session)'
)
@@ -95,13 +94,16 @@ export function registerMoveTaskTool(server) {
}
}

-return {
+return handleApiResult(
+{
success: true,
data: {
moves: results,
message: `Successfully moved ${results.length} tasks`
}
-};
+},
+log
+);
} else {
// Moving a single task
return handleApiResult(
@@ -32,7 +32,6 @@ export function registerParsePRDTool(server) {
.describe('Absolute path to the PRD document file (.txt, .md, etc.)'),
projectRoot: z
.string()
-.optional()
.describe('The directory of the project. Must be an absolute path.'),
output: z
.string()
@@ -7,6 +7,7 @@ import { spawnSync } from 'child_process';
import path from 'path';
import fs from 'fs';
import { contextManager } from '../core/context-manager.js'; // Import the singleton
+import { fileURLToPath } from 'url';

// Import path utilities to ensure consistent path resolution
import {
@@ -14,6 +15,50 @@ import {
PROJECT_MARKERS
} from '../core/utils/path-utils.js';

+const __filename = fileURLToPath(import.meta.url);

+// Cache for version info to avoid repeated file reads
+let cachedVersionInfo = null;

+/**
+* Get version information from package.json
+* @returns {Object} Version information
+*/
+function getVersionInfo() {
+// Return cached version if available
+if (cachedVersionInfo) {
+return cachedVersionInfo;
+}

+try {
+// Navigate to the project root from the tools directory
+const packageJsonPath = path.join(
+path.dirname(__filename),
+'../../../package.json'
+);
+if (fs.existsSync(packageJsonPath)) {
+const packageJson = JSON.parse(fs.readFileSync(packageJsonPath, 'utf-8'));
+cachedVersionInfo = {
+version: packageJson.version,
+name: packageJson.name
+};
+return cachedVersionInfo;
+}
+cachedVersionInfo = {
+version: 'unknown',
+name: 'task-master-ai'
+};
+return cachedVersionInfo;
+} catch (error) {
+// Fallback version info if package.json can't be read
+cachedVersionInfo = {
+version: 'unknown',
+name: 'task-master-ai'
+};
+return cachedVersionInfo;
+}
+}

/**
* Get normalized project root path
* @param {string|undefined} projectRootRaw - Raw project root from arguments
@@ -199,17 +244,19 @@ function getProjectRootFromSession(session, log) {
* @param {Function} processFunction - Optional function to process successful result data
* @returns {Object} - Standardized MCP response object
*/
-function handleApiResult(
+async function handleApiResult(
result,
log,
errorPrefix = 'API error',
processFunction = processMCPResponseData
) {
+// Get version info for every response
+const versionInfo = getVersionInfo();

if (!result.success) {
const errorMsg = result.error?.message || `Unknown ${errorPrefix}`;
-// Include cache status in error logs
-log.error(`${errorPrefix}: ${errorMsg}. From cache: ${result.fromCache}`); // Keep logging cache status on error
-return createErrorResponse(errorMsg);
+log.error(`${errorPrefix}: ${errorMsg}`);
+return createErrorResponse(errorMsg, versionInfo);
}

// Process the result data if needed
@@ -217,16 +264,14 @@ function handleApiResult(
? processFunction(result.data)
: result.data;

-// Log success including cache status
-log.info(`Successfully completed operation. From cache: ${result.fromCache}`); // Add success log with cache status
+log.info('Successfully completed operation');

-// Create the response payload including the fromCache flag
+// Create the response payload including version info
const responsePayload = {
-fromCache: result.fromCache, // Get the flag from the original 'result'
-data: processedData // Nest the processed data under a 'data' key
+data: processedData,
+version: versionInfo
};

-// Pass this combined payload to createContentResponse
return createContentResponse(responsePayload);
}

@@ -320,8 +365,8 @@ function executeTaskMasterCommand(
* @param {Function} options.actionFn - The async function to execute if the cache misses.
* Should return an object like { success: boolean, data?: any, error?: { code: string, message: string } }.
* @param {Object} options.log - The logger instance.
-* @returns {Promise<Object>} - An object containing the result, indicating if it was from cache.
-* Format: { success: boolean, data?: any, error?: { code: string, message: string }, fromCache: boolean }
+* @returns {Promise<Object>} - An object containing the result.
+* Format: { success: boolean, data?: any, error?: { code: string, message: string } }
*/
async function getCachedOrExecute({ cacheKey, actionFn, log }) {
// Check cache first
@@ -329,11 +374,7 @@ async function getCachedOrExecute({ cacheKey, actionFn, log }) {

if (cachedResult !== undefined) {
log.info(`Cache hit for key: ${cacheKey}`);
-// Return the cached data in the same structure as a fresh result
-return {
-...cachedResult, // Spread the cached result to maintain its structure
-fromCache: true // Just add the fromCache flag
-};
+return cachedResult;
}

log.info(`Cache miss for key: ${cacheKey}. Executing action function.`);
@@ -341,12 +382,10 @@ async function getCachedOrExecute({ cacheKey, actionFn, log }) {
// Execute the action function if cache missed
const result = await actionFn();

-// If the action was successful, cache the result (but without fromCache flag)
+// If the action was successful, cache the result
if (result.success && result.data !== undefined) {
log.info(`Action successful. Caching result for key: ${cacheKey}`);
-// Cache the entire result structure (minus the fromCache flag)
-const { fromCache, ...resultToCache } = result;
-contextManager.setCachedData(cacheKey, resultToCache);
+contextManager.setCachedData(cacheKey, result);
} else if (!result.success) {
log.warn(
`Action failed for cache key ${cacheKey}. Result not cached. Error: ${result.error?.message}`
@@ -357,11 +396,7 @@ async function getCachedOrExecute({ cacheKey, actionFn, log }) {
);
}

-// Return the fresh result, indicating it wasn't from cache
-return {
-...result,
-fromCache: false
-};
+return result;
}

/**
@@ -460,14 +495,22 @@ function createContentResponse(content) {
/**
* Creates error response for tools
* @param {string} errorMessage - Error message to include in response
+* @param {Object} [versionInfo] - Optional version information object
* @returns {Object} - Error content response object in FastMCP format
*/
-function createErrorResponse(errorMessage) {
+function createErrorResponse(errorMessage, versionInfo) {
+// Provide fallback version info if not provided
+if (!versionInfo) {
+versionInfo = getVersionInfo();
+}

return {
content: [
{
type: 'text',
-text: `Error: ${errorMessage}`
+text: `Error: ${errorMessage}
+Version: ${versionInfo.version}
+Name: ${versionInfo.name}`
}
],
isError: true
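For orientation, a minimal sketch of the payload shapes the reworked helpers now produce: handleApiResult nests processed data under data and attaches version info read from package.json, createErrorResponse appends the same version details to the error text, and the old fromCache flag is gone. The literal values below are illustrative only, not copied from the code.

```js
// Success path (handleApiResult -> createContentResponse)
const successPayload = {
	data: {
		/* processed result data */
	},
	version: {
		version: '0.16.2-rc.0', // falls back to 'unknown' if package.json cannot be read
		name: 'task-master-ai'
	}
};

// Error path (createErrorResponse)
const errorResponse = {
	content: [
		{
			type: 'text',
			text: 'Error: <message>\nVersion: 0.16.2-rc.0\nName: task-master-ai'
		}
	],
	isError: true
};
```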
254  package-lock.json  (generated)
@@ -1,12 +1,12 @@
{
"name": "task-master-ai",
-"version": "0.15.0",
+"version": "0.16.2-rc.0",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "task-master-ai",
-"version": "0.15.0",
+"version": "0.16.2-rc.0",
"license": "MIT WITH Commons-Clause",
"dependencies": {
"@ai-sdk/amazon-bedrock": "^2.2.9",
@@ -29,7 +29,7 @@
"cors": "^2.8.5",
"dotenv": "^16.3.1",
"express": "^4.21.2",
-"fastmcp": "^1.20.5",
+"fastmcp": "^2.2.2",
"figlet": "^1.8.0",
"fuse.js": "^7.1.0",
"gradient-string": "^3.0.0",
@@ -64,7 +64,7 @@
"tsx": "^4.16.2"
},
"engines": {
-"node": ">=14.0.0"
+"node": ">=18.0.0"
}
},
"node_modules/@ai-sdk/amazon-bedrock": {
@@ -3593,18 +3593,19 @@
}
},
"node_modules/@modelcontextprotocol/sdk": {
-"version": "1.8.0",
-"resolved": "https://registry.npmjs.org/@modelcontextprotocol/sdk/-/sdk-1.8.0.tgz",
-"integrity": "sha512-e06W7SwrontJDHwCawNO5SGxG+nU9AAx+jpHHZqGl/WrDBdWOpvirC+s58VpJTB5QemI4jTRcjWT4Pt3Q1NPQQ==",
+"version": "1.12.1",
+"resolved": "https://registry.npmjs.org/@modelcontextprotocol/sdk/-/sdk-1.12.1.tgz",
+"integrity": "sha512-KG1CZhZfWg+u8pxeM/mByJDScJSrjjxLc8fwQqbsS8xCjBmQfMNEBTotYdNanKekepnfRI85GtgQlctLFpcYPw==",
"license": "MIT",
"dependencies": {
+"ajv": "^6.12.6",
"content-type": "^1.0.5",
"cors": "^2.8.5",
-"cross-spawn": "^7.0.3",
+"cross-spawn": "^7.0.5",
"eventsource": "^3.0.2",
"express": "^5.0.1",
"express-rate-limit": "^7.5.0",
-"pkce-challenge": "^4.1.0",
+"pkce-challenge": "^5.0.0",
"raw-body": "^3.0.0",
"zod": "^3.23.8",
"zod-to-json-schema": "^3.24.1"
@@ -3668,84 +3669,45 @@
}
},
"node_modules/@modelcontextprotocol/sdk/node_modules/express": {
-"version": "5.0.1",
-"resolved": "https://registry.npmjs.org/express/-/express-5.0.1.tgz",
-"integrity": "sha512-ORF7g6qGnD+YtUG9yx4DFoqCShNMmUKiXuT5oWMHiOvt/4WFbHC6yCwQMTSBMno7AqntNCAzzcnnjowRkTL9eQ==",
+"version": "5.1.0",
+"resolved": "https://registry.npmjs.org/express/-/express-5.1.0.tgz",
+"integrity": "sha512-DT9ck5YIRU+8GYzzU5kT3eHGA5iL+1Zd0EutOmTE9Dtk+Tvuzd23VBU+ec7HPNSTxXYO55gPV/hq4pSBJDjFpA==",
"license": "MIT",
"dependencies": {
"accepts": "^2.0.0",
-"body-parser": "^2.0.1",
+"body-parser": "^2.2.0",
"content-disposition": "^1.0.0",
-"content-type": "~1.0.4",
-"cookie": "0.7.1",
+"content-type": "^1.0.5",
+"cookie": "^0.7.1",
"cookie-signature": "^1.2.1",
-"debug": "4.3.6",
-"depd": "2.0.0",
-"encodeurl": "~2.0.0",
-"escape-html": "~1.0.3",
-"etag": "~1.8.1",
-"finalhandler": "^2.0.0",
-"fresh": "2.0.0",
-"http-errors": "2.0.0",
+"debug": "^4.4.0",
+"encodeurl": "^2.0.0",
+"escape-html": "^1.0.3",
+"etag": "^1.8.1",
+"finalhandler": "^2.1.0",
+"fresh": "^2.0.0",
+"http-errors": "^2.0.0",
"merge-descriptors": "^2.0.0",
-"methods": "~1.1.2",
"mime-types": "^3.0.0",
-"on-finished": "2.4.1",
-"once": "1.4.0",
-"parseurl": "~1.3.3",
-"proxy-addr": "~2.0.7",
-"qs": "6.13.0",
-"range-parser": "~1.2.1",
-"router": "^2.0.0",
-"safe-buffer": "5.2.1",
+"on-finished": "^2.4.1",
+"once": "^1.4.0",
+"parseurl": "^1.3.3",
+"proxy-addr": "^2.0.7",
+"qs": "^6.14.0",
+"range-parser": "^1.2.1",
+"router": "^2.2.0",
"send": "^1.1.0",
-"serve-static": "^2.1.0",
-"setprototypeof": "1.2.0",
-"statuses": "2.0.1",
-"type-is": "^2.0.0",
-"utils-merge": "1.0.1",
-"vary": "~1.1.2"
+"serve-static": "^2.2.0",
+"statuses": "^2.0.1",
+"type-is": "^2.0.1",
+"vary": "^1.1.2"
},
"engines": {
"node": ">= 18"
-}
-},
-"node_modules/@modelcontextprotocol/sdk/node_modules/express/node_modules/debug": {
-"version": "4.3.6",
-"resolved": "https://registry.npmjs.org/debug/-/debug-4.3.6.tgz",
-"integrity": "sha512-O/09Bd4Z1fBrU4VzkhFqVgpPzaGbw6Sm9FEkBT1A/YBXQFGuuSxa1dN2nxgxS34JmKXqYx8CZAwEVoJFImUXIg==",
-"license": "MIT",
-"dependencies": {
-"ms": "2.1.2"
-},
-"engines": {
-"node": ">=6.0"
-},
-"peerDependenciesMeta": {
-"supports-color": {
-"optional": true
-}
-}
-},
-"node_modules/@modelcontextprotocol/sdk/node_modules/express/node_modules/ms": {
-"version": "2.1.2",
-"resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz",
-"integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==",
-"license": "MIT"
-},
-"node_modules/@modelcontextprotocol/sdk/node_modules/express/node_modules/qs": {
-"version": "6.13.0",
-"resolved": "https://registry.npmjs.org/qs/-/qs-6.13.0.tgz",
-"integrity": "sha512-+38qI9SOr8tfZ4QmJNplMUxqjbe7LKvvZgWdExBOmd+egZTtjLB67Gu0HRX3u/XOq7UU2Nx6nsjvS16Z9uwfpg==",
-"license": "BSD-3-Clause",
-"dependencies": {
-"side-channel": "^1.0.6"
-},
-"engines": {
-"node": ">=0.6"
},
"funding": {
-"url": "https://github.com/sponsors/ljharb"
+"type": "opencollective",
+"url": "https://opencollective.com/express"
}
},
"node_modules/@modelcontextprotocol/sdk/node_modules/finalhandler": {
@@ -4640,6 +4602,12 @@
"node": ">=18.0.0"
}
},
+"node_modules/@standard-schema/spec": {
+"version": "1.0.0",
+"resolved": "https://registry.npmjs.org/@standard-schema/spec/-/spec-1.0.0.tgz",
+"integrity": "sha512-m2bOd0f2RT9k8QJx1JN85cZYyH1RqFBdlwtkSlf4tBDYLCiiZnv1fIIwacK6cqwXavOydf0NPToMQgpKq+dVlA==",
+"license": "MIT"
+},
"node_modules/@tokenizer/inflate": {
"version": "0.2.7",
"resolved": "https://registry.npmjs.org/@tokenizer/inflate/-/inflate-0.2.7.tgz",
@@ -4884,6 +4852,22 @@
}
}
},
+"node_modules/ajv": {
+"version": "6.12.6",
+"resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz",
+"integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==",
+"license": "MIT",
+"dependencies": {
+"fast-deep-equal": "^3.1.1",
+"fast-json-stable-stringify": "^2.0.0",
+"json-schema-traverse": "^0.4.1",
+"uri-js": "^4.2.2"
+},
+"funding": {
+"type": "github",
+"url": "https://github.com/sponsors/epoberezkin"
+}
+},
"node_modules/ansi-align": {
"version": "3.0.1",
"resolved": "https://registry.npmjs.org/ansi-align/-/ansi-align-3.0.1.tgz",
@@ -6432,9 +6416,9 @@
}
},
"node_modules/eventsource": {
-"version": "3.0.6",
-"resolved": "https://registry.npmjs.org/eventsource/-/eventsource-3.0.6.tgz",
-"integrity": "sha512-l19WpE2m9hSuyP06+FbuUUf1G+R0SFLrtQfbRb9PRr+oimOfxQhgGCbVaXg5IvZyyTThJsxh6L/srkMiCeBPDA==",
+"version": "3.0.7",
+"resolved": "https://registry.npmjs.org/eventsource/-/eventsource-3.0.7.tgz",
+"integrity": "sha512-CRT1WTyuQoD771GW56XEZFQ/ZoSfWid1alKGDYMmkt2yl8UXrVR4pspqWNEcqKvVIzg6PAltWjxcSSPrboA4iA==",
"license": "MIT",
"dependencies": {
"eventsource-parser": "^3.0.1"
@@ -6636,6 +6620,12 @@
"node": ">=4"
}
},
+"node_modules/fast-deep-equal": {
+"version": "3.1.3",
+"resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz",
+"integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==",
+"license": "MIT"
+},
"node_modules/fast-glob": {
"version": "3.3.3",
"resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.3.3.tgz",
@@ -6657,7 +6647,6 @@
"version": "2.1.0",
"resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz",
"integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==",
-"dev": true,
"license": "MIT"
},
"node_modules/fast-safe-stringify": {
@@ -6690,22 +6679,24 @@
}
},
"node_modules/fastmcp": {
-"version": "1.20.5",
-"resolved": "https://registry.npmjs.org/fastmcp/-/fastmcp-1.20.5.tgz",
-"integrity": "sha512-jwcPgMF9bcE9qsEG82YMlAG26/n5CSYsr95e60ntqWWd+3kgTBbUIasB3HfpqHLTNaQuoX6/jl18fpDcybBjcQ==",
+"version": "2.2.2",
+"resolved": "https://registry.npmjs.org/fastmcp/-/fastmcp-2.2.2.tgz",
+"integrity": "sha512-V6qEfOnABo7lDrwHqZQhCYd52KXzK85/ipllmUyaos8WLAjygP9NuuKcm1kiEWa0jjsFxe2kf/Y+T4PRE+0rEw==",
"license": "MIT",
"dependencies": {
-"@modelcontextprotocol/sdk": "^1.6.0",
+"@modelcontextprotocol/sdk": "^1.10.2",
+"@standard-schema/spec": "^1.0.0",
"execa": "^9.5.2",
-"file-type": "^20.3.0",
+"file-type": "^20.4.1",
"fuse.js": "^7.1.0",
-"mcp-proxy": "^2.10.4",
+"mcp-proxy": "^3.0.3",
"strict-event-emitter-types": "^2.0.0",
-"undici": "^7.4.0",
+"undici": "^7.8.0",
"uri-templates": "^0.2.0",
+"xsschema": "0.3.0-beta.1",
"yargs": "^17.7.2",
-"zod": "^3.24.2",
-"zod-to-json-schema": "^3.24.3"
+"zod": "^3.25.12",
+"zod-to-json-schema": "^3.24.5"
},
"bin": {
"fastmcp": "dist/bin/fastmcp.js"
@@ -9104,6 +9095,12 @@
"integrity": "sha512-es94M3nTIfsEPisRafak+HDLfHXnKBhV3vU5eqPcS3flIWqcxJWgXHXiey3YrpaNsanY5ei1VoYEbOzijuq9BA==",
"license": "(AFL-2.1 OR BSD-3-Clause)"
},
+"node_modules/json-schema-traverse": {
+"version": "0.4.1",
+"resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz",
+"integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==",
+"license": "MIT"
+},
"node_modules/json5": {
"version": "2.2.3",
"resolved": "https://registry.npmjs.org/json5/-/json5-2.2.3.tgz",
@@ -9383,19 +9380,31 @@
}
},
"node_modules/mcp-proxy": {
-"version": "2.12.0",
-"resolved": "https://registry.npmjs.org/mcp-proxy/-/mcp-proxy-2.12.0.tgz",
-"integrity": "sha512-hL2Y6EtK7vkgAOZxOQe9M4Z9g5xEnvR4ZYBKqFi/5tjhz/1jyNEz5NL87Uzv46k8iZQPVNEof/T6arEooBU5bQ==",
+"version": "3.3.0",
+"resolved": "https://registry.npmjs.org/mcp-proxy/-/mcp-proxy-3.3.0.tgz",
+"integrity": "sha512-xyFKQEZ64HC7lxScBHjb5fxiPoyJjjkPhwH5hWUT0oL/ttCpMGZDJrYZRGFKVJiLLkrZPAkHnMGkI+WMlyD/cg==",
"license": "MIT",
"dependencies": {
-"@modelcontextprotocol/sdk": "^1.6.0",
-"eventsource": "^3.0.5",
+"@modelcontextprotocol/sdk": "^1.11.4",
+"eventsource": "^4.0.0",
"yargs": "^17.7.2"
},
"bin": {
"mcp-proxy": "dist/bin/mcp-proxy.js"
}
},
+"node_modules/mcp-proxy/node_modules/eventsource": {
+"version": "4.0.0",
+"resolved": "https://registry.npmjs.org/eventsource/-/eventsource-4.0.0.tgz",
+"integrity": "sha512-fvIkb9qZzdMxgZrEQDyll+9oJsyaVvY92I2Re+qK0qEJ+w5s0X3dtz+M0VAPOjP1gtU3iqWyjQ0G3nvd5CLZ2g==",
+"license": "MIT",
+"dependencies": {
+"eventsource-parser": "^3.0.1"
+},
+"engines": {
+"node": ">=20.0.0"
+}
+},
"node_modules/media-typer": {
"version": "0.3.0",
"resolved": "https://registry.npmjs.org/media-typer/-/media-typer-0.3.0.tgz",
@@ -10085,9 +10094,9 @@
}
},
"node_modules/pkce-challenge": {
-"version": "4.1.0",
-"resolved": "https://registry.npmjs.org/pkce-challenge/-/pkce-challenge-4.1.0.tgz",
-"integrity": "sha512-ZBmhE1C9LcPoH9XZSdwiPtbPHZROwAnMy+kIFQVrnMCxY4Cudlz3gBOpzilgc0jOgRaiT3sIWfpMomW2ar2orQ==",
+"version": "5.0.0",
+"resolved": "https://registry.npmjs.org/pkce-challenge/-/pkce-challenge-5.0.0.tgz",
+"integrity": "sha512-ueGLflrrnvwB3xuo/uGob5pd5FN7l0MsLf0Z87o/UQmRtwjvfylfc9MurIxRAWywCYTgrvpXBcqjV4OfCYGCIQ==",
"license": "MIT",
"engines": {
"node": ">=16.20.0"
@@ -11319,9 +11328,9 @@
}
},
"node_modules/undici": {
-"version": "7.6.0",
-"resolved": "https://registry.npmjs.org/undici/-/undici-7.6.0.tgz",
-"integrity": "sha512-gaFsbThjrDGvAaD670r81RZro/s6H2PVZF640Qn0p5kZK+/rim7/mmyfp2W7VB5vOMaFM8vuFBJUaMlaZTYHlA==",
+"version": "7.10.0",
+"resolved": "https://registry.npmjs.org/undici/-/undici-7.10.0.tgz",
+"integrity": "sha512-u5otvFBOBZvmdjWLVW+5DAc9Nkq8f24g0O9oY7qw2JVIF1VocIFoyz9JFkuVOS2j41AufeO0xnlweJ2RLT8nGw==",
"license": "MIT",
"engines": {
"node": ">=20.18.1"
@@ -11395,6 +11404,15 @@
"browserslist": ">= 4.21.0"
}
},
+"node_modules/uri-js": {
+"version": "4.4.1",
+"resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.4.1.tgz",
+"integrity": "sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==",
+"license": "BSD-2-Clause",
+"dependencies": {
+"punycode": "^2.1.0"
+}
+},
"node_modules/uri-templates": {
"version": "0.2.0",
"resolved": "https://registry.npmjs.org/uri-templates/-/uri-templates-0.2.0.tgz",
@@ -11605,6 +11623,40 @@
}
}
},
+"node_modules/xsschema": {
+"version": "0.3.0-beta.1",
+"resolved": "https://registry.npmjs.org/xsschema/-/xsschema-0.3.0-beta.1.tgz",
+"integrity": "sha512-Z7ZlPKLTc8iUKVfic0Lr66NB777wJqZl3JVLIy1vaNxx6NNTuylYm4wbK78Sgg7kHwaPRqFnuT4IliQM1sDxvg==",
+"license": "MIT",
+"peerDependencies": {
+"@valibot/to-json-schema": "^1.0.0",
+"arktype": "^2.1.16",
+"effect": "^3.14.5",
+"sury": "^10.0.0-rc",
+"zod": "^3.25.0",
+"zod-to-json-schema": "^3.24.5"
+},
+"peerDependenciesMeta": {
+"@valibot/to-json-schema": {
+"optional": true
+},
+"arktype": {
+"optional": true
+},
+"effect": {
+"optional": true
+},
+"sury": {
+"optional": true
+},
+"zod": {
+"optional": true
+},
+"zod-to-json-schema": {
+"optional": true
+}
+}
+},
"node_modules/y18n": {
"version": "5.0.8",
"resolved": "https://registry.npmjs.org/y18n/-/y18n-5.0.8.tgz",
@@ -11734,9 +11786,9 @@
"license": "MIT"
},
"node_modules/zod": {
-"version": "3.24.2",
-"resolved": "https://registry.npmjs.org/zod/-/zod-3.24.2.tgz",
-"integrity": "sha512-lY7CDW43ECgW9u1TcT3IoXHflywfVqDYze4waEz812jR/bZ8FHDsl7pFQoSZTz5N+2NqRXs8GBwnAwo3ZNxqhQ==",
+"version": "3.25.56",
+"resolved": "https://registry.npmjs.org/zod/-/zod-3.25.56.tgz",
+"integrity": "sha512-rd6eEF3BTNvQnR2e2wwolfTmUTnp70aUTqr0oaGbHifzC3BKJsoV+Gat8vxUMR1hwOKBs6El+qWehrHbCpW6SQ==",
"license": "MIT",
"funding": {
"url": "https://github.com/sponsors/colinhacks"
@@ -1,6 +1,6 @@
{
"name": "task-master-ai",
-"version": "0.16.0-rc.0",
+"version": "0.16.2-rc.0",
"description": "A task management system for ambitious AI-driven development that doesn't overwhelm and confuse Cursor.",
"main": "index.js",
"type": "module",
@@ -59,7 +59,7 @@
"cors": "^2.8.5",
"dotenv": "^16.3.1",
"express": "^4.21.2",
-"fastmcp": "^1.20.5",
+"fastmcp": "^2.2.2",
"figlet": "^1.8.0",
"fuse.js": "^7.1.0",
"gradient-string": "^3.0.0",
@@ -509,9 +509,9 @@ function createProjectStructure(addAliases, dryRun, options) {
replacements
);

-// Copy .taskmasterconfig with project name to NEW location
+// Copy config.json with project name to NEW location
copyTemplateFile(
-'.taskmasterconfig',
+'config.json',
path.join(targetDir, TASKMASTER_CONFIG_FILE),
{
...replacements
@@ -22,6 +22,7 @@ import {
isApiKeySet,
getOllamaBaseURL,
getAzureBaseURL,
+getBedrockBaseURL,
getVertexProjectId,
getVertexLocation
} from './config-manager.js';
@@ -410,6 +411,10 @@ async function _unifiedServiceRunner(serviceType, params) {
// For Ollama, use the global Ollama base URL if role-specific URL is not configured
baseURL = getOllamaBaseURL(effectiveProjectRoot);
log('debug', `Using global Ollama base URL: ${baseURL}`);
+} else if (providerName?.toLowerCase() === 'bedrock' && !baseURL) {
+// For Bedrock, use the global Bedrock base URL if role-specific URL is not configured
+baseURL = getBedrockBaseURL(effectiveProjectRoot);
+log('debug', `Using global Bedrock base URL: ${baseURL}`);
}

// Get AI parameters for the current role
@@ -572,7 +577,8 @@ async function _unifiedServiceRunner(serviceType, params) {
lowerCaseMessage.includes('does not support tool_use') ||
lowerCaseMessage.includes('tool use is not supported') ||
lowerCaseMessage.includes('tools are not supported') ||
-lowerCaseMessage.includes('function calling is not supported')
+lowerCaseMessage.includes('function calling is not supported') ||
+lowerCaseMessage.includes('tool use is not supported')
) {
const specificErrorMsg = `Model '${modelId || 'unknown'}' via provider '${providerName || 'unknown'}' does not support the 'tool use' required by generateObjectService. Please configure a model that supports tool/function calling for the '${currentRole}' role, or use generateTextService if structured output is not strictly required.`;
log('error', `[Tool Support Error] ${specificErrorMsg}`);
@@ -87,6 +87,7 @@ import {
TASK_STATUS_OPTIONS
} from '../../src/constants/task-status.js';
import { getTaskMasterVersion } from '../../src/utils/getVersion.js';
+import { syncTasksToReadme } from './sync-readme.js';

/**
* Runs the interactive setup process for model configuration.
@@ -2757,6 +2758,54 @@ Examples:
}
});

+// sync-readme command
+programInstance
+.command('sync-readme')
+.description('Sync the current task list to README.md in the project root')
+.option(
+'-f, --file <file>',
+'Path to the tasks file',
+TASKMASTER_TASKS_FILE
+)
+.option('--with-subtasks', 'Include subtasks in the README output')
+.option(
+'-s, --status <status>',
+'Show only tasks matching this status (e.g., pending, done)'
+)
+.action(async (options) => {
+const tasksPath = options.file || TASKMASTER_TASKS_FILE;
+const withSubtasks = options.withSubtasks || false;
+const status = options.status || null;

+// Find project root
+const projectRoot = findProjectRoot();
+if (!projectRoot) {
+console.error(
+chalk.red(
+'Error: Could not find project root. Make sure you are in a Task Master project directory.'
+)
+);
+process.exit(1);
+}

+console.log(
+chalk.blue(
+`📝 Syncing tasks to README.md${withSubtasks ? ' (with subtasks)' : ''}${status ? ` (status: ${status})` : ''}...`
+)
+);

+const success = await syncTasksToReadme(projectRoot, {
+withSubtasks,
+status,
+tasksPath
+});

+if (!success) {
+console.error(chalk.red('❌ Failed to sync tasks to README.md'));
+process.exit(1);
+}
+});

return programInstance;
}

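The new sync-readme command is a thin wrapper around syncTasksToReadme (the module added further down in this diff). A rough sketch of the equivalent programmatic call, with example option values only:

```js
import { findProjectRoot } from './utils.js';
import { syncTasksToReadme } from './sync-readme.js';

// Mirrors the CLI action: resolve the project root, then sync README.md.
const projectRoot = findProjectRoot();
const ok = await syncTasksToReadme(projectRoot, {
	withSubtasks: true, // --with-subtasks
	status: 'pending', // --status pending
	tasksPath: '.taskmaster/tasks/tasks.json' // --file
});
if (!ok) process.exit(1);
```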
@@ -61,7 +61,8 @@ const DEFAULTS = {
defaultSubtasks: 5,
defaultPriority: 'medium',
projectName: 'Task Master',
-ollamaBaseURL: 'http://localhost:11434/api'
+ollamaBaseURL: 'http://localhost:11434/api',
+bedrockBaseURL: 'https://bedrock.us-east-1.amazonaws.com'
}
};

@@ -382,6 +383,11 @@ function getAzureBaseURL(explicitRoot = null) {
return getGlobalConfig(explicitRoot).azureBaseURL;
}

+function getBedrockBaseURL(explicitRoot = null) {
+// Directly return value from config
+return getGlobalConfig(explicitRoot).bedrockBaseURL;
+}

/**
* Gets the Google Cloud project ID for Vertex AI from configuration
* @param {string|null} explicitRoot - Optional explicit path to the project root.
@@ -779,6 +785,7 @@ export {
getProjectName,
getOllamaBaseURL,
getAzureBaseURL,
+getBedrockBaseURL,
getParametersForRole,
getUserId,
// API Key Checkers (still relevant)
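A short sketch of how the new Bedrock setting is read, mirroring the existing Ollama handling. It assumes getGlobalConfig merges the DEFAULTS block shown above, so an unset value falls back to the us-east-1 endpoint; that merge behaviour is not spelled out in this diff.

```js
import { getBedrockBaseURL } from './config-manager.js';

// explicitRoot is a placeholder for whatever project root the caller resolved.
const explicitRoot = '/path/to/project';

// Returns the configured bedrockBaseURL, defaulting (under the assumption above)
// to 'https://bedrock.us-east-1.amazonaws.com' when nothing overrides it.
const baseURL = getBedrockBaseURL(explicitRoot);
```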
@@ -563,11 +563,6 @@ function cleanupSubtaskDependencies(tasksData) {
* @param {string} tasksPath - Path to tasks.json
*/
async function validateDependenciesCommand(tasksPath, options = {}) {
-// Only display banner if not in silent mode
-if (!isSilentMode()) {
-displayBanner();
-}

log('info', 'Checking for invalid dependencies in task files...');

// Read tasks data
@@ -691,11 +686,6 @@ function countAllDependencies(tasks) {
* @param {Object} options - Options object
*/
async function fixDependenciesCommand(tasksPath, options = {}) {
-// Only display banner if not in silent mode
-if (!isSilentMode()) {
-displayBanner();
-}

log('info', 'Checking for and fixing invalid dependencies in tasks.json...');

try {
@@ -153,7 +153,7 @@
"id": "sonar-pro",
"swe_score": 0,
"cost_per_1m_tokens": { "input": 3, "output": 15 },
-"allowed_roles": ["research"],
+"allowed_roles": ["main", "research"],
"max_tokens": 8700
},
{
@@ -174,14 +174,14 @@
"id": "sonar-reasoning-pro",
"swe_score": 0.211,
"cost_per_1m_tokens": { "input": 2, "output": 8 },
-"allowed_roles": ["main", "fallback"],
+"allowed_roles": ["main", "research", "fallback"],
"max_tokens": 8700
},
{
"id": "sonar-reasoning",
"swe_score": 0.211,
"cost_per_1m_tokens": { "input": 1, "output": 5 },
-"allowed_roles": ["main", "fallback"],
+"allowed_roles": ["main", "research", "fallback"],
"max_tokens": 8700
}
],
184  scripts/modules/sync-readme.js  (new file)
@@ -0,0 +1,184 @@
import fs from 'fs';
import path from 'path';
import chalk from 'chalk';
import { log, findProjectRoot } from './utils.js';
import { getProjectName } from './config-manager.js';
import listTasks from './task-manager/list-tasks.js';

/**
* Creates a basic README structure if one doesn't exist
* @param {string} projectName - Name of the project
* @returns {string} - Basic README content
*/
function createBasicReadme(projectName) {
return `# ${projectName}

This project is managed using Task Master.

`;
}

/**
* Create UTM tracking URL for task-master.dev
* @param {string} projectRoot - The project root path
* @returns {string} - UTM tracked URL
*/
function createTaskMasterUrl(projectRoot) {
// Get the actual folder name from the project root path
const folderName = path.basename(projectRoot);

// Clean folder name for UTM (replace spaces/special chars with hyphens)
const cleanFolderName = folderName
.toLowerCase()
.replace(/[^a-z0-9]/g, '-')
.replace(/-+/g, '-')
.replace(/^-|-$/g, '');

const utmParams = new URLSearchParams({
utm_source: 'github-readme',
utm_medium: 'readme-export',
utm_campaign: cleanFolderName || 'task-sync',
utm_content: 'task-export-link'
});

return `https://task-master.dev?${utmParams.toString()}`;
}

/**
* Create the start marker with metadata
* @param {Object} options - Export options
* @returns {string} - Formatted start marker
*/
function createStartMarker(options) {
const { timestamp, withSubtasks, status, projectRoot } = options;

// Format status filter text
const statusText = status
? `Status filter: ${status}`
: 'Status filter: none';
const subtasksText = withSubtasks ? 'with subtasks' : 'without subtasks';

// Create the export info content
const exportInfo =
`🎯 **Taskmaster Export** - ${timestamp}\n` +
`📋 Export: ${subtasksText} • ${statusText}\n` +
`🔗 Powered by [Task Master](${createTaskMasterUrl(projectRoot)})`;

// Create a markdown box using code blocks and emojis to mimic our UI style
const boxContent =
`<!-- TASKMASTER_EXPORT_START -->\n` +
`> ${exportInfo.split('\n').join('\n> ')}\n\n`;

return boxContent;
}

/**
* Create the end marker
* @returns {string} - Formatted end marker
*/
function createEndMarker() {
return (
`\n> 📋 **End of Taskmaster Export** - Tasks are synced from your project using the \`sync-readme\` command.\n` +
`<!-- TASKMASTER_EXPORT_END -->\n`
);
}

/**
* Syncs the current task list to README.md at the project root
* @param {string} projectRoot - Path to the project root directory
* @param {Object} options - Options for syncing
* @param {boolean} options.withSubtasks - Include subtasks in the output (default: false)
* @param {string} options.status - Filter by status (e.g., 'pending', 'done')
* @param {string} options.tasksPath - Custom path to tasks.json
* @returns {boolean} - True if sync was successful, false otherwise
*/
export async function syncTasksToReadme(projectRoot = null, options = {}) {
try {
const actualProjectRoot = projectRoot || findProjectRoot() || '.';
const { withSubtasks = false, status, tasksPath } = options;

// Get current tasks using the list-tasks functionality with markdown-readme format
const tasksOutput = await listTasks(
tasksPath ||
path.join(actualProjectRoot, '.taskmaster', 'tasks', 'tasks.json'),
status,
null,
withSubtasks,
'markdown-readme'
);

if (!tasksOutput) {
console.log(chalk.red('❌ Failed to generate task output'));
return false;
}

// Generate timestamp and metadata
const timestamp =
new Date().toISOString().replace('T', ' ').substring(0, 19) + ' UTC';
const projectName = getProjectName(actualProjectRoot);

// Create the export markers with metadata
const startMarker = createStartMarker({
timestamp,
withSubtasks,
status,
projectRoot: actualProjectRoot
});

const endMarker = createEndMarker();

// Create the complete task section
const taskSection = startMarker + tasksOutput + endMarker;

// Read current README content
const readmePath = path.join(actualProjectRoot, 'README.md');
let readmeContent = '';
try {
readmeContent = fs.readFileSync(readmePath, 'utf8');
} catch (err) {
if (err.code === 'ENOENT') {
// Create basic README if it doesn't exist
readmeContent = createBasicReadme(projectName);
} else {
throw err;
}
}

// Check if export markers exist and replace content between them
const startComment = '<!-- TASKMASTER_EXPORT_START -->';
const endComment = '<!-- TASKMASTER_EXPORT_END -->';

let updatedContent;
const startIndex = readmeContent.indexOf(startComment);
const endIndex = readmeContent.indexOf(endComment);

if (startIndex !== -1 && endIndex !== -1) {
// Replace existing task section
const beforeTasks = readmeContent.substring(0, startIndex);
const afterTasks = readmeContent.substring(endIndex + endComment.length);
updatedContent = beforeTasks + taskSection + afterTasks;
} else {
// Append to end of README
updatedContent = readmeContent + '\n' + taskSection;
}

// Write updated content to README
fs.writeFileSync(readmePath, updatedContent, 'utf8');

console.log(chalk.green('✅ Successfully synced tasks to README.md'));
console.log(
chalk.cyan(
`📋 Export details: ${withSubtasks ? 'with' : 'without'} subtasks${status ? `, status: ${status}` : ''}`
)
);
console.log(chalk.gray(`📍 Location: ${readmePath}`));

return true;
} catch (error) {
console.log(chalk.red('❌ Failed to sync tasks to README:'), error.message);
log('error', `README sync error: ${error.message}`);
return false;
}
}

export default syncTasksToReadme;
@@ -10,6 +10,8 @@ import {
 	getStatusWithColor,
 	startLoadingIndicator,
 	stopLoadingIndicator,
+	succeedLoadingIndicator,
+	failLoadingIndicator,
 	displayAiUsageSummary
 } from '../ui.js';
 import { readJSON, writeJSON, log as consoleLog, truncate } from '../utils.js';
@@ -279,7 +281,7 @@ async function addTask(
 	// CLI-only feedback for the dependency analysis
 	if (outputFormat === 'text') {
 		console.log(
-			boxen(chalk.cyan.bold('Task Context Analysis') + '\n', {
+			boxen(chalk.cyan.bold('Task Context Analysis'), {
 				padding: { top: 0, bottom: 0, left: 1, right: 1 },
 				margin: { top: 0, bottom: 0 },
 				borderColor: 'cyan',
@@ -492,9 +494,9 @@ async function addTask(
 			includeScore: true, // Return match scores
 			threshold: 0.4, // Lower threshold = stricter matching (range 0-1)
 			keys: [
-				{ name: 'title', weight: 2 }, // Title is most important
-				{ name: 'description', weight: 1.5 }, // Description is next
-				{ name: 'details', weight: 0.8 }, // Details is less important
+				{ name: 'title', weight: 1.5 }, // Title is most important
+				{ name: 'description', weight: 2 }, // Description is very important
+				{ name: 'details', weight: 3 }, // Details is most important
 				// Search dependencies to find tasks that depend on similar things
 				{ name: 'dependencyTitles', weight: 0.5 }
 			],
@@ -502,8 +504,8 @@ async function addTask(
 			shouldSort: true,
 			// Allow searching in nested properties
 			useExtendedSearch: true,
-			// Return up to 15 matches
-			limit: 15
+			// Return up to 50 matches
+			limit: 50
 		};

 		// Prepare task data with dependencies expanded as titles for better semantic search
@@ -596,32 +598,6 @@ async function addTask(
 			// Get top N results for context
 			const relatedTasks = allRelevantTasks.slice(0, 8);

-			// Also look for tasks with similar purposes or categories
-			const purposeCategories = [
-				{ pattern: /(command|cli|flag)/i, label: 'CLI commands' },
-				{ pattern: /(task|subtask|add)/i, label: 'Task management' },
-				{ pattern: /(dependency|depend)/i, label: 'Dependency handling' },
-				{ pattern: /(AI|model|prompt)/i, label: 'AI integration' },
-				{ pattern: /(UI|display|show)/i, label: 'User interface' },
-				{ pattern: /(schedule|time|cron)/i, label: 'Scheduling' }, // Added scheduling category
-				{ pattern: /(config|setting|option)/i, label: 'Configuration' } // Added configuration category
-			];
-
-			promptCategory = purposeCategories.find((cat) =>
-				cat.pattern.test(prompt)
-			);
-			const categoryTasks = promptCategory
-				? data.tasks
-						.filter(
-							(t) =>
-								promptCategory.pattern.test(t.title) ||
-								promptCategory.pattern.test(t.description) ||
-								(t.details && promptCategory.pattern.test(t.details))
-						)
-						.filter((t) => !relatedTasks.some((rt) => rt.id === t.id))
-						.slice(0, 3)
-				: [];
-
 			// Format basic task overviews
 			if (relatedTasks.length > 0) {
 				contextTasks = `\nRelevant tasks identified by semantic similarity:\n${relatedTasks
@@ -632,12 +608,6 @@ async function addTask(
 					.join('\n')}`;
 			}

-			if (categoryTasks.length > 0) {
-				contextTasks += `\n\nTasks related to ${promptCategory.label}:\n${categoryTasks
-					.map((t) => `- Task ${t.id}: ${t.title} - ${t.description}`)
-					.join('\n')}`;
-			}
-
 			if (
 				recentTasks.length > 0 &&
 				!contextTasks.includes('Recently created tasks')
@@ -650,13 +620,10 @@ async function addTask(
 			}

 			// Add detailed information about the most relevant tasks
-			const allDetailedTasks = [
-				...relatedTasks.slice(0, 5),
-				...categoryTasks.slice(0, 2)
-			];
+			const allDetailedTasks = [...relatedTasks.slice(0, 25)];
 			uniqueDetailedTasks = Array.from(
 				new Map(allDetailedTasks.map((t) => [t.id, t])).values()
-			).slice(0, 8);
+			).slice(0, 20);

 			if (uniqueDetailedTasks.length > 0) {
 				contextTasks += `\n\nDetailed information about relevant tasks:`;
@@ -715,18 +682,14 @@ async function addTask(
 			}

 			// Additional analysis of common patterns
-			const similarPurposeTasks = promptCategory
-				? data.tasks.filter(
-						(t) =>
-							promptCategory.pattern.test(t.title) ||
-							promptCategory.pattern.test(t.description)
-					)
-				: [];
+			const similarPurposeTasks = data.tasks.filter((t) =>
+				prompt.toLowerCase().includes(t.title.toLowerCase())
+			);

 			let commonDeps = []; // Initialize commonDeps

 			if (similarPurposeTasks.length > 0) {
-				contextTasks += `\n\nCommon patterns for ${promptCategory ? promptCategory.label : 'similar'} tasks:`;
+				contextTasks += `\n\nCommon patterns for similar tasks:`;

 				// Collect dependencies from similar purpose tasks
 				const similarDeps = similarPurposeTasks
@@ -743,7 +706,7 @@ async function addTask(
 				// Get most common dependencies for similar tasks
 				commonDeps = Object.entries(depCounts)
 					.sort((a, b) => b[1] - a[1])
-					.slice(0, 5);
+					.slice(0, 10);

 				if (commonDeps.length > 0) {
 					contextTasks += '\nMost common dependencies for similar tasks:';
@@ -760,7 +723,7 @@ async function addTask(
 			if (outputFormat === 'text') {
 				console.log(
 					chalk.gray(
-						`  Fuzzy search across ${data.tasks.length} tasks using full prompt and ${promptWords.length} keywords`
+						`  Context search across ${data.tasks.length} tasks using full prompt and ${promptWords.length} keywords`
 					)
 				);

@@ -768,7 +731,7 @@ async function addTask(
 					console.log(
 						chalk.gray(`\n  High relevance matches (score < 0.25):`)
 					);
-					highRelevance.slice(0, 5).forEach((t) => {
+					highRelevance.slice(0, 25).forEach((t) => {
 						console.log(
 							chalk.yellow(`  • ⭐ Task ${t.id}: ${truncate(t.title, 50)}`)
 						);
@@ -779,24 +742,13 @@ async function addTask(
 					console.log(
 						chalk.gray(`\n  Medium relevance matches (score < 0.4):`)
 					);
-					mediumRelevance.slice(0, 3).forEach((t) => {
+					mediumRelevance.slice(0, 10).forEach((t) => {
 						console.log(
 							chalk.green(`  • Task ${t.id}: ${truncate(t.title, 50)}`)
 						);
 					});
 				}

-				if (promptCategory && categoryTasks.length > 0) {
-					console.log(
-						chalk.gray(`\n  Tasks related to ${promptCategory.label}:`)
-					);
-					categoryTasks.forEach((t) => {
-						console.log(
-							chalk.magenta(`  • Task ${t.id}: ${truncate(t.title, 50)}`)
-						);
-					});
-				}
-
 				// Show dependency patterns
 				if (commonDeps && commonDeps.length > 0) {
 					console.log(
@@ -864,10 +816,7 @@ async function addTask(
 						numericDependencies.length > 0
 							? dependentTasks.length // Use length of tasks from explicit dependency path
 							: uniqueDetailedTasks.length // Use length of tasks from fuzzy search path
-					)}` +
-					(promptCategory
-						? `\n${chalk.cyan('Category detected: ')}${chalk.yellow(promptCategory.label)}`
-						: ''),
+					)}`,
 				{
 					padding: { top: 0, bottom: 1, left: 1, right: 1 },
 					margin: { top: 1, bottom: 0 },
@@ -976,17 +925,33 @@ async function addTask(
 			}

 			report('Successfully generated task data from AI.', 'success');

+			// Success! Show checkmark
+			if (loadingIndicator) {
+				succeedLoadingIndicator(
+					loadingIndicator,
+					'Task generated successfully'
+				);
+				loadingIndicator = null; // Clear it
+			}
 		} catch (error) {
+			// Failure! Show X
+			if (loadingIndicator) {
+				failLoadingIndicator(loadingIndicator, 'AI generation failed');
+				loadingIndicator = null;
+			}
 			report(
 				`DEBUG: generateObjectService caught error: ${error.message}`,
 				'debug'
 			);
 			report(`Error generating task with AI: ${error.message}`, 'error');
-			if (loadingIndicator) stopLoadingIndicator(loadingIndicator);
 			throw error; // Re-throw error after logging
 		} finally {
 			report('DEBUG: generateObjectService finally block reached.', 'debug');
-			if (loadingIndicator) stopLoadingIndicator(loadingIndicator); // Ensure indicator stops
+			// Clean up if somehow still running
+			if (loadingIndicator) {
+				stopLoadingIndicator(loadingIndicator);
+			}
 		}
 		// --- End Refactored AI Interaction ---
 	}
@@ -1057,7 +1022,7 @@ async function addTask(
 			truncate(newTask.description, 47)
 		]);

-		console.log(chalk.green('✅ New task created successfully:'));
+		console.log(chalk.green('✓ New task created successfully:'));
 		console.log(table.toString());

 		// Helper to get priority color
@@ -13,8 +13,6 @@ import generateTaskFiles from './generate-task-files.js';
  * @param {string} taskIds - Task IDs to clear subtasks from
  */
 function clearSubtasks(tasksPath, taskIds) {
-	displayBanner();
-
 	log('info', `Reading tasks from ${tasksPath}...`);
 	const data = readJSON(tasksPath);
 	if (!data || !data.tasks) {
@@ -36,11 +36,6 @@ function listTasks(
 	outputFormat = 'text'
 ) {
 	try {
-		// Only display banner for text output
-		if (outputFormat === 'text') {
-			displayBanner();
-		}
-
 		const data = readJSON(tasksPath); // Reads the whole tasks.json
 		if (!data || !data.tasks) {
 			throw new Error(`No valid tasks found in ${tasksPath}`);
@@ -125,86 +120,7 @@ function listTasks(
|
|||||||
const subtaskCompletionPercentage =
|
const subtaskCompletionPercentage =
|
||||||
totalSubtasks > 0 ? (completedSubtasks / totalSubtasks) * 100 : 0;
|
totalSubtasks > 0 ? (completedSubtasks / totalSubtasks) * 100 : 0;
|
||||||
|
|
||||||
// For JSON output, return structured data
|
// Calculate dependency statistics (moved up to be available for all output formats)
|
||||||
if (outputFormat === 'json') {
|
|
||||||
// *** Modification: Remove 'details' field for JSON output ***
|
|
||||||
const tasksWithoutDetails = filteredTasks.map((task) => {
|
|
||||||
// <-- USES filteredTasks!
|
|
||||||
// Omit 'details' from the parent task
|
|
||||||
const { details, ...taskRest } = task;
|
|
||||||
|
|
||||||
// If subtasks exist, omit 'details' from them too
|
|
||||||
if (taskRest.subtasks && Array.isArray(taskRest.subtasks)) {
|
|
||||||
taskRest.subtasks = taskRest.subtasks.map((subtask) => {
|
|
||||||
const { details: subtaskDetails, ...subtaskRest } = subtask;
|
|
||||||
return subtaskRest;
|
|
||||||
});
|
|
||||||
}
|
|
||||||
return taskRest;
|
|
||||||
});
|
|
||||||
// *** End of Modification ***
|
|
||||||
|
|
||||||
return {
|
|
||||||
tasks: tasksWithoutDetails, // <--- THIS IS THE ARRAY BEING RETURNED
|
|
||||||
filter: statusFilter || 'all', // Return the actual filter used
|
|
||||||
stats: {
|
|
||||||
total: totalTasks,
|
|
||||||
completed: doneCount,
|
|
||||||
inProgress: inProgressCount,
|
|
||||||
pending: pendingCount,
|
|
||||||
blocked: blockedCount,
|
|
||||||
deferred: deferredCount,
|
|
||||||
cancelled: cancelledCount,
|
|
||||||
completionPercentage,
|
|
||||||
subtasks: {
|
|
||||||
total: totalSubtasks,
|
|
||||||
completed: completedSubtasks,
|
|
||||||
inProgress: inProgressSubtasks,
|
|
||||||
pending: pendingSubtasks,
|
|
||||||
blocked: blockedSubtasks,
|
|
||||||
deferred: deferredSubtasks,
|
|
||||||
cancelled: cancelledSubtasks,
|
|
||||||
completionPercentage: subtaskCompletionPercentage
|
|
||||||
}
|
|
||||||
}
|
|
||||||
};
|
|
||||||
}
|
|
||||||
|
|
||||||
// ... existing code for text output ...
|
|
||||||
|
|
||||||
// Calculate status breakdowns as percentages of total
|
|
||||||
const taskStatusBreakdown = {
|
|
||||||
'in-progress': totalTasks > 0 ? (inProgressCount / totalTasks) * 100 : 0,
|
|
||||||
pending: totalTasks > 0 ? (pendingCount / totalTasks) * 100 : 0,
|
|
||||||
blocked: totalTasks > 0 ? (blockedCount / totalTasks) * 100 : 0,
|
|
||||||
deferred: totalTasks > 0 ? (deferredCount / totalTasks) * 100 : 0,
|
|
||||||
cancelled: totalTasks > 0 ? (cancelledCount / totalTasks) * 100 : 0
|
|
||||||
};
|
|
||||||
|
|
||||||
const subtaskStatusBreakdown = {
|
|
||||||
'in-progress':
|
|
||||||
totalSubtasks > 0 ? (inProgressSubtasks / totalSubtasks) * 100 : 0,
|
|
||||||
pending: totalSubtasks > 0 ? (pendingSubtasks / totalSubtasks) * 100 : 0,
|
|
||||||
blocked: totalSubtasks > 0 ? (blockedSubtasks / totalSubtasks) * 100 : 0,
|
|
||||||
deferred:
|
|
||||||
totalSubtasks > 0 ? (deferredSubtasks / totalSubtasks) * 100 : 0,
|
|
||||||
cancelled:
|
|
||||||
totalSubtasks > 0 ? (cancelledSubtasks / totalSubtasks) * 100 : 0
|
|
||||||
};
|
|
||||||
|
|
||||||
// Create progress bars with status breakdowns
|
|
||||||
const taskProgressBar = createProgressBar(
|
|
||||||
completionPercentage,
|
|
||||||
30,
|
|
||||||
taskStatusBreakdown
|
|
||||||
);
|
|
||||||
const subtaskProgressBar = createProgressBar(
|
|
||||||
subtaskCompletionPercentage,
|
|
||||||
30,
|
|
||||||
subtaskStatusBreakdown
|
|
||||||
);
|
|
||||||
|
|
||||||
// Calculate dependency statistics
|
|
||||||
const completedTaskIds = new Set(
|
const completedTaskIds = new Set(
|
||||||
data.tasks
|
data.tasks
|
||||||
.filter((t) => t.status === 'done' || t.status === 'completed')
|
.filter((t) => t.status === 'done' || t.status === 'completed')
|
||||||
@@ -276,6 +192,118 @@ function listTasks(
|
|||||||
// Find next task to work on, passing the complexity report
|
// Find next task to work on, passing the complexity report
|
||||||
const nextItem = findNextTask(data.tasks, complexityReport);
|
const nextItem = findNextTask(data.tasks, complexityReport);
|
||||||
|
|
||||||
|
// For JSON output, return structured data
|
||||||
|
if (outputFormat === 'json') {
|
||||||
|
// *** Modification: Remove 'details' field for JSON output ***
|
||||||
|
const tasksWithoutDetails = filteredTasks.map((task) => {
|
||||||
|
// <-- USES filteredTasks!
|
||||||
|
// Omit 'details' from the parent task
|
||||||
|
const { details, ...taskRest } = task;
|
||||||
|
|
||||||
|
// If subtasks exist, omit 'details' from them too
|
||||||
|
if (taskRest.subtasks && Array.isArray(taskRest.subtasks)) {
|
||||||
|
taskRest.subtasks = taskRest.subtasks.map((subtask) => {
|
||||||
|
const { details: subtaskDetails, ...subtaskRest } = subtask;
|
||||||
|
return subtaskRest;
|
||||||
|
});
|
||||||
|
}
|
||||||
|
return taskRest;
|
||||||
|
});
|
||||||
|
// *** End of Modification ***
|
||||||
|
|
||||||
|
return {
|
||||||
|
tasks: tasksWithoutDetails, // <--- THIS IS THE ARRAY BEING RETURNED
|
||||||
|
filter: statusFilter || 'all', // Return the actual filter used
|
||||||
|
stats: {
|
||||||
|
total: totalTasks,
|
||||||
|
completed: doneCount,
|
||||||
|
inProgress: inProgressCount,
|
||||||
|
pending: pendingCount,
|
||||||
|
blocked: blockedCount,
|
||||||
|
deferred: deferredCount,
|
||||||
|
cancelled: cancelledCount,
|
||||||
|
completionPercentage,
|
||||||
|
subtasks: {
|
||||||
|
total: totalSubtasks,
|
||||||
|
completed: completedSubtasks,
|
||||||
|
inProgress: inProgressSubtasks,
|
||||||
|
pending: pendingSubtasks,
|
||||||
|
blocked: blockedSubtasks,
|
||||||
|
deferred: deferredSubtasks,
|
||||||
|
cancelled: cancelledSubtasks,
|
||||||
|
completionPercentage: subtaskCompletionPercentage
|
||||||
|
}
|
||||||
|
}
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
// For markdown-readme output, return formatted markdown
|
||||||
|
if (outputFormat === 'markdown-readme') {
|
||||||
|
return generateMarkdownOutput(data, filteredTasks, {
|
||||||
|
totalTasks,
|
||||||
|
completedTasks,
|
||||||
|
completionPercentage,
|
||||||
|
doneCount,
|
||||||
|
inProgressCount,
|
||||||
|
pendingCount,
|
||||||
|
blockedCount,
|
||||||
|
deferredCount,
|
||||||
|
cancelledCount,
|
||||||
|
totalSubtasks,
|
||||||
|
completedSubtasks,
|
||||||
|
subtaskCompletionPercentage,
|
||||||
|
inProgressSubtasks,
|
||||||
|
pendingSubtasks,
|
||||||
|
blockedSubtasks,
|
||||||
|
deferredSubtasks,
|
||||||
|
cancelledSubtasks,
|
||||||
|
tasksWithNoDeps,
|
||||||
|
tasksReadyToWork,
|
||||||
|
tasksWithUnsatisfiedDeps,
|
||||||
|
mostDependedOnTask,
|
||||||
|
mostDependedOnTaskId,
|
||||||
|
maxDependents,
|
||||||
|
avgDependenciesPerTask,
|
||||||
|
complexityReport,
|
||||||
|
withSubtasks,
|
||||||
|
nextItem
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
// ... existing code for text output ...
|
||||||
|
|
||||||
|
// Calculate status breakdowns as percentages of total
|
||||||
|
const taskStatusBreakdown = {
|
||||||
|
'in-progress': totalTasks > 0 ? (inProgressCount / totalTasks) * 100 : 0,
|
||||||
|
pending: totalTasks > 0 ? (pendingCount / totalTasks) * 100 : 0,
|
||||||
|
blocked: totalTasks > 0 ? (blockedCount / totalTasks) * 100 : 0,
|
||||||
|
deferred: totalTasks > 0 ? (deferredCount / totalTasks) * 100 : 0,
|
||||||
|
cancelled: totalTasks > 0 ? (cancelledCount / totalTasks) * 100 : 0
|
||||||
|
};
|
||||||
|
|
||||||
|
const subtaskStatusBreakdown = {
|
||||||
|
'in-progress':
|
||||||
|
totalSubtasks > 0 ? (inProgressSubtasks / totalSubtasks) * 100 : 0,
|
||||||
|
pending: totalSubtasks > 0 ? (pendingSubtasks / totalSubtasks) * 100 : 0,
|
||||||
|
blocked: totalSubtasks > 0 ? (blockedSubtasks / totalSubtasks) * 100 : 0,
|
||||||
|
deferred:
|
||||||
|
totalSubtasks > 0 ? (deferredSubtasks / totalSubtasks) * 100 : 0,
|
||||||
|
cancelled:
|
||||||
|
totalSubtasks > 0 ? (cancelledSubtasks / totalSubtasks) * 100 : 0
|
||||||
|
};
|
||||||
|
|
||||||
|
// Create progress bars with status breakdowns
|
||||||
|
const taskProgressBar = createProgressBar(
|
||||||
|
completionPercentage,
|
||||||
|
30,
|
||||||
|
taskStatusBreakdown
|
||||||
|
);
|
||||||
|
const subtaskProgressBar = createProgressBar(
|
||||||
|
subtaskCompletionPercentage,
|
||||||
|
30,
|
||||||
|
subtaskStatusBreakdown
|
||||||
|
);
|
||||||
|
|
||||||
// Get terminal width - more reliable method
|
// Get terminal width - more reliable method
|
||||||
let terminalWidth;
|
let terminalWidth;
|
||||||
try {
|
try {
|
||||||
@@ -764,4 +792,232 @@ function getWorkItemDescription(item, allTasks) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Generate markdown-formatted output for README files
|
||||||
|
* @param {Object} data - Full tasks data
|
||||||
|
* @param {Array} filteredTasks - Filtered tasks array
|
||||||
|
* @param {Object} stats - Statistics object
|
||||||
|
* @returns {string} - Formatted markdown string
|
||||||
|
*/
|
||||||
|
function generateMarkdownOutput(data, filteredTasks, stats) {
|
||||||
|
const {
|
||||||
|
totalTasks,
|
||||||
|
completedTasks,
|
||||||
|
completionPercentage,
|
||||||
|
doneCount,
|
||||||
|
inProgressCount,
|
||||||
|
pendingCount,
|
||||||
|
blockedCount,
|
||||||
|
deferredCount,
|
||||||
|
cancelledCount,
|
||||||
|
totalSubtasks,
|
||||||
|
completedSubtasks,
|
||||||
|
subtaskCompletionPercentage,
|
||||||
|
inProgressSubtasks,
|
||||||
|
pendingSubtasks,
|
||||||
|
blockedSubtasks,
|
||||||
|
deferredSubtasks,
|
||||||
|
cancelledSubtasks,
|
||||||
|
tasksWithNoDeps,
|
||||||
|
tasksReadyToWork,
|
||||||
|
tasksWithUnsatisfiedDeps,
|
||||||
|
mostDependedOnTask,
|
||||||
|
mostDependedOnTaskId,
|
||||||
|
maxDependents,
|
||||||
|
avgDependenciesPerTask,
|
||||||
|
complexityReport,
|
||||||
|
withSubtasks,
|
||||||
|
nextItem
|
||||||
|
} = stats;
|
||||||
|
|
||||||
|
let markdown = '';
|
||||||
|
|
||||||
|
// Create progress bars for markdown (using Unicode block characters)
|
||||||
|
const createMarkdownProgressBar = (percentage, width = 20) => {
|
||||||
|
const filled = Math.round((percentage / 100) * width);
|
||||||
|
const empty = width - filled;
|
||||||
|
return '█'.repeat(filled) + '░'.repeat(empty);
|
||||||
|
};
|
||||||
|
|
||||||
|
// Dashboard section
|
||||||
|
markdown += '```\n';
|
||||||
|
markdown +=
|
||||||
|
'╭─────────────────────────────────────────────────────────╮╭─────────────────────────────────────────────────────────╮\n';
|
||||||
|
markdown +=
|
||||||
|
'│ ││ │\n';
|
||||||
|
markdown +=
|
||||||
|
'│ Project Dashboard ││ Dependency Status & Next Task │\n';
|
||||||
|
markdown += `│ Tasks Progress: ${createMarkdownProgressBar(completionPercentage, 20)} ${Math.round(completionPercentage)}% ││ Dependency Metrics: │\n`;
|
||||||
|
markdown += `│ ${Math.round(completionPercentage)}% ││ • Tasks with no dependencies: ${tasksWithNoDeps} │\n`;
|
||||||
|
markdown += `│ Done: ${doneCount} In Progress: ${inProgressCount} Pending: ${pendingCount} Blocked: ${blockedCount} ││ • Tasks ready to work on: ${tasksReadyToWork} │\n`;
|
||||||
|
markdown += `│ Deferred: ${deferredCount} Cancelled: ${cancelledCount} ││ • Tasks blocked by dependencies: ${tasksWithUnsatisfiedDeps} │\n`;
|
||||||
|
markdown += `│ ││ • Most depended-on task: #${mostDependedOnTaskId} (${maxDependents} dependents) │\n`;
|
||||||
|
markdown += `│ Subtasks Progress: ${createMarkdownProgressBar(subtaskCompletionPercentage, 20)} ││ • Avg dependencies per task: ${avgDependenciesPerTask.toFixed(1)} │\n`;
|
||||||
|
markdown += `│ ${Math.round(subtaskCompletionPercentage)}% ${Math.round(subtaskCompletionPercentage)}% ││ │\n`;
|
||||||
|
markdown += `│ Completed: ${completedSubtasks}/${totalSubtasks} In Progress: ${inProgressSubtasks} Pending: ${pendingSubtasks} ││ Next Task to Work On: │\n`;
|
||||||
|
|
||||||
|
const nextTaskTitle = nextItem
|
||||||
|
? nextItem.title.length > 40
|
||||||
|
? nextItem.title.substring(0, 37) + '...'
|
||||||
|
: nextItem.title
|
||||||
|
: 'No task available';
|
||||||
|
|
||||||
|
markdown += `│ Blocked: ${blockedSubtasks} Deferred: ${deferredSubtasks} Cancelled: ${cancelledSubtasks} ││ ID: ${nextItem ? nextItem.id : 'N/A'} - ${nextTaskTitle} │\n`;
|
||||||
|
markdown += `│ ││ Priority: ${nextItem ? nextItem.priority || 'medium' : ''} Dependencies: ${nextItem && nextItem.dependencies && nextItem.dependencies.length > 0 ? 'Some' : 'None'} │\n`;
|
||||||
|
markdown += `│ Priority Breakdown: ││ Complexity: ${nextItem && nextItem.complexityScore ? '● ' + nextItem.complexityScore : 'N/A'} │\n`;
|
||||||
|
markdown += `│ • High priority: ${data.tasks.filter((t) => t.priority === 'high').length} │╰─────────────────────────────────────────────────────────╯\n`;
|
||||||
|
markdown += `│ • Medium priority: ${data.tasks.filter((t) => t.priority === 'medium').length} │\n`;
|
||||||
|
markdown += `│ • Low priority: ${data.tasks.filter((t) => t.priority === 'low').length} │\n`;
|
||||||
|
markdown += '│ │\n';
|
||||||
|
markdown += '╰─────────────────────────────────────────────────────────╯\n';
|
||||||
|
|
||||||
|
// Tasks table
|
||||||
|
markdown +=
|
||||||
|
'┌───────────┬──────────────────────────────────────┬─────────────────┬──────────────┬───────────────────────┬───────────┐\n';
|
||||||
|
markdown +=
|
||||||
|
'│ ID │ Title │ Status │ Priority │ Dependencies │ Complexi… │\n';
|
||||||
|
markdown +=
|
||||||
|
'├───────────┼──────────────────────────────────────┼─────────────────┼──────────────┼───────────────────────┼───────────┤\n';
|
||||||
|
|
||||||
|
// Helper function to format status with symbols
|
||||||
|
const getStatusSymbol = (status) => {
|
||||||
|
switch (status) {
|
||||||
|
case 'done':
|
||||||
|
case 'completed':
|
||||||
|
return '✓ done';
|
||||||
|
case 'in-progress':
|
||||||
|
return '► in-progress';
|
||||||
|
case 'pending':
|
||||||
|
return '○ pending';
|
||||||
|
case 'blocked':
|
||||||
|
return '⭕ blocked';
|
||||||
|
case 'deferred':
|
||||||
|
return 'x deferred';
|
||||||
|
case 'cancelled':
|
||||||
|
return 'x cancelled';
|
||||||
|
case 'review':
|
||||||
|
return '? review';
|
||||||
|
default:
|
||||||
|
return status || 'pending';
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
// Helper function to format dependencies without color codes
|
||||||
|
const formatDependenciesForMarkdown = (deps, allTasks) => {
|
||||||
|
if (!deps || deps.length === 0) return 'None';
|
||||||
|
return deps
|
||||||
|
.map((depId) => {
|
||||||
|
const depTask = allTasks.find((t) => t.id === depId);
|
||||||
|
return depTask ? depId.toString() : depId.toString();
|
||||||
|
})
|
||||||
|
.join(', ');
|
||||||
|
};
|
||||||
|
|
||||||
|
// Process all tasks
|
||||||
|
filteredTasks.forEach((task) => {
|
||||||
|
const taskTitle = task.title; // No truncation for README
|
||||||
|
const statusSymbol = getStatusSymbol(task.status);
|
||||||
|
const priority = task.priority || 'medium';
|
||||||
|
const deps = formatDependenciesForMarkdown(task.dependencies, data.tasks);
|
||||||
|
const complexity = task.complexityScore
|
||||||
|
? `● ${task.complexityScore}`
|
||||||
|
: 'N/A';
|
||||||
|
|
||||||
|
markdown += `│ ${task.id.toString().padEnd(9)} │ ${taskTitle.substring(0, 36).padEnd(36)} │ ${statusSymbol.padEnd(15)} │ ${priority.padEnd(12)} │ ${deps.substring(0, 21).padEnd(21)} │ ${complexity.padEnd(9)} │\n`;
|
||||||
|
|
||||||
|
// Add subtasks if requested
|
||||||
|
if (withSubtasks && task.subtasks && task.subtasks.length > 0) {
|
||||||
|
task.subtasks.forEach((subtask) => {
|
||||||
|
const subtaskTitle = `└─ ${subtask.title}`; // No truncation
|
||||||
|
const subtaskStatus = getStatusSymbol(subtask.status);
|
||||||
|
const subtaskDeps = formatDependenciesForMarkdown(
|
||||||
|
subtask.dependencies,
|
||||||
|
data.tasks
|
||||||
|
);
|
||||||
|
const subtaskComplexity = subtask.complexityScore
|
||||||
|
? subtask.complexityScore.toString()
|
||||||
|
: 'N/A';
|
||||||
|
|
||||||
|
markdown +=
|
||||||
|
'├───────────┼──────────────────────────────────────┼─────────────────┼──────────────┼───────────────────────┼───────────┤\n';
|
||||||
|
markdown += `│ ${task.id}.${subtask.id}${' '.padEnd(6)} │ ${subtaskTitle.substring(0, 36).padEnd(36)} │ ${subtaskStatus.padEnd(15)} │ - │ ${subtaskDeps.substring(0, 21).padEnd(21)} │ ${subtaskComplexity.padEnd(9)} │\n`;
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
markdown +=
|
||||||
|
'├───────────┼──────────────────────────────────────┼─────────────────┼──────────────┼───────────────────────┼───────────┤\n';
|
||||||
|
});
|
||||||
|
|
||||||
|
// Close the table
|
||||||
|
markdown = markdown.slice(
|
||||||
|
0,
|
||||||
|
-1 *
|
||||||
|
'├───────────┼──────────────────────────────────────┼─────────────────┼──────────────┼───────────────────────┼───────────┤\n'
|
||||||
|
.length
|
||||||
|
);
|
||||||
|
markdown +=
|
||||||
|
'└───────────┴──────────────────────────────────────┴─────────────────┴──────────────┴───────────────────────┴───────────┘\n';
|
||||||
|
markdown += '```\n\n';
|
||||||
|
|
||||||
|
// Next task recommendation
|
||||||
|
if (nextItem) {
|
||||||
|
markdown +=
|
||||||
|
'╭────────────────────────────────────────────── ⚡ RECOMMENDED NEXT TASK ⚡ ──────────────────────────────────────────────╮\n';
|
||||||
|
markdown +=
|
||||||
|
'│ │\n';
|
||||||
|
markdown += `│ 🔥 Next Task to Work On: #${nextItem.id} - ${nextItem.title} │\n`;
|
||||||
|
markdown +=
|
||||||
|
'│ │\n';
|
||||||
|
markdown += `│ Priority: ${nextItem.priority || 'medium'} Status: ${getStatusSymbol(nextItem.status)} │\n`;
|
||||||
|
markdown += `│ Dependencies: ${nextItem.dependencies && nextItem.dependencies.length > 0 ? formatDependenciesForMarkdown(nextItem.dependencies, data.tasks) : 'None'} │\n`;
|
||||||
|
markdown +=
|
||||||
|
'│ │\n';
|
||||||
|
markdown += `│ Description: ${getWorkItemDescription(nextItem, data.tasks)} │\n`;
|
||||||
|
markdown +=
|
||||||
|
'│ │\n';
|
||||||
|
|
||||||
|
// Add subtasks if they exist
|
||||||
|
const parentTask = data.tasks.find((t) => t.id === nextItem.id);
|
||||||
|
if (parentTask && parentTask.subtasks && parentTask.subtasks.length > 0) {
|
||||||
|
markdown +=
|
||||||
|
'│ Subtasks: │\n';
|
||||||
|
parentTask.subtasks.forEach((subtask) => {
|
||||||
|
markdown += `│ ${nextItem.id}.${subtask.id} [${subtask.status || 'pending'}] ${subtask.title} │\n`;
|
||||||
|
});
|
||||||
|
markdown +=
|
||||||
|
'│ │\n';
|
||||||
|
}
|
||||||
|
|
||||||
|
markdown += `│ Start working: task-master set-status --id=${nextItem.id} --status=in-progress │\n`;
|
||||||
|
markdown += `│ View details: task-master show ${nextItem.id} │\n`;
|
||||||
|
markdown +=
|
||||||
|
'│ │\n';
|
||||||
|
markdown +=
|
||||||
|
'╰─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯\n\n';
|
||||||
|
}
|
||||||
|
|
||||||
|
// Suggested next steps
|
||||||
|
markdown += '\n';
|
||||||
|
markdown +=
|
||||||
|
'╭──────────────────────────────────────────────────────────────────────────────────────╮\n';
|
||||||
|
markdown +=
|
||||||
|
'│ │\n';
|
||||||
|
markdown +=
|
||||||
|
'│ Suggested Next Steps: │\n';
|
||||||
|
markdown +=
|
||||||
|
'│ │\n';
|
||||||
|
markdown +=
|
||||||
|
'│ 1. Run task-master next to see what to work on next │\n';
|
||||||
|
markdown +=
|
||||||
|
'│ 2. Run task-master expand --id=<id> to break down a task into subtasks │\n';
|
||||||
|
markdown +=
|
||||||
|
'│ 3. Run task-master set-status --id=<id> --status=done to mark a task as complete │\n';
|
||||||
|
markdown +=
|
||||||
|
'│ │\n';
|
||||||
|
markdown +=
|
||||||
|
'╰──────────────────────────────────────────────────────────────────────────────────────╯\n';
|
||||||
|
|
||||||
|
return markdown;
|
||||||
|
}
|
||||||
|
|
||||||
export default listTasks;
|
export default listTasks;
|
||||||
|
|||||||
@@ -450,7 +450,14 @@ async function setModel(role, modelId, options = {}) {
 				openRouterModels.some((m) => m.id === modelId)
 			) {
 				determinedProvider = 'openrouter';
+
+				// Check if this is a free model (ends with :free)
+				if (modelId.endsWith(':free')) {
+					warningMessage = `Warning: OpenRouter free model '${modelId}' selected. Free models have significant limitations including lower context windows, reduced rate limits, and may not support advanced features like tool_use. Consider using the paid version '${modelId.replace(':free', '')}' for full functionality.`;
+				} else {
 					warningMessage = `Warning: Custom OpenRouter model '${modelId}' set. This model is not officially validated by Taskmaster and may not function as expected.`;
+				}
+
 				report('warn', warningMessage);
 			} else {
 				// Hinted as OpenRouter but not found in live check
@@ -482,6 +489,11 @@ async function setModel(role, modelId, options = {}) {
 					`Model ID "${modelId}" not found in the Ollama instance. Please verify the model is pulled and available. You can check available models with: curl ${tagsUrl}`
 				);
 			}
+		} else if (providerHint === 'bedrock') {
+			// Set provider without model validation since Bedrock models are managed by AWS
+			determinedProvider = 'bedrock';
+			warningMessage = `Warning: Custom Bedrock model '${modelId}' set. Please ensure the model ID is valid and accessible in your AWS account.`;
+			report('warn', warningMessage);
 		} else {
 			// Invalid provider hint - should not happen
 			throw new Error(`Invalid provider hint received: ${providerHint}`);
@@ -33,8 +33,6 @@ async function setTaskStatus(tasksPath, taskIdInput, newStatus, options = {}) {

 	// Only display UI elements if not in MCP mode
 	if (!isMcpMode) {
-		displayBanner();
-
 		console.log(
 			boxen(chalk.white.bold(`Updating Task Status to: ${newStatus}`), {
 				padding: 1,
@@ -24,7 +24,10 @@ import {
 } from './task-manager.js';
 import { getProjectName, getDefaultSubtasks } from './config-manager.js';
 import { TASK_STATUS_OPTIONS } from '../../src/constants/task-status.js';
-import { TASKMASTER_TASKS_FILE } from '../../src/constants/paths.js';
+import {
+	TASKMASTER_CONFIG_FILE,
+	TASKMASTER_TASKS_FILE
+} from '../../src/constants/paths.js';
 import { getTaskMasterVersion } from '../../src/utils/getVersion.js';

 // Create a color gradient for the banner
@@ -37,7 +40,7 @@ const warmGradient = gradient(['#fb8b24', '#e36414', '#9a031e']);
 function displayBanner() {
 	if (isSilentMode()) return;

-	console.clear();
+	// console.clear(); // Removing this to avoid clearing the terminal per command
 	const bannerText = figlet.textSync('Task Master', {
 		font: 'Standard',
 		horizontalLayout: 'default',
@@ -75,6 +78,8 @@ function displayBanner() {
  * @returns {Object} Spinner object
  */
 function startLoadingIndicator(message) {
+	if (isSilentMode()) return null;
+
 	const spinner = ora({
 		text: message,
 		color: 'cyan'
@@ -84,15 +89,75 @@ function startLoadingIndicator(message) {
 }

 /**
- * Stop a loading indicator
+ * Stop a loading indicator (basic stop, no success/fail indicator)
  * @param {Object} spinner - Spinner object to stop
  */
 function stopLoadingIndicator(spinner) {
-	if (spinner && spinner.stop) {
+	if (spinner && typeof spinner.stop === 'function') {
 		spinner.stop();
 	}
 }

+/**
+ * Complete a loading indicator with success (shows checkmark)
+ * @param {Object} spinner - Spinner object to complete
+ * @param {string} message - Optional success message (defaults to current text)
+ */
+function succeedLoadingIndicator(spinner, message = null) {
+	if (spinner && typeof spinner.succeed === 'function') {
+		if (message) {
+			spinner.succeed(message);
+		} else {
+			spinner.succeed();
+		}
+	}
+}
+
+/**
+ * Complete a loading indicator with failure (shows X)
+ * @param {Object} spinner - Spinner object to fail
+ * @param {string} message - Optional failure message (defaults to current text)
+ */
+function failLoadingIndicator(spinner, message = null) {
+	if (spinner && typeof spinner.fail === 'function') {
+		if (message) {
+			spinner.fail(message);
+		} else {
+			spinner.fail();
+		}
+	}
+}
+
+/**
+ * Complete a loading indicator with warning (shows warning symbol)
+ * @param {Object} spinner - Spinner object to warn
+ * @param {string} message - Optional warning message (defaults to current text)
+ */
+function warnLoadingIndicator(spinner, message = null) {
+	if (spinner && typeof spinner.warn === 'function') {
+		if (message) {
+			spinner.warn(message);
+		} else {
+			spinner.warn();
+		}
+	}
+}
+
+/**
+ * Complete a loading indicator with info (shows info symbol)
+ * @param {Object} spinner - Spinner object to complete with info
+ * @param {string} message - Optional info message (defaults to current text)
+ */
+function infoLoadingIndicator(spinner, message = null) {
+	if (spinner && typeof spinner.info === 'function') {
+		if (message) {
+			spinner.info(message);
+		} else {
+			spinner.info();
+		}
+	}
+}
+
 /**
  * Create a colored progress bar
  * @param {number} percent - The completion percentage
@@ -229,14 +294,14 @@ function getStatusWithColor(status, forTable = false) {
 	}

 	const statusConfig = {
-		done: { color: chalk.green, icon: '✅', tableIcon: '✓' },
-		completed: { color: chalk.green, icon: '✅', tableIcon: '✓' },
-		pending: { color: chalk.yellow, icon: '⏱️', tableIcon: '⏱' },
+		done: { color: chalk.green, icon: '✓', tableIcon: '✓' },
+		completed: { color: chalk.green, icon: '✓', tableIcon: '✓' },
+		pending: { color: chalk.yellow, icon: '○', tableIcon: '⏱' },
 		'in-progress': { color: chalk.hex('#FFA500'), icon: '🔄', tableIcon: '►' },
-		deferred: { color: chalk.gray, icon: '⏱️', tableIcon: '⏱' },
-		blocked: { color: chalk.red, icon: '❌', tableIcon: '✗' },
-		review: { color: chalk.magenta, icon: '👀', tableIcon: '👁' },
-		cancelled: { color: chalk.gray, icon: '❌', tableIcon: '✗' }
+		deferred: { color: chalk.gray, icon: 'x', tableIcon: '⏱' },
+		blocked: { color: chalk.red, icon: '!', tableIcon: '✗' },
+		review: { color: chalk.magenta, icon: '?', tableIcon: '?' },
+		cancelled: { color: chalk.gray, icon: '❌', tableIcon: 'x' }
 	};

 	const config = statusConfig[status.toLowerCase()] || {
@@ -380,8 +445,6 @@ function formatDependenciesWithStatus(
  * Display a comprehensive help guide
  */
 function displayHelp() {
-	displayBanner();
-
 	// Get terminal width - moved to top of function to make it available throughout
 	const terminalWidth = process.stdout.columns || 100; // Default to 100 if can't detect

@@ -462,6 +525,11 @@ function displayHelp() {
 			args: '--id=<id> --status=<status>',
 			desc: `Update task status (${TASK_STATUS_OPTIONS.join(', ')})`
 		},
+		{
+			name: 'sync-readme',
+			args: '[--with-subtasks] [--status=<status>]',
+			desc: 'Export tasks to README.md with professional formatting'
+		},
 		{
 			name: 'update',
 			args: '--from=<id> --prompt="<context>"',
@@ -687,7 +755,7 @@ function displayHelp() {

 	configTable.push(
 		[
-			`${chalk.yellow('.taskmasterconfig')}${chalk.reset('')}`,
+			`${chalk.yellow(TASKMASTER_CONFIG_FILE)}${chalk.reset('')}`,
 			`${chalk.white('AI model configuration file (project root)')}${chalk.reset('')}`,
 			`${chalk.dim('Managed by models cmd')}${chalk.reset('')}`
 		],
@@ -742,9 +810,9 @@ function displayHelp() {
  * @returns {string} Colored complexity score
  */
 function getComplexityWithColor(score) {
-	if (score <= 3) return chalk.green(`🟢 ${score}`);
-	if (score <= 6) return chalk.yellow(`🟡 ${score}`);
-	return chalk.red(`🔴 ${score}`);
+	if (score <= 3) return chalk.green(`● ${score}`);
+	if (score <= 6) return chalk.yellow(`● ${score}`);
+	return chalk.red(`● ${score}`);
 }

 /**
@@ -764,8 +832,6 @@ function truncateString(str, maxLength) {
  * @param {string} tasksPath - Path to the tasks.json file
  */
 async function displayNextTask(tasksPath, complexityReportPath = null) {
-	displayBanner();
-
 	// Read the tasks file
 	const data = readJSON(tasksPath);
 	if (!data || !data.tasks) {
@@ -1036,8 +1102,6 @@ async function displayTaskById(
 	complexityReportPath = null,
 	statusFilter = null
 ) {
-	displayBanner();
-
 	// Read the tasks file
 	const data = readJSON(tasksPath);
 	if (!data || !data.tasks) {
@@ -1492,8 +1556,6 @@ async function displayTaskById(
  * @param {string} reportPath - Path to the complexity report file
  */
 async function displayComplexityReport(reportPath) {
-	displayBanner();
-
 	// Check if the report exists
 	if (!fs.existsSync(reportPath)) {
 		console.log(
@@ -1851,7 +1913,7 @@ function displayApiKeyStatus(statusReport) {
 	console.log(table.toString());
 	console.log(
 		chalk.gray(
-			'  Note: Some providers (e.g., Azure, Ollama) may require additional endpoint configuration in .taskmasterconfig.'
+			`  Note: Some providers (e.g., Azure, Ollama) may require additional endpoint configuration in ${TASKMASTER_CONFIG_FILE}.`
 		)
 	);
 }
@@ -2090,5 +2152,9 @@ export {
 	displayApiKeyStatus,
 	displayModelConfiguration,
 	displayAvailableModels,
-	displayAiUsageSummary
+	displayAiUsageSummary,
+	succeedLoadingIndicator,
+	failLoadingIndicator,
+	warnLoadingIndicator,
+	infoLoadingIndicator
 };
@@ -155,8 +155,17 @@ function log(level, ...args) {
 		return;
 	}

-	// Get log level dynamically from config-manager
-	const configLevel = getLogLevel() || 'info'; // Use getter
+	// GUARD: Prevent circular dependency during config loading
+	// Use a simple fallback log level instead of calling getLogLevel()
+	let configLevel = 'info'; // Default fallback
+	try {
+		// Only try to get config level if we're not in the middle of config loading
+		configLevel = getLogLevel() || 'info';
+	} catch (error) {
+		// If getLogLevel() fails (likely due to circular dependency),
+		// use default 'info' level and continue
+		configLevel = 'info';
+	}

 	// Use text prefixes instead of emojis
 	const prefixes = {
@@ -190,8 +199,17 @@ function log(level, ...args) {
  * @returns {Object|null} Parsed JSON data or null if error occurs
  */
 function readJSON(filepath) {
-	// Get debug flag dynamically from config-manager
-	const isDebug = getDebugFlag();
+	// GUARD: Prevent circular dependency during config loading
+	let isDebug = false; // Default fallback
+	try {
+		// Only try to get debug flag if we're not in the middle of config loading
+		isDebug = getDebugFlag();
+	} catch (error) {
+		// If getDebugFlag() fails (likely due to circular dependency),
+		// use default false and continue
+		isDebug = false;
+	}
+
 	try {
 		const rawData = fs.readFileSync(filepath, 'utf8');
 		return JSON.parse(rawData);
@@ -212,8 +230,17 @@ function readJSON(filepath) {
  * @param {Object} data - Data to write
  */
 function writeJSON(filepath, data) {
-	// Get debug flag dynamically from config-manager
-	const isDebug = getDebugFlag();
+	// GUARD: Prevent circular dependency during config loading
+	let isDebug = false; // Default fallback
+	try {
+		// Only try to get debug flag if we're not in the middle of config loading
+		isDebug = getDebugFlag();
+	} catch (error) {
+		// If getDebugFlag() fails (likely due to circular dependency),
+		// use default false and continue
+		isDebug = false;
+	}
+
 	try {
 		const dir = path.dirname(filepath);
 		if (!fs.existsSync(dir)) {
@@ -246,8 +273,17 @@ function sanitizePrompt(prompt) {
  * @returns {Object|null} The parsed complexity report or null if not found
  */
 function readComplexityReport(customPath = null) {
-	// Get debug flag dynamically from config-manager
-	const isDebug = getDebugFlag();
+	// GUARD: Prevent circular dependency during config loading
+	let isDebug = false; // Default fallback
+	try {
+		// Only try to get debug flag if we're not in the middle of config loading
+		isDebug = getDebugFlag();
+	} catch (error) {
+		// If getDebugFlag() fails (likely due to circular dependency),
+		// use default false and continue
+		isDebug = false;
+	}
+
 	try {
 		let reportPath;
 		if (customPath) {
@@ -189,7 +189,7 @@ export class BaseAIProvider {
 				model: client(params.modelId),
 				messages: params.messages,
 				schema: params.schema,
-				mode: 'tool',
+				mode: 'auto',
 				maxTokens: params.maxTokens,
 				temperature: params.temperature
 			});
31
src/utils/logger-utils.js
Normal file
@@ -0,0 +1,31 @@
/**
 * Logger utility functions for Task Master
 * Provides standardized logging patterns for both CLI and utility contexts
 */

import { log as utilLog } from '../../scripts/modules/utils.js';

/**
 * Creates a standard logger object that wraps the utility log function
 * This provides a consistent logger interface across different parts of the application
 * @returns {Object} A logger object with standard logging methods (info, warn, error, debug, success)
 */
export function createStandardLogger() {
	return {
		info: (msg, ...args) => utilLog('info', msg, ...args),
		warn: (msg, ...args) => utilLog('warn', msg, ...args),
		error: (msg, ...args) => utilLog('error', msg, ...args),
		debug: (msg, ...args) => utilLog('debug', msg, ...args),
		success: (msg, ...args) => utilLog('success', msg, ...args)
	};
}

/**
 * Creates a logger using either the provided logger or a default standard logger
 * This is the recommended pattern for functions that accept an optional logger parameter
 * @param {Object|null} providedLogger - Optional logger object passed from caller
 * @returns {Object} A logger object with standard logging methods
 */
export function getLoggerOrDefault(providedLogger = null) {
	return providedLogger || createStandardLogger();
}
@@ -14,6 +14,33 @@ import {
 	TASKMASTER_CONFIG_FILE,
 	LEGACY_CONFIG_FILE
 } from '../constants/paths.js';
+import { getLoggerOrDefault } from './logger-utils.js';
+
+/**
+ * Normalize project root to ensure it doesn't end with .taskmaster
+ * This prevents double .taskmaster paths when using constants that include .taskmaster
+ * @param {string} projectRoot - The project root path to normalize
+ * @returns {string} - Normalized project root path
+ */
+export function normalizeProjectRoot(projectRoot) {
+	if (!projectRoot) return projectRoot;
+
+	// Split the path into segments
+	const segments = projectRoot.split(path.sep);
+
+	// Find the index of .taskmaster segment
+	const taskmasterIndex = segments.findIndex(
+		(segment) => segment === '.taskmaster'
+	);
+
+	if (taskmasterIndex !== -1) {
+		// If .taskmaster is found, return everything up to but not including .taskmaster
+		const normalizedSegments = segments.slice(0, taskmasterIndex);
+		return normalizedSegments.join(path.sep) || path.sep;
+	}
+
+	return projectRoot;
+}

 /**
  * Find the project root directory by looking for project markers
@@ -59,13 +86,25 @@ export function findProjectRoot(startDir = process.cwd()) {
  * @returns {string|null} - Resolved tasks.json path or null if not found
  */
 export function findTasksPath(explicitPath = null, args = null, log = null) {
-	const logger = log || console;
+	// Use the passed logger if available, otherwise use the default logger
+	const logger = getLoggerOrDefault(log);

-	// 1. If explicit path is provided, use it (highest priority)
+	// 1. First determine project root to use as base for all path resolution
+	const rawProjectRoot = args?.projectRoot || findProjectRoot();
+
+	if (!rawProjectRoot) {
+		logger.warn?.('Could not determine project root directory');
+		return null;
+	}
+
+	// 2. Normalize project root to prevent double .taskmaster paths
+	const projectRoot = normalizeProjectRoot(rawProjectRoot);
+
+	// 3. If explicit path is provided, resolve it relative to project root (highest priority)
 	if (explicitPath) {
 		const resolvedPath = path.isAbsolute(explicitPath)
 			? explicitPath
-			: path.resolve(process.cwd(), explicitPath);
+			: path.resolve(projectRoot, explicitPath);

 		if (fs.existsSync(resolvedPath)) {
 			logger.info?.(`Using explicit tasks path: ${resolvedPath}`);
@@ -77,18 +116,9 @@ export function findTasksPath(explicitPath = null, args = null, log = null) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// 2. Try to get project root from args (MCP) or find it
|
// 4. Check possible locations in order of preference
|
||||||
const projectRoot = args?.projectRoot || findProjectRoot();
|
|
||||||
|
|
||||||
if (!projectRoot) {
|
|
||||||
logger.warn?.('Could not determine project root directory');
|
|
||||||
return null;
|
|
||||||
}
|
|
||||||
|
|
||||||
// 3. Check possible locations in order of preference
|
|
||||||
const possiblePaths = [
|
const possiblePaths = [
|
||||||
path.join(projectRoot, TASKMASTER_TASKS_FILE), // .taskmaster/tasks/tasks.json (NEW)
|
path.join(projectRoot, TASKMASTER_TASKS_FILE), // .taskmaster/tasks/tasks.json (NEW)
|
||||||
path.join(projectRoot, 'tasks.json'), // tasks.json in root (LEGACY)
|
|
||||||
path.join(projectRoot, LEGACY_TASKS_FILE) // tasks/tasks.json (LEGACY)
|
path.join(projectRoot, LEGACY_TASKS_FILE) // tasks/tasks.json (LEGACY)
|
||||||
];
|
];
|
||||||
|
|
||||||
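Taken together, the two hunks above make findTasksPath resolve and normalize the project root before anything else, resolve explicit paths against that root instead of process.cwd(), and drop the root-level tasks.json fallback. A sketch of the resulting behavior, with an invented projectRoot value and a hypothetical mcpLogger for illustration:

const tasksPath = findTasksPath(
	null,
	{ projectRoot: '/home/user/project/.taskmaster' }, // normalized to /home/user/project
	mcpLogger // hypothetical logger forwarded from MCP
);
// Probes, in order:
//   /home/user/project/.taskmaster/tasks/tasks.json  (TASKMASTER_TASKS_FILE, new layout)
//   /home/user/project/tasks/tasks.json              (LEGACY_TASKS_FILE, legacy layout)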
@@ -130,7 +160,7 @@ export function findTasksPath(explicitPath = null, args = null, log = null) {
  * @returns {string|null} - Resolved PRD document path or null if not found
  */
 export function findPRDPath(explicitPath = null, args = null, log = null) {
-	const logger = log || console;
+	const logger = getLoggerOrDefault(log);
 
 	// 1. If explicit path is provided, use it (highest priority)
 	if (explicitPath) {
@@ -149,14 +179,17 @@ export function findPRDPath(explicitPath = null, args = null, log = null) {
 	}
 
 	// 2. Try to get project root from args (MCP) or find it
-	const projectRoot = args?.projectRoot || findProjectRoot();
+	const rawProjectRoot = args?.projectRoot || findProjectRoot();
 
-	if (!projectRoot) {
+	if (!rawProjectRoot) {
 		logger.warn?.('Could not determine project root directory');
 		return null;
 	}
 
-	// 3. Check possible locations in order of preference
+	// 3. Normalize project root to prevent double .taskmaster paths
+	const projectRoot = normalizeProjectRoot(rawProjectRoot);
+
+	// 4. Check possible locations in order of preference
 	const locations = [
 		TASKMASTER_DOCS_DIR, // .taskmaster/docs/ (NEW)
 		'scripts/', // Legacy location
@@ -199,7 +232,7 @@ export function findComplexityReportPath(
 	args = null,
 	log = null
 ) {
-	const logger = log || console;
+	const logger = getLoggerOrDefault(log);
 
 	// 1. If explicit path is provided, use it (highest priority)
 	if (explicitPath) {
@@ -218,14 +251,17 @@ export function findComplexityReportPath(
 	}
 
 	// 2. Try to get project root from args (MCP) or find it
-	const projectRoot = args?.projectRoot || findProjectRoot();
+	const rawProjectRoot = args?.projectRoot || findProjectRoot();
 
-	if (!projectRoot) {
+	if (!rawProjectRoot) {
 		logger.warn?.('Could not determine project root directory');
 		return null;
 	}
 
-	// 3. Check possible locations in order of preference
+	// 3. Normalize project root to prevent double .taskmaster paths
+	const projectRoot = normalizeProjectRoot(rawProjectRoot);
+
+	// 4. Check possible locations in order of preference
 	const locations = [
 		TASKMASTER_REPORTS_DIR, // .taskmaster/reports/ (NEW)
 		'scripts/', // Legacy location
@@ -268,7 +304,7 @@ export function resolveTasksOutputPath(
 	args = null,
 	log = null
 ) {
-	const logger = log || console;
+	const logger = getLoggerOrDefault(log);
 
 	// 1. If explicit path is provided, use it
 	if (explicitPath) {
@@ -281,9 +317,13 @@ export function resolveTasksOutputPath(
 	}
 
 	// 2. Try to get project root from args (MCP) or find it
-	const projectRoot = args?.projectRoot || findProjectRoot() || process.cwd();
+	const rawProjectRoot =
+		args?.projectRoot || findProjectRoot() || process.cwd();
 
-	// 3. Use new .taskmaster structure by default
+	// 3. Normalize project root to prevent double .taskmaster paths
+	const projectRoot = normalizeProjectRoot(rawProjectRoot);
+
+	// 4. Use new .taskmaster structure by default
 	const defaultPath = path.join(projectRoot, TASKMASTER_TASKS_FILE);
 	logger.info?.(`Using default output path: ${defaultPath}`);
 
@@ -309,7 +349,7 @@ export function resolveComplexityReportOutputPath(
 	args = null,
 	log = null
 ) {
-	const logger = log || console;
+	const logger = getLoggerOrDefault(log);
 
 	// 1. If explicit path is provided, use it
 	if (explicitPath) {
@@ -324,9 +364,13 @@ export function resolveComplexityReportOutputPath(
 	}
 
 	// 2. Try to get project root from args (MCP) or find it
-	const projectRoot = args?.projectRoot || findProjectRoot() || process.cwd();
+	const rawProjectRoot =
+		args?.projectRoot || findProjectRoot() || process.cwd();
 
-	// 3. Use new .taskmaster structure by default
+	// 3. Normalize project root to prevent double .taskmaster paths
+	const projectRoot = normalizeProjectRoot(rawProjectRoot);
+
+	// 4. Use new .taskmaster structure by default
 	const defaultPath = path.join(projectRoot, COMPLEXITY_REPORT_FILE);
 	logger.info?.(`Using default complexity report output path: ${defaultPath}`);
 
@@ -348,7 +392,7 @@ export function resolveComplexityReportOutputPath(
  * @returns {string|null} - Resolved config file path or null if not found
  */
 export function findConfigPath(explicitPath = null, args = null, log = null) {
-	const logger = log || console;
+	const logger = getLoggerOrDefault(log);
 
 	// 1. If explicit path is provided, use it (highest priority)
 	if (explicitPath) {
@@ -367,14 +411,17 @@ export function findConfigPath(explicitPath = null, args = null, log = null) {
 	}
 
 	// 2. Try to get project root from args (MCP) or find it
-	const projectRoot = args?.projectRoot || findProjectRoot();
+	const rawProjectRoot = args?.projectRoot || findProjectRoot();
 
-	if (!projectRoot) {
+	if (!rawProjectRoot) {
 		logger.warn?.('Could not determine project root directory');
 		return null;
 	}
 
-	// 3. Check possible locations in order of preference
+	// 3. Normalize project root to prevent double .taskmaster paths
+	const projectRoot = normalizeProjectRoot(rawProjectRoot);
+
+	// 4. Check possible locations in order of preference
 	const possiblePaths = [
 		path.join(projectRoot, TASKMASTER_CONFIG_FILE), // NEW location
 		path.join(projectRoot, LEGACY_CONFIG_FILE) // LEGACY location
@@ -382,12 +429,6 @@ export function findConfigPath(explicitPath = null, args = null, log = null) {
 
 	for (const configPath of possiblePaths) {
 		if (fs.existsSync(configPath)) {
-			try {
-				logger.info?.(`Found config file at: ${configPath}`);
-			} catch (error) {
-				// Silently handle logging errors during testing
-			}
-
 			// Issue deprecation warning for legacy paths
 			if (configPath?.endsWith(LEGACY_CONFIG_FILE)) {
 				logger.warn?.(
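One detail worth noting across these hunks: every log call goes through optional chaining (logger.info?.(...), logger.warn?.(...)), which is why the old try/catch around logging could be dropped. A partially implemented logger can no longer make path resolution throw. A tiny illustration:

// A caller may hand over a logger that only implements some methods.
const partialLogger = { warn: (msg) => console.warn(msg) };

partialLogger.info?.('silently skipped');            // no-op, no TypeError
partialLogger.warn?.('deprecated config location');  // logged as usual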
@@ -253,8 +253,7 @@ describe('MCP Server Direct Functions', () => {
 			error: {
 				code: 'FILE_NOT_FOUND_ERROR',
 				message: 'Tasks file not found'
-			},
-			fromCache: false
+			}
 		};
 	}
 
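The remaining test hunks apply the same change: the fromCache flag disappears from every mocked result. For reference, the error fixture above now reduces to roughly this shape (field values copied from the hunk; the variable name is only for illustration):

const mockMissingTasksResult = {
	error: {
		code: 'FILE_NOT_FOUND_ERROR',
		message: 'Tasks file not found'
	}
	// fromCache: false  <- removed in this change
};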
@@ -288,8 +287,7 @@ describe('MCP Server Direct Functions', () => {
 						.length,
 					pending: tasksData.filter((t) => t.status === 'pending').length
 				}
-			},
-			fromCache: false
+			}
 		};
 	}
 
@@ -305,8 +303,7 @@ describe('MCP Server Direct Functions', () => {
 					total: tasksData.length,
 					filtered: filteredTasks.length
 				}
-			},
-			fromCache: false
+			}
 		};
 	}
 
@@ -320,8 +317,7 @@ describe('MCP Server Direct Functions', () => {
 				stats: {
 					total: tasksData.length
 				}
-			},
-			fromCache: false
+			}
 		};
 	}
 
@@ -441,8 +437,7 @@ describe('MCP Server Direct Functions', () => {
 			error: {
 				code: 'INPUT_VALIDATION_ERROR',
 				message: 'Task ID is required'
-			},
-			fromCache: false
+			}
 		};
 	}
 
@@ -454,8 +449,7 @@ describe('MCP Server Direct Functions', () => {
 			error: {
 				code: 'TASK_NOT_FOUND',
 				message: `Task with ID ${args.id} not found`
-			},
-			fromCache: false
+			}
 		};
 	}
 
@@ -469,8 +463,7 @@ describe('MCP Server Direct Functions', () => {
 			error: {
 				code: 'TASK_COMPLETED',
 				message: `Task ${args.id} is already marked as done and cannot be expanded`
-			},
-			fromCache: false
+			}
 		};
 	}
 
@@ -495,8 +488,7 @@ describe('MCP Server Direct Functions', () => {
 				task: expandedTask,
 				subtasksAdded: expandedTask.subtasks.length,
 				hasExistingSubtasks: false
-			},
-			fromCache: false
+			}
 		};
 	}
 
@@ -43,6 +43,7 @@ const mockGetBaseUrlForRole = jest.fn();
 const mockGetAllProviders = jest.fn();
 const mockGetOllamaBaseURL = jest.fn();
 const mockGetAzureBaseURL = jest.fn();
+const mockGetBedrockBaseURL = jest.fn();
 const mockGetVertexProjectId = jest.fn();
 const mockGetVertexLocation = jest.fn();
 const mockGetAvailableModels = jest.fn();
@@ -113,6 +114,7 @@ jest.unstable_mockModule('../../scripts/modules/config-manager.js', () => ({
 	getAllProviders: mockGetAllProviders,
 	getOllamaBaseURL: mockGetOllamaBaseURL,
 	getAzureBaseURL: mockGetAzureBaseURL,
+	getBedrockBaseURL: mockGetBedrockBaseURL,
 	getVertexProjectId: mockGetVertexProjectId,
 	getVertexLocation: mockGetVertexLocation,
 	getMcpApiKeyStatus: mockGetMcpApiKeyStatus
@@ -139,7 +139,8 @@ const DEFAULT_CONFIG = {
 		defaultSubtasks: 5,
 		defaultPriority: 'medium',
 		projectName: 'Task Master',
-		ollamaBaseURL: 'http://localhost:11434/api'
+		ollamaBaseURL: 'http://localhost:11434/api',
+		bedrockBaseURL: 'https://bedrock.us-east-1.amazonaws.com'
 	}
 };
 
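These hunks thread a Bedrock base URL through the config-manager mocks and defaults. In a test, the mocked getter can then be driven per case with the usual Jest API, for example:

// Mirrors the default added to DEFAULT_CONFIG above.
mockGetBedrockBaseURL.mockReturnValue('https://bedrock.us-east-1.amazonaws.com');

// ...run code that reads the Bedrock base URL via config-manager...

expect(mockGetBedrockBaseURL).toHaveBeenCalled();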
@@ -22,6 +22,10 @@ jest.unstable_mockModule('../../../../../scripts/modules/ui.js', () => ({
 	getStatusWithColor: jest.fn((status) => status),
 	startLoadingIndicator: jest.fn(),
 	stopLoadingIndicator: jest.fn(),
+	succeedLoadingIndicator: jest.fn(),
+	failLoadingIndicator: jest.fn(),
+	warnLoadingIndicator: jest.fn(),
+	infoLoadingIndicator: jest.fn(),
 	displayAiUsageSummary: jest.fn()
 }));
 
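With the extra spinner mocks registered, a test can import the mocked ui.js and assert on the success and failure indicators directly, assuming the code under test calls these named exports:

const { succeedLoadingIndicator, failLoadingIndicator } = await import(
	'../../../../../scripts/modules/ui.js'
);

expect(succeedLoadingIndicator).toHaveBeenCalledTimes(1);
expect(failLoadingIndicator).not.toHaveBeenCalled();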
@@ -127,6 +127,7 @@ jest.unstable_mockModule(
 		getProjectName: jest.fn(() => 'Test Project'),
 		getOllamaBaseURL: jest.fn(() => 'http://localhost:11434/api'),
 		getAzureBaseURL: jest.fn(() => undefined),
+		getBedrockBaseURL: jest.fn(() => undefined),
 		getParametersForRole: jest.fn(() => ({
 			maxTokens: 4000,
 			temperature: 0.7
@@ -82,19 +82,19 @@ describe('UI Module', () => {
 		test('should return done status with emoji for console output', () => {
 			const result = getStatusWithColor('done');
 			expect(result).toMatch(/done/);
-			expect(result).toContain('✅');
+			expect(result).toContain('✓');
 		});
 
 		test('should return pending status with emoji for console output', () => {
 			const result = getStatusWithColor('pending');
 			expect(result).toMatch(/pending/);
-			expect(result).toContain('⏱️');
+			expect(result).toContain('○');
 		});
 
 		test('should return deferred status with emoji for console output', () => {
 			const result = getStatusWithColor('deferred');
 			expect(result).toMatch(/deferred/);
-			expect(result).toContain('⏱️');
+			expect(result).toContain('x');
 		});
 
 		test('should return in-progress status with emoji for console output', () => {
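The updated expectations replace emoji with plain glyphs (✓, ○, x). A minimal sketch of a mapping that would satisfy these tests, offered as an illustration rather than the actual ui.js implementation:

import chalk from 'chalk';

function getStatusWithColorSketch(status) {
	const glyphs = {
		done: { icon: '✓', color: chalk.green },
		pending: { icon: '○', color: chalk.yellow },
		deferred: { icon: 'x', color: chalk.gray }
	};
	const { icon, color } = glyphs[status] || { icon: '?', color: chalk.white };
	return color(`${icon} ${status}`);
}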
@@ -222,25 +222,25 @@ describe('UI Module', () => {
 		test('should return high complexity in red', () => {
 			const result = getComplexityWithColor(8);
 			expect(result).toMatch(/8/);
-			expect(result).toContain('🔴');
+			expect(result).toContain('●');
 		});
 
 		test('should return medium complexity in yellow', () => {
 			const result = getComplexityWithColor(5);
 			expect(result).toMatch(/5/);
-			expect(result).toContain('🟡');
+			expect(result).toContain('●');
 		});
 
 		test('should return low complexity in green', () => {
 			const result = getComplexityWithColor(3);
 			expect(result).toMatch(/3/);
-			expect(result).toContain('🟢');
+			expect(result).toContain('●');
 		});
 
 		test('should handle non-numeric inputs', () => {
 			const result = getComplexityWithColor('high');
 			expect(result).toMatch(/high/);
-			expect(result).toContain('🔴');
+			expect(result).toContain('●');
 		});
 	});
 });
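Likewise, the complexity indicator drops the colored emoji in favor of a single filled circle whose color carries the meaning. A sketch that satisfies these expectations, with thresholds inferred from the test values (8 is red, 5 is yellow, 3 is green, non-numeric input falls back to red):

import chalk from 'chalk';

function getComplexityWithColorSketch(score) {
	const value = Number(score);
	if (Number.isNaN(value)) return chalk.red(`● ${score}`);
	if (value >= 7) return chalk.red(`● ${value}`);
	if (value >= 4) return chalk.yellow(`● ${value}`);
	return chalk.green(`● ${value}`);
}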